From 9bfd13a22da20591ce667a295aac400a16c76dbe Mon Sep 17 00:00:00 2001
From: Packit Service
Date: Jan 06 2021 20:11:49 +0000
Subject: cloud-init-20.3 base


---

diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
new file mode 100644
index 0000000..8a0b2c0
--- /dev/null
+++ b/.github/workflows/cla.yml
@@ -0,0 +1,40 @@
+name: Verify Contributor License Agreement
+
+on: [pull_request]
+
+jobs:
+  cla-validate:
+
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v1
+    - name: Check CLA signing status for ${{ github.event.pull_request.user.login }}
+      run: |
+        cat > unsigned-cla.txt <<EOF
diff --git a/.travis.yml b/.travis.yml
--- a/.travis.yml
+++ b/.travis.yml
+      # The file is named either <name>.rootfs or <name>, normalise
+      latest_file="$(basename $latest_file .rootfs)"
+      # Find all files with that prefix and copy them to our cache dir
+      sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} "$TRAVIS_BUILD_DIR/lxd_images/" \;
+
 install:
     - git fetch --unshallow
-    - sudo apt-get build-dep -y cloud-init
-    - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox
-    # These are build deps but not pulled in by the build-dep call above
-    - sudo apt-get install -y --install-recommends dh-systemd python3-coverage python3-contextlib2
+    - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox debhelper
     - pip install .
     - pip install tox
     # bionic has lxd from deb installed, remove it first to ensure
@@ -30,26 +65,68 @@ matrix:
     - sudo snap install lxd
     - sudo lxd init --auto
     - sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles
+    # Move any cached lxd images into lxd's image dir
+    - sudo find "$TRAVIS_BUILD_DIR/lxd_images/" -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \;
     - sudo usermod -a -G lxd $USER
     - sudo sbuild-adduser $USER
     - cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
 script:
     # Ubuntu LTS: Build
-    - ./packages/bddeb -S
-    # Use this to get a new shell where we're in the sbuild group
-    - sudo -E su $USER -c 'mk-sbuild xenial'
-    - sudo -E su $USER -c 'sbuild --nolog --verbose --dist=xenial cloud-init_*.dsc'
+    - ./packages/bddeb -S -d --release xenial
+    - |
+      needs_caching=false
+      if [ -e "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" ]; then
+        # If we have a cached chroot, move it into place
+        sudo mkdir -p /var/lib/schroot/chroots/xenial-amd64
+        sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64
+        # Write its configuration
+        cat > sbuild-xenial-amd64 << EOM
+        [xenial-amd64]
+        description=xenial-amd64
+        groups=sbuild,root,admin
+        root-groups=sbuild,root,admin
+        # Uncomment these lines to allow members of these groups to access
+        # the -source chroots directly (useful for automated updates, etc).
+        #source-root-users=sbuild,root,admin
+        #source-root-groups=sbuild,root,admin
+        type=directory
+        profile=sbuild
+        union-type=overlay
+        directory=/var/lib/schroot/chroots/xenial-amd64
+        EOM
+        sudo mv sbuild-xenial-amd64 /etc/schroot/chroot.d/
+        sudo chown root /etc/schroot/chroot.d/sbuild-xenial-amd64
+        # And ensure it's up-to-date.
+ before_pkgs="$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)" + sudo schroot -c source:xenial-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade" + after_pkgs=$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum) + if [ "$before_pkgs" != "$after_pkgs" ]; then + needs_caching=true + fi + else + # Otherwise, create the chroot + sudo -E su $USER -c 'mk-sbuild xenial' + needs_caching=true + fi + # If there are changes to the schroot (or it's entirely new), + # tar up the schroot (to preserve ownership/permissions) and + # move it into the cached dir; no need to compress it because + # Travis will do that anyway + if [ "$needs_caching" = "true" ]; then + sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 . + fi + # Use sudo to get a new shell where we're in the sbuild group + - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc' # Ubuntu LTS: Integration - sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb' - - python: 2.7 - env: TOXENV=py27 - - python: 3.4 - env: TOXENV=xenial - # Travis doesn't support Python 3.4 on bionic, so use xenial + - python: 3.5 + env: + TOXENV=xenial + PYTEST_ADDOPTS=-v # List all tests run by pytest dist: xenial - python: 3.6 - env: TOXENV=pycodestyle - - python: 3.6 - env: TOXENV=pyflakes + env: TOXENV=flake8 - python: 3.6 env: TOXENV=pylint + - python: 3.6 + env: TOXENV=doc diff --git a/ChangeLog b/ChangeLog index 0430267..3e68073 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,362 @@ +20.3 + - Azure: Add netplan driver filter when using hv_netvsc driver (#539) + [James Falcon] (LP: #1830740) + - query: do not handle non-decodable non-gzipped content (#543) + - DHCP sandboxing failing on noexec mounted /var/tmp (#521) [Eduardo Otubo] + - Update the list of valid ssh keys. (#487) + [Ole-Martin Bratteng] (LP: #1877869) + - cmd: cloud-init query to handle compressed userdata (#516) (LP: #1889938) + - Pushing cloud-init log to the KVP (#529) [Moustafa Moustafa] + - Add Alpine Linux support. 
(#535) [dermotbradley] + - Detect kernel version before swap file creation (#428) [Eduardo Otubo] + - cli: add devel make-mime subcommand (#518) + - user-data: only verify mime-types for TYPE_NEEDED and x-shellscript + (#511) (LP: #1888822) + - DataSourceOracle: retry twice (and document why we retry at all) (#536) + - Refactor Azure report ready code (#468) [Johnson Shi] + - tox.ini: pin correct version of httpretty in xenial{,-dev} envs (#531) + - Support Oracle IMDSv2 API (#528) [James Falcon] + - .travis.yml: run a doc build during CI (#534) + - doc/rtd/topics/datasources/ovf.rst: fix doc8 errors (#533) + - Fix 'Users and Groups' configuration documentation (#530) [sshedi] + - cloudinit.distros: update docstrings of add_user and create_user (#527) + - Fix headers for device types in network v2 docs (#532) + [Caleb Xavier Berger] + - Add AlexBaranowski as contributor (#508) [Aleksander Baranowski] + - DataSourceOracle: refactor to use only OPC v1 endpoint (#493) + - .github/workflows/stale.yml: s/Josh/Rick/ (#526) + - Fix a typo in apt pipelining module (#525) [Xiao Liang] + - test_util: parametrize devlist tests (#523) [James Falcon] + - Recognize LABEL_FATBOOT labels (#513) [James Falcon] (LP: #1841466) + - Handle additional identifier for SLES For HPC (#520) [Robert Schweikert] + - Revert "test-requirements.txt: pin pytest to <6 (#512)" (#515) + - test-requirements.txt: pin pytest to <6 (#512) + - Add "tsanghan" as contributor (#504) [tsanghan] + - fix brpm building (LP: #1886107) + - Adding eandersson as a contributor (#502) [Erik Olof Gunnar Andersson] + - azure: disable bouncing hostname when setting hostname fails (#494) + [Anh Vo] + - VMware: Support parsing DEFAULT-RUN-POST-CUST-SCRIPT (#441) + [xiaofengw-vmware] + - DataSourceAzure: Use ValueError when JSONDecodeError is not available + (#490) [Anh Vo] + - cc_ca_certs.py: fix blank line problem when removing CAs and adding + new one (#483) [dermotbradley] + - freebsd: py37-serial is now py37-pyserial (#492) [Gonéri Le Bouder] + - ssh exit with non-zero status on disabled user (#472) + [Eduardo Otubo] (LP: #1170059) + - cloudinit: remove global disable of pylint W0107 and fix errors (#489) + - networking: refactor wait_for_physdevs from cloudinit.net (#466) + (LP: #1884626) + - HACKING.rst: add pytest.param pytest gotcha (#481) + - cloudinit: remove global disable of pylint W0105 and fix errors (#480) + - Fix two minor warnings (#475) + - test_data: fix faulty patch (#476) + - cc_mounts: handle missing fstab (#484) (LP: #1886531) + - LXD cloud_tests: support more lxd image formats (#482) [Paride Legovini] + - Add update_etc_hosts as default module on *BSD (#479) [Adam Dobrawy] + - cloudinit: fix tip-pylint failures and bump pinned pylint version (#478) + - Added BirknerAlex as contributor and sorted the file (#477) + [Alexander Birkner] + - Update list of types of modules in cli.rst [saurabhvartak1982] + - tests: use markers to configure disable_subp_usage (#473) + - Add mention of vendor-data to no-cloud format documentation (#470) + [Landon Kirk] + - Fix broken link to OpenStack metadata service docs (#467) + [Matt Riedemann] + - Disable ec2 mirror for non aws instances (#390) + [lucasmoura] (LP: #1456277) + - cloud_tests: don't pass --python-version to read-dependencies (#465) + - networking: refactor is_physical from cloudinit.net (#457) (LP: #1884619) + - Enable use of the caplog fixture in pytest tests, and add a + cc_final_message test using it (#461) + - RbxCloud: Add support for FreeBSD (#464) [Adam Dobrawy] + - Add 
schema for cc_chef module (#375) [lucasmoura] (LP: #1858888) + - test_util: add (partial) testing for util.mount_cb (#463) + - .travis.yml: revert to installing ubuntu-dev-tools (#460) + - HACKING.rst: add details of net refactor tracking (#456) + - .travis.yml: rationalise installation of dependencies in host (#449) + - Add dermotbradley as contributor. (#458) [dermotbradley] + - net/networking: remove unused functions/methods (#453) + - distros.networking: initial implementation of layout (#391) + - cloud-init.service.tmpl: use "rhel" instead of "redhat" (#452) + - Change from redhat to rhel in systemd generator tmpl (#450) + [Eduardo Otubo] + - Hetzner: support reading user-data that is base64 encoded. (#448) + [Scott Moser] (LP: #1884071) + - HACKING.rst: add strpath gotcha to testing gotchas section (#446) + - cc_final_message: don't create directories when writing boot-finished + (#445) (LP: #1883903) + - .travis.yml: only store new schroot if something has changed (#440) + - util: add ensure_dir_exists parameter to write_file (#443) + - printing the error stream of the dhclient process before killing it + (#369) [Moustafa Moustafa] + - Fix link to the MAAS documentation (#442) + [Paride Legovini] (LP: #1883666) + - RPM build: disable the dynamic mirror URLs when using a proxy (#437) + [Paride Legovini] + - util: rename write_file's copy_mode parameter to preserve_mode (#439) + - .travis.yml: use $TRAVIS_BUILD_DIR for lxd_image caching (#438) + - cli.rst: alphabetise devel subcommands and add net-convert to list (#430) + - Default to UTF-8 in /var/log/cloud-init.log (#427) [James Falcon] + - travis: cache the chroot we use for package builds (#429) + - test: fix all flake8 E126 errors (#425) [Joshua Powers] + - Fixes KeyError for bridge with no "parameters:" setting (#423) + [Brian Candler] (LP: #1879673) + - When tools.conf does not exist, running cmd "vmware-toolbox-cmd + config get deployPkg enable-custom-scripts", the return code will + be EX_UNAVAILABLE(69), on this condition, it should not take it as + error. (#413) [chengcheng-chcheng] + - Document CloudStack data-server well-known hostname (#399) [Gregor Riepl] + - test: move conftest.py to top-level, to cover tests/ also (#414) + - Replace cc_chef is_installed with use of subp.is_exe. (#421) + [Scott Moser] + - Move runparts to subp. (#420) [Scott Moser] + - Move subp into its own module. 
(#416) [Scott Moser] + - readme: point at travis-ci.com (#417) [Joshua Powers] + - New feature flag functionality and fix includes failing silently (#367) + [James Falcon] (LP: #1734939) + - Enhance poll imds logging (#365) [Moustafa Moustafa] + - test: fix all flake8 E121 and E123 errors (#404) [Joshua Powers] + - test: fix all flake8 E241 (#403) [Joshua Powers] + - test: ignore flake8 E402 errors in main.py (#402) [Joshua Powers] + - cc_grub_dpkg: determine idevs in more robust manner with grub-probe + (#358) [Matthew Ruffell] (LP: #1877491) + - test: fix all flake8 E741 errors (#401) [Joshua Powers] + - tests: add groovy integration tests for ubuntu (#400) + - Enable chef_license support for chef infra client (#389) [Bipin Bachhao] + - testing: use flake8 again (#392) [Joshua Powers] + - enable Puppet, Chef mcollective in default config (#385) + [Mina Galić (deprecated: Igor Galić)] (LP: #1880279) + - HACKING.rst: introduce .net -> Networking refactor section (#384) + - Travis: do not install python3-contextlib2 (dropped dependency) (#388) + [Paride Legovini] + - HACKING: mention that .github-cla-signers is alpha-sorted (#380) + - Add bipinbachhao as contributor (#379) [Bipin Bachhao] + - cc_snap: validate that assertions property values are strings (#370) + - conftest: implement partial disable_subp_usage (#371) + - test_resolv_conf: refresh stale comment (#374) + - cc_snap: apply validation to snap.commands properties (#364) + - make finding libc platform independent (#366) + [Mina Galić (deprecated: Igor Galić)] + - doc/rtd/topics/faq: Updates LXD docs links to current site (#368) [TomP] + - templater: drop Jinja Python 2 compatibility shim (#353) + - cloudinit: minor pylint fixes (#360) + - cloudinit: remove unneeded __future__ imports (#362) + - migrating momousta lp user to Moustafa-Moustafa GitHub user (#361) + [Moustafa Moustafa] + - cloud_tests: emit dots on Travis while fetching images (#347) + - Add schema to apt configure config (#357) [lucasmoura] (LP: #1858884) + - conftest: add docs and tests regarding CiTestCase's subp functionality + (#343) + - analyze/dump: refactor shared string into variable (#350) + - doc: update boot.rst with correct timing of runcmd (#351) + - HACKING.rst: change contact info to Rick Harding (#359) [lucasmoura] + - HACKING.rst: guide people to add themselves to the CLA file (#349) + - HACKING.rst: more unit testing documentation (#354) + - .travis.yml: don't run lintian during integration test package builds + (#352) + - Add test to ensure docs examples are valid cloud-init configs (#355) + [James Falcon] (LP: #1876414) + - make suse and sles support 127.0.1.1 (#336) [chengcheng-chcheng] + - Create tests to validate schema examples (#348) + [lucasmoura] (LP: #1876412) + - analyze/dump: add support for Amazon Linux 2 log lines (#346) + (LP: #1876323) + - bsd: upgrade support (#305) [Gonéri Le Bouder] + - Add lucasmoura as contributor (#345) [lucasmoura] + - Add "therealfalcon" as contributor (#344) [James Falcon] + - Adapt the package building scripts to use Python 3 (#231) + [Paride Legovini] + - DataSourceEc2: use metadata's NIC ordering to determine route-metrics + (#342) (LP: #1876312) + - .travis.yml: introduce caching (#329) + - cc_locale: introduce schema (#335) + - doc/rtd/conf.py: bump copyright year to 2020 (#341) + - yum_add_repo: Add Centos to the supported distro list (#340) + +20.2 + - doc/format: reference make-mime.py instead of an inline script (#334) + - Add docs about creating parent folders (#330) [Adrian Wilkins] + - 
DataSourceNoCloud/OVF: drop claim to support FTP (#333) (LP: #1875470) + - schema: ignore spurious pylint error (#332) + - schema: add json schema for write_files module (#152) + - BSD: find_devs_with_ refactoring (#298) [Gonéri Le Bouder] + - nocloud: drop work around for Linux 2.6 (#324) [Gonéri Le Bouder] + - cloudinit: drop dependencies on unittest2 and contextlib2 (#322) + - distros: handle a potential mirror filtering error case (#328) + - log: remove unnecessary import fallback logic (#327) + - .travis.yml: don't run integration test on ubuntu/* branches (#321) + - More unit test documentation (#314) + - conftest: introduce disable_subp_usage autouse fixture (#304) + - YAML align indent sizes for docs readability (#323) [Tak Nishigori] + - network_state: add missing space to log message (#325) + - tests: add missing mocks for get_interfaces_by_mac (#326) (LP: #1873910) + - test_mounts: expand happy path test for both happy paths (#319) + - cc_mounts: fix incorrect format specifiers (#316) (LP: #1872836) + - swap file "size" being used before checked if str (#315) [Eduardo Otubo] + - HACKING.rst: add pytest version gotchas section (#311) + - docs: Add steps to re-run cloud-id and cloud-init (#313) [Joshua Powers] + - readme: OpenBSD is now supported (#309) [Gonéri Le Bouder] + - net: ignore 'renderer' key in netplan config (#306) (LP: #1870421) + - Add support for NFS/EFS mounts (#300) [Andrew Beresford] (LP: #1870370) + - openbsd: set_passwd should not unlock user (#289) [Gonéri Le Bouder] + - tools/.github-cla-signers: add beezly as CLA signer (#301) + - util: remove unnecessary lru_cache import fallback (#299) + - HACKING.rst: reorganise/update CLA signature info (#297) + - distros: drop leading/trailing hyphens from mirror URL labels (#296) + - HACKING.rst: add note about variable annotations (#295) + - CiTestCase: stop using and remove sys_exit helper (#283) + - distros: replace invalid characters in mirror URLs with hyphens (#291) + (LP: #1868232) + - rbxcloud: gracefully handle arping errors (#262) [Adam Dobrawy] + - Fix cloud-init ignoring some misdeclared mimetypes in user-data. 
+ [Kurt Garloff] + - net: ubuntu focal prioritize netplan over eni even if both present + (#267) (LP: #1867029) + - cloudinit: refactor util.is_ipv4 to net.is_ipv4_address (#292) + - net/cmdline: replace type comments with annotations (#294) + - HACKING.rst: add Type Annotations design section (#293) + - net: introduce is_ip_address function (#288) + - CiTestCase: remove now-unneeded parse_and_read helper method (#286) + - .travis.yml: allow 30 minutes of inactivity in cloud tests (#287) + - sources/tests/test_init: drop use of deprecated inspect.getargspec (#285) + - setup.py: drop NIH check_output implementation (#282) + - Identify SAP Converged Cloud as OpenStack [Silvio Knizek] + - add Openbsd support (#147) [Gonéri Le Bouder] + - HACKING.rst: add examples of the two test class types (#278) + - VMWware: support to update guest info gc status if enabled (#261) + [xiaofengw-vmware] + - Add lp-to-git mapping for kgarloff (#279) + - set_passwords: avoid chpasswd on BSD (#268) [Gonéri Le Bouder] + - HACKING.rst: add Unit Testing design section (#277) + - util: read_cc_from_cmdline handle urlencoded yaml content (#275) + - distros/tests/test_init: add tests for _get_package_mirror_info (#272) + - HACKING.rst: add links to new Code Review Process doc (#276) + - freebsd: ensure package update works (#273) [Gonéri Le Bouder] + - doc: introduce Code Review Process documentation (#160) + - tools: use python3 (#274) + - cc_disk_setup: fix RuntimeError (#270) (LP: #1868327) + - cc_apt_configure/util: combine search_for_mirror implementations (#271) + - bsd: boottime does not depend on the libc soname (#269) + [Gonéri Le Bouder] + - test_oracle,DataSourceOracle: sort imports (#266) + - DataSourceOracle: update .network_config docstring (#257) + - cloudinit/tests: remove unneeded with_logs configuration (#263) + - .travis.yml: drop stale comment (#255) + - .gitignore: add more common directories (#258) + - ec2: render network on all NICs and add secondary IPs as static (#114) + (LP: #1866930) + - ec2 json validation: fix the reference to the 'merged_cfg' key (#256) + [Paride Legovini] + - releases.yaml: quote the Ubuntu version numbers (#254) [Paride Legovini] + - cloudinit: remove six from packaging/tooling (#253) + - util/netbsd: drop six usage (#252) + - workflows: introduce stale pull request workflow (#125) + - cc_resolv_conf: introduce tests and stabilise output across Python + versions (#251) + - fix minor issue with resolv_conf template (#144) [andreaf74] + - doc: CloudInit also support NetBSD (#250) [Gonéri Le Bouder] + - Add Netbsd support (#62) [Gonéri Le Bouder] + - tox.ini: avoid substition syntax that causes a traceback on xenial (#245) + - Add pub_key_ed25519 to cc_phone_home (#237) [Daniel Hensby] + - Introduce and use of a list of GitHub usernames that have signed CLA + (#244) + - workflows/cla.yml: use correct username for CLA check (#243) + - tox.ini: use xenial version of jsonpatch in CI (#242) + - workflows: CLA validation altered to fail status on pull_request (#164) + - tox.ini: bump pyflakes version to 2.1.1 (#239) + - cloudinit: move to pytest for running tests (#211) + - instance-data: add cloud-init merged_cfg and sys_info keys to json + (#214) (LP: #1865969) + - ec2: Do not fallback to IMDSv1 on EC2 (#216) + - instance-data: write redacted cfg to instance-data.json (#233) + (LP: #1865947) + - net: support network-config:disabled on the kernel commandline (#232) + (LP: #1862702) + - ec2: only redact token request headers in logs, avoid altering request + (#230) (LP: 
#1865882) + - docs: typo fixed: dta → data [Alexey Vazhnov] + - Fixes typo on Amazon Web Services (#217) [Nick Wales] + - Fix docs for OpenStack DMI Asset Tag (#228) + [Mark T. Voelker] (LP: #1669875) + - Add physical network type: cascading to openstack helpers (#200) + [sab-systems] + - tests: add focal integration tests for ubuntu (#225) + +20.1 + - ec2: Do not log IMDSv2 token values, instead use REDACTED (#219) + (LP: #1863943) + - utils: use SystemRandom when generating random password. (#204) + [Dimitri John Ledkov] + - docs: mount_default_files is a list of 6 items, not 7 (#212) + - azurecloud: fix issues with instances not starting (#205) (LP: #1861921) + - unittest: fix stderr leak in cc_set_password random unittest + output. (#208) + - cc_disk_setup: add swap filesystem force flag (#207) + - import sysvinit patches from freebsd-ports tree (#161) [Igor Galić] + - docs: fix typo (#195) [Edwin Kofler] + - sysconfig: distro-specific config rendering for BOOTPROTO option (#162) + [Robert Schweikert] (LP: #1800854) + - cloudinit: replace "from six import X" imports (except in util.py) (#183) + - run-container: use 'test -n' instead of 'test ! -z' (#202) + [Paride Legovini] + - net/cmdline: correctly handle static ip= config (#201) + [Dimitri John Ledkov] (LP: #1861412) + - Replace mock library with unittest.mock (#186) + - HACKING.rst: update CLA link (#199) + - Scaleway: Fix DatasourceScaleway to avoid backtrace (#128) + [Louis Bouchard] + - cloudinit/cmd/devel/net_convert.py: add missing space (#191) + - tools/run-container: drop support for python2 (#192) [Paride Legovini] + - Print ssh key fingerprints using sha256 hash (#188) (LP: #1860789) + - Make the RPM build use Python 3 (#190) [Paride Legovini] + - cc_set_password: increase random pwlength from 9 to 20 (#189) + (LP: #1860795) + - .travis.yml: use correct Python version for xenial tests (#185) + - cloudinit: remove ImportError handling for mock imports (#182) + - Do not use fallocate in swap file creation on xfs. 
(#70) + [Eduardo Otubo] (LP: #1781781) + - .readthedocs.yaml: install cloud-init when building docs (#181) + (LP: #1860450) + - Introduce an RTD config file, and pin the Sphinx version to the RTD + default (#180) + - Drop most of the remaining use of six (#179) + - Start removing dependency on six (#178) + - Add Rootbox & HyperOne to list of cloud in README (#176) [Adam Dobrawy] + - docs: add proposed SRU testing procedure (#167) + - util: rename get_architecture to get_dpkg_architecture (#173) + - Ensure util.get_architecture() runs only once (#172) + - Only use gpart if it is the BSD gpart (#131) [Conrad Hoffmann] + - freebsd: remove superflu exception mapping (#166) [Gonéri Le Bouder] + - ssh_auth_key_fingerprints_disable test: fix capitalization (#165) + [Paride Legovini] + - util: move uptime's else branch into its own boottime function (#53) + [Igor Galić] (LP: #1853160) + - workflows: add contributor license agreement checker (#155) + - net: fix rendering of 'static6' in network config (#77) (LP: #1850988) + - Make tests work with Python 3.8 (#139) [Conrad Hoffmann] + - fixed minor bug with mkswap in cc_disk_setup.py (#143) [andreaf74] + - freebsd: fix create_group() cmd (#146) [Gonéri Le Bouder] + - doc: make apt_update example consistent (#154) + - doc: add modules page toc with links (#153) (LP: #1852456) + - Add support for the amazon variant in cloud.cfg.tmpl (#119) + [Frederick Lefebvre] + - ci: remove Python 2.7 from CI runs (#137) + - modules: drop cc_snap_config config module (#134) + - migrate-lp-user-to-github: ensure Launchpad repo exists (#136) + - docs: add initial troubleshooting to FAQ (#104) [Joshua Powers] + - doc: update cc_set_hostname frequency and descrip (#109) + [Joshua Powers] (LP: #1827021) + - freebsd: introduce the freebsd renderer (#61) [Gonéri Le Bouder] + - cc_snappy: remove deprecated module (#127) + - HACKING.rst: clarify that everyone needs to do the LP->GH dance (#130) + - freebsd: cloudinit service requires devd (#132) [Gonéri Le Bouder] + - cloud-init: fix capitalisation of SSH (#126) + - doc: update cc_ssh clarify host and auth keys + [Joshua Powers] (LP: #1827021) + - ci: emit names of tests run in Travis (#120) + 19.4 - doc: specify _ over - in cloud config modules [Joshua Powers] (LP: #1293254) diff --git a/HACKING.rst b/HACKING.rst index c74bd5a..60c7b5e 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -6,37 +6,35 @@ This document describes how to contribute changes to cloud-init. It assumes you have a `GitHub`_ account, and refers to your GitHub user as ``GH_USER`` throughout. -Do these things once -==================== +Submitting your first pull request +================================== -* To contribute, you must sign the Canonical `contributor license agreement`_ +Follow these steps to submit your first pull request to cloud-init: - If you have already signed it as an individual, your Launchpad user will be - listed in the `contributor-agreement-canonical`_ group. Unfortunately there - is no easy way to check if an organization or company you are doing work for - has signed. When signing the CLA and prompted for 'Project contact' or - 'Canonical Project Manager' enter 'Josh Powers'. 
+* To contribute to cloud-init, you must sign the Canonical `contributor + license agreement`_ - For existing contributors who've already signed the agreement, we can verify - the link between your `Launchpad`_ account and your `GitHub`_ account by - creating a branch with both your Launchpad and GitHub usernames into both - Launchpad and GitHub cloud-init repositories. We've added a tool - (tools/migrate-lp-user-to-github) to the cloud-init repository to handle this - migration as automatically as possible. + * If you have already signed it as an individual, your Launchpad user + will be listed in the `contributor-agreement-canonical`_ group. + (Unfortunately there is no easy way to check if an organization or + company you are doing work for has signed.) - The cloud-init team will review the two merge proposals and verify - that the CLA has been signed for the Launchpad user and record the - associated GitHub account. We will reply to the email address - associated with your Launchpad account that you've been clear to - contribute to cloud-init on GitHub. + * When signing it: - If your company has signed the CLA for you, please contact us to help - in verifying which launchad/GitHub accounts are associated with the - company. For any questions or help with the process, please email: + * ensure that you fill in the GitHub username field. + * when prompted for 'Project contact' or 'Canonical Project + Manager', enter 'Rick Harding'. - `Josh Powers `_ with the subject: Cloud-Init CLA + * If your company has signed the CLA for you, please contact us to + help in verifying which Launchpad/GitHub accounts are associated + with the company. - You also may contanct user ``powersj`` in ``#cloud-init`` channel via IRC freenode. + * For any questions or help with the process, please email `Rick + Harding `_ with the subject, + "Cloud-Init CLA" + + * You also may contact user ``rick_h`` in the ``#cloud-init`` + channel on the Freenode IRC network. * Configure git with your email and name for commit messages. @@ -59,11 +57,46 @@ Do these things once git remote add GH_USER git@github.com:GH_USER/cloud-init.git git push GH_USER master +* Read through the cloud-init `Code Review Process`_, so you understand + how your changes will end up in cloud-init's codebase. + +* Submit your first cloud-init pull request, adding yourself to the + in-repository list that we use to track CLA signatures: + `tools/.github-cla-signers`_ + + * See `PR #344`_ and `PR #345`_ for examples of what this pull + request should look like. + + * Note that ``.github-cla-signers`` is sorted alphabetically. + + * (If you already have a change that you want to submit, you can + also include the change to ``tools/.github-cla-signers`` in that + pull request, there is no need for two separate PRs.) + .. _GitHub: https://github.com .. _Launchpad: https://launchpad.net .. _repository: https://github.com/canonical/cloud-init -.. _contributor license agreement: http://www.canonical.com/contributors +.. _contributor license agreement: https://ubuntu.com/legal/contributors .. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members +.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/master/tools/.github-cla-signers +.. _PR #344: https://github.com/canonical/cloud-init/pull/344 +.. 
_PR #345: https://github.com/canonical/cloud-init/pull/345 + +Transferring CLA Signatures from Launchpad to Github +---------------------------------------------------- + +For existing contributors who have signed the agreement in Launchpad +before the Github username field was included, we need to verify the +link between your `Launchpad`_ account and your `GitHub`_ account. To +enable us to do this, we ask that you create a branch with both your +Launchpad and GitHub usernames against both the Launchpad and GitHub +cloud-init repositories. We've added a tool +(``tools/migrate-lp-user-to-github``) to the cloud-init repository to +handle this migration as automatically as possible. + +The cloud-init team will review the two merge proposals and verify that +the CLA has been signed for the Launchpad user and record the +associated GitHub account. Do these things for each feature or bug ======================================= @@ -118,13 +151,15 @@ Do these things for each feature or bug - Click 'Create Pull Request` Then, someone in the `Ubuntu Server`_ team will review your changes and -follow up in the pull request. +follow up in the pull request. Look at the `Code Review Process`_ doc +to understand the following steps. Feel free to ping and/or join ``#cloud-init`` on freenode irc if you have any questions. .. _tox: https://tox.readthedocs.io/en/latest/ .. _Ubuntu Server: https://github.com/orgs/canonical/teams/ubuntu-server +.. _Code Review Process: https://cloudinit.readthedocs.io/en/latest/topics/code_review.html Design ====== @@ -137,3 +172,491 @@ Cloud Config Modules * Any new modules should use underscores in any new config options and not hyphens (e.g. `new_option` and *not* `new-option`). + +Unit Testing +------------ + +cloud-init uses `pytest`_ to run its tests, and has tests written both +as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests. +The following guidelines should be followed: + +* For ease of organisation and greater accessibility for developers not + familiar with pytest, all cloud-init unit tests must be contained + within test classes + + * Put another way, module-level test functions should not be used + +* pytest test classes should use `pytest fixtures`_ to share + functionality instead of inheritance + +* As all tests are contained within classes, it is acceptable to mix + ``TestCase`` test classes and pytest test classes within the same + test file + + * These can be easily distinguished by their definition: pytest + classes will not use inheritance at all (e.g. + `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will + subclass (indirectly) from ``TestCase`` (e.g. + `TestPrependBaseCommands`_) + +* pytest tests should use bare ``assert`` statements, to take advantage + of pytest's `assertion introspection`_ + + * For ``==`` and other commutative assertions, the expected value + should be placed before the value under test: + ``assert expected_value == function_under_test()`` + +* As we still support Ubuntu 16.04 (Xenial Xerus), we can only use + pytest features that are available in v2.8.7. This is an + inexhaustive list of ways in which this may catch you out: + + * Support for using ``yield`` in ``pytest.fixture`` functions was + only introduced in `pytest 3.0`_. Such functions must instead use + the ``pytest.yield_fixture`` decorator. 
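+
+    For example, a fixture that would use ``yield`` via a plain
+    ``pytest.fixture`` on newer pytest versions would instead be
+    written like this (a minimal sketch; the fixture name and the
+    resource it manages are hypothetical)::
+
+        import pytest
+
+        @pytest.yield_fixture
+        def temp_resource():
+            resource = {}       # set up the resource before the test
+            yield resource      # provide it to the test
+            resource.clear()    # teardown runs after the test completes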
+ + * Only the following built-in fixtures are available + [#fixture-list]_: + + * ``cache`` + * ``capfd`` + * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial) + * ``capsys`` + * ``monkeypatch`` + * ``pytestconfig`` + * ``record_xml_property`` + * ``recwarn`` + * ``tmpdir_factory`` + * ``tmpdir`` + + * On xenial, the objects returned by the ``tmpdir`` fixture cannot be + used where paths are required; they are rejected as invalid paths. + You must instead use their ``.strpath`` attribute. + + * For example, instead of + ``util.write_file(tmpdir.join("some_file"), ...)``, you should + write ``util.write_file(tmpdir.join("some_file").strpath, ...)``. + + * The `pytest.param`_ function cannot be used. It was introduced in + pytest 3.1, which means it is not available on xenial. The more + limited mechanism it replaced was removed in pytest 4.0, so is not + available in focal or later. The only available alternatives are + to write mark-requiring test instances as completely separate + tests, without utilising parameterisation, or to apply the mark to + the entire parameterized test (and therefore every test instance). + +* Variables/parameter names for ``Mock`` or ``MagicMock`` instances + should start with ``m_`` to clearly distinguish them from non-mock + variables + + * For example, ``m_readurl`` (which would be a mock for ``readurl``) + +* The ``assert_*`` methods that are available on ``Mock`` and + ``MagicMock`` objects should be avoided, as typos in these method + names may not raise ``AttributeError`` (and so can cause tests to + silently pass). An important exception: if a ``Mock`` is + `autospecced`_ then misspelled assertion methods *will* raise an + ``AttributeError``, so these assertion methods may be used on + autospecced ``Mock`` objects. + + For non-autospecced ``Mock`` s, these substitutions can be used + (``m`` is assumed to be a ``Mock``): + + * ``m.assert_any_call(*args, **kwargs)`` => ``assert + mock.call(*args, **kwargs) in m.call_args_list`` + * ``m.assert_called()`` => ``assert 0 != m.call_count`` + * ``m.assert_called_once()`` => ``assert 1 == m.call_count`` + * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert + [mock.call(*args, **kwargs)] == m.call_args_list`` + * ``m.assert_called_with(*args, **kwargs)`` => ``assert + mock.call(*args, **kwargs) == m.call_args_list[-1]`` + * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in + call_list: assert call in m.call_args_list`` + + * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(..., + any_order=False)`` are not easily replicated in a single + statement, so their use when appropriate is acceptable. + + * ``m.assert_not_called()`` => ``assert 0 == m.call_count`` + +* Test arguments should be ordered as follows: + + * ``mock.patch`` arguments. When used as a decorator, ``mock.patch`` + partially applies its generated ``Mock`` object as the first + argument, so these arguments must go first. + * ``pytest.mark.parametrize`` arguments, in the order specified to + the ``parametrize`` decorator. These arguments are also provided + by a decorator, so it's natural that they sit next to the + ``mock.patch`` arguments. + * Fixture arguments, alphabetically. These are not provided by a + decorator, so they are last, and their order has no defined + meaning, so we default to alphabetical. 
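+
+  For example, a test method following this ordering might look like
+  the following (a minimal sketch: the patched target, the function
+  under test and all names are hypothetical)::
+
+      # assumes: import pytest; from unittest import mock
+      @pytest.mark.parametrize("value,expected", [(1, "one"), (2, "two")])
+      @mock.patch("cloudinit.example.readurl")
+      def test_spelling(self, m_readurl, value, expected, caplog, tmpdir):
+          # m_readurl is the Mock generated by mock.patch
+          assert expected == spell_number(value)
+
+  Here ``m_readurl`` (from ``mock.patch``) comes first, then the
+  ``parametrize`` arguments in their declared order, and finally the
+  ``caplog`` and ``tmpdir`` fixtures in alphabetical order.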
+ +* It follows from this ordering of test arguments (so that we retain + the property that arguments left-to-right correspond to decorators + bottom-to-top) that test decorators should be ordered as follows: + + * ``pytest.mark.parametrize`` + * ``mock.patch`` + +* When there are multiple patch calls in a test file for the module it + is testing, it may be desirable to capture the shared string prefix + for these patch calls in a module-level variable. If used, such + variables should be named ``M_PATH`` or, for datasource tests, + ``DS_PATH``. + +.. _pytest: https://docs.pytest.org/ +.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html +.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15 +.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9 +.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html +.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093 +.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param +.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing + +Type Annotations +---------------- + +The cloud-init codebase uses Python's annotation support for storing +type annotations in the style specified by `PEP-484`_. Their use in +the codebase is encouraged but with one important caveat: types from +the ``typing`` module cannot be used. + +cloud-init still supports Python 3.4, which doesn't have the ``typing`` +module in the stdlib. This means that the use of any types from the +``typing`` module in the codebase would require installation of an +additional Python module on platforms using Python 3.4. As such +platforms are generally in maintenance mode, the introduction of a new +dependency may act as a break in compatibility in practical terms. + +Similarly, only function annotations are appropriate for use, as the +variable annotations specified in `PEP-526`_ were introduced in Python +3.6. + +.. _PEP-484: https://www.python.org/dev/peps/pep-0484/ +.. _PEP-526: https://www.python.org/dev/peps/pep-0526/ + +.. [#fixture-list] This list of fixtures (with markup) can be + reproduced by running:: + + py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/' + + in a xenial lxd container with python3-pytest-catchlog installed. + +Feature Flags +------------- + +.. automodule:: cloudinit.features + :members: + + +Ongoing Refactors +================= + +This captures ongoing refactoring projects in the codebase. This is +intended as documentation for developers involved in the refactoring, +but also for other developers who may interact with the code being +refactored in the meantime. + +``cloudinit.net`` -> ``cloudinit.distros.networking`` Hierarchy +--------------------------------------------------------------- + +``cloudinit.net`` was imported from the curtin codebase as a chunk, and +then modified enough that it integrated with the rest of the cloud-init +codebase. Over the ~4 years since, the fact that it is not fully +integrated into the ``Distro`` hierarchy has caused several issues. + +The common pattern of these problems is that the commands used for +networking are different across distributions and operating systems. 
+This has led to ``cloudinit.net`` developing its own "distro
+determination" logic: `get_interfaces_by_mac`_ is probably the clearest
+example of this. Currently, these differences are primarily split
+along Linux/BSD lines. However, it would be short-sighted to only
+refactor in a way that captures this difference: we can anticipate that
+differences will develop between Linux-based distros in future, or
+there may already be differences in tooling that we currently
+work around in less obvious ways.
+
+The high-level plan is to introduce a hierarchy of networking classes
+in ``cloudinit.distros.networking``, which each ``Distro`` subclass
+will reference. These will capture the differences between networking
+on our various distros, while still allowing easy reuse of code between
+distros that share functionality (e.g. most of the Linux networking
+behaviour). ``Distro`` objects will instantiate the networking classes
+at ``self.net``, so callers will call ``distro.net.<func>`` instead of
+``cloudinit.net.<func>``; this will necessitate access to an
+instantiated ``Distro`` object.
+
+An implementation note: there may be external consumers of the
+``cloudinit.net`` module. We don't consider this a public API, so we
+will be removing it as part of this refactor. However, we will ensure
+that the new API is complete from its introduction, so that any such
+consumers can move over to it wholesale. (Note, however, that this new
+API is still not considered public or stable, and may not replicate the
+existing API exactly.)
+
+In more detail:
+
+* The root of this hierarchy will be the
+  ``cloudinit.distros.networking.Networking`` class. This class will
+  have a corresponding method for every ``cloudinit.net`` function that
+  we identify to be involved in refactoring. Initially, these methods'
+  implementations will simply call the corresponding ``cloudinit.net``
+  function. (This gives us the complete API from day one, for existing
+  consumers.)
+* As the biggest differentiator in behaviour, the next layer of the
+  hierarchy will be two subclasses: ``LinuxNetworking`` and
+  ``BSDNetworking``. These will be introduced in the initial PR.
+* When a difference in behaviour for a particular distro is identified,
+  a new ``Networking`` subclass will be created. This new class should
+  generally subclass either ``LinuxNetworking`` or ``BSDNetworking``.
+* To be clear: ``Networking`` subclasses will only be created when
+  needed; we will not create a full hierarchy of per-``Distro``
+  subclasses up-front.
+* Each ``Distro`` class will have a class variable
+  (``cls.networking_cls``) which points at the appropriate
+  networking class (initially this will be either ``LinuxNetworking``
+  or ``BSDNetworking``).
+* When ``Distro`` classes are instantiated, they will instantiate
+  ``cls.networking_cls`` and store the instance at ``self.net``. (This
+  will be implemented in ``cloudinit.distros.Distro.__init__``.)
+* A helper function will be added which will determine the appropriate
+  ``Distro`` subclass for the current system, instantiate it and return
+  its ``net`` attribute. (This is the entry point for existing
+  consumers to migrate to.)
+* Callers of refactored functions will change from calling
+  ``cloudinit.net.<func>`` to ``distro.net.<func>``, where ``distro``
+  is an instance of the appropriate ``Distro`` class for this system.
+  (This will require making such an instance available to callers,
+  which will constitute a large part of the work in this project.)
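+
+As a rough sketch, the initial layer of this hierarchy might look
+something like the following (illustrative only: ``is_up`` stands in
+for whichever ``cloudinit.net`` functions are migrated, and the real
+definitions will live in ``cloudinit.distros.networking`` and
+``cloudinit.distros``)::
+
+    import cloudinit.net
+
+    class Networking:
+        """Root of the hierarchy: one method per refactored function."""
+
+        def is_up(self, devname: str) -> bool:
+            # Initially just delegate to the existing implementation,
+            # so the complete API exists from day one.
+            return cloudinit.net.is_up(devname)
+
+    class BSDNetworking(Networking):
+        """BSD-specific overrides will accumulate here."""
+
+    class LinuxNetworking(Networking):
+        """Linux-specific overrides will accumulate here."""
+
+    class Distro:
+        # Each Distro subclass points at its networking class...
+        networking_cls = LinuxNetworking
+
+        def __init__(self):
+            # ...and instantiates it, so callers can use, e.g.,
+            # distro.net.is_up("eth0")
+            self.net = self.networking_cls()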
+
+After the initial structure is in place, the work in this refactor will
+consist of replacing the ``cloudinit.net.some_func`` call in each
+``cloudinit.distros.networking.Networking`` method with the actual
+implementation. This can be done incrementally, one function at a
+time:
+
+* pick an unmigrated ``cloudinit.distros.networking.Networking`` method
+* find it in `the list of bugs tagged net-refactor`_ and assign
+  yourself to it (see :ref:`Managing Work/Tracking Progress` below for
+  more details)
+* refactor all of its callers to call the ``distro.net.<func>`` method
+  on ``Distro`` instead of the ``cloudinit.net.<func>`` function. (This
+  is likely to be the most time-consuming step, as it may require
+  plumbing ``Distro`` objects through to places that previously have
+  not consumed them.)
+* refactor its implementation from ``cloudinit.net`` into the
+  ``Networking`` hierarchy (e.g. if it has an if/else on BSD, this is
+  the time to put the implementations in their respective subclasses)
+
+  * if only part of the method is distro-specific, you may need to
+    create new methods to capture that distro-specific logic; we don't
+    want to replicate common logic in different ``Networking``
+    subclasses
+  * if, after the refactor, the method on the root ``Networking`` class
+    no longer has any implementation, it should be converted to an
+    `abstractmethod`_
+
+* ensure that the new implementation has unit tests (either by moving
+  existing tests, or by writing new ones)
+* ensure that the new implementation has a docstring
+* add any appropriate type annotations
+
+  * note that we must follow the constraints described in the "Type
+    Annotations" section above, so you may not be able to write
+    complete annotations
+  * we have `type aliases`_ defined in ``cloudinit.distros.networking``
+    which should be used when applicable
+
+* finally, remove it (and any other now-unused functions) from
+  ``cloudinit.net`` (to avoid having two parallel implementations)
+
+``cloudinit.net`` Functions/Classes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The functions/classes that need refactoring break down into some broad
+categories:
+
+* helpers for accessing ``/sys`` (that should not be on the top-level
+  ``Networking`` class as they are Linux-specific):
+
+  * ``get_sys_class_path``
+  * ``sys_dev_path``
+  * ``read_sys_net``
+  * ``read_sys_net_safe``
+  * ``read_sys_net_int``
+
+* those that directly access ``/sys`` (via helpers) and should (IMO) be
+  included in the API of the ``Networking`` class:
+
+  * ``generate_fallback_config``
+
+    * the ``config_driver`` parameter is used and passed as a boolean,
+      so we can change the default value to ``False`` (instead of
+      ``None``)
+
+  * ``get_ib_interface_hwaddr``
+  * ``get_interface_mac``
+  * ``interface_has_own_mac``
+  * ``is_bond``
+  * ``is_bridge``
+  * ``is_physical``
+  * ``is_renamed``
+  * ``is_up``
+  * ``is_vlan``
+  * ``wait_for_physdevs``
+
+* those that directly access ``/sys`` (via helpers) but may be
+  Linux-specific concepts or names:
+
+  * ``get_master``
+  * ``device_devid``
+  * ``device_driver``
+
+* those that directly use ``ip``:
+
+  * ``_get_current_rename_info``
+
+    * this has non-distro-specific logic so should potentially be
+      refactored to use helpers on ``self`` instead of ``ip`` directly
+      (rather than being wholesale reimplemented in each of
+      ``BSDNetworking`` or ``LinuxNetworking``)
+    * we can also remove the ``check_downable`` argument; it's never
+      specified so is always ``True``
+
+  * ``_rename_interfaces``
+
+    * this has several
internal helper functions which use ``ip`` + directly, and it calls ``_get_current_rename_info``. That said, + there appears to be a lot of non-distro-specific logic that could + live in a function on ``Networking``, so this will require some + careful refactoring to avoid duplicating that logic in each of + ``BSDNetworking`` and ``LinuxNetworking``. + * only the ``renames`` and ``current_info`` parameters are ever + passed in (and ``current_info`` only by tests), so we can remove + the others from the definition + + * ``EphemeralIPv4Network`` + + * this is another case where it mixes distro-specific and + non-specific functionality. Specifically, ``__init__``, + ``__enter__`` and ``__exit__`` are non-specific, and the + remaining methods are distro-specific. + * when refactoring this, the need to track ``cleanup_cmds`` likely + means that the distro-specific behaviour cannot be captured only + in the ``Networking`` class. See `this comment in PR #363`_ for + more thoughts. + +* those that implicitly use ``/sys`` via their call dependencies: + + * ``master_is_bridge_or_bond`` + + * appends to ``get_master`` return value, which is a ``/sys`` path + + * ``extract_physdevs`` + + * calls ``device_driver`` and ``device_devid`` in both + ``_version_*`` impls + + * ``apply_network_config_names`` + + * calls ``extract_physdevs`` + * there is already a ``Distro.apply_network_config_names`` which in + the default implementation calls this function; this and its BSD + subclass implementations should be refactored at the same time + * the ``strict_present`` and ``strict_busy`` parameters are never + passed, nor are they used in the function definition, so they can + be removed + + * ``get_interfaces`` + + * calls ``device_driver``, ``device_devid`` amongst others + + * ``get_ib_hwaddrs_by_interface`` + + * calls ``get_interfaces`` + +* those that may fall into the above categories, but whose use is only + related to netfailover (which relies on a Linux-specific network + driver, so is unlikely to be relevant elsewhere without a substantial + refactor; these probably only need implementing in + ``LinuxNetworking``): + + * ``get_dev_features`` + + * ``has_netfail_standby_feature`` + + * calls ``get_dev_features`` + + * ``is_netfailover`` + * ``is_netfail_master`` + + * this is called from ``generate_fallback_config`` + + * ``is_netfail_primary`` + * ``is_netfail_standby`` + + * N.B. all of these take an optional ``driver`` argument which is + used to pass around a value to avoid having to look it up by + calling ``device_driver`` every time. This is something of a leaky + abstraction, and is better served by caching on ``device_driver`` + or storing the cached value on ``self``, so we can drop the + parameter from the new API. + +* those that use ``/sys`` (via helpers) and have non-exhaustive BSD + logic: + + * ``get_devicelist`` + +* those that already have separate Linux/BSD implementations: + + * ``find_fallback_nic`` + * ``get_interfaces_by_mac`` + +* those that have no OS-specific functionality (so do not need to be + refactored): + + * ``ParserError`` + * ``RendererNotFoundError`` + * ``has_url_connectivity`` + * ``is_ip_address`` + * ``is_ipv4_address`` + * ``natural_sort_key`` + +Note that the functions in ``cloudinit.net`` use inconsistent parameter +names for "string that contains a device name"; we can standardise on +``devname`` (the most common one) in the refactor. 
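+
+To make the categories above concrete, the end state for a simple
+``/sys``-backed predicate like ``is_bridge`` might look roughly like
+this (a sketch only: the ``DeviceName = str`` alias and the use of
+``sys_dev_path`` mirror the current ``cloudinit.net`` code, but the
+final implementation may differ)::
+
+    import abc
+    import os
+
+    from cloudinit.net import sys_dev_path
+
+    DeviceName = str  # assumed type alias
+
+    class Networking(metaclass=abc.ABCMeta):
+        @abc.abstractmethod
+        def is_bridge(self, devname: DeviceName) -> bool:
+            """Determine whether the given device is a bridge."""
+
+    class LinuxNetworking(Networking):
+        def is_bridge(self, devname: DeviceName) -> bool:
+            # on Linux, a bridge exposes /sys/class/net/<dev>/bridge
+            return os.path.exists(sys_dev_path(devname, "bridge"))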
+
+Managing Work/Tracking Progress
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To ensure that we won't have multiple people working on the same part
+of the refactor at the same time, there is a bug for each function.
+You can see the current status by looking at `the list of bugs tagged
+net-refactor`_.
+
+When you're working on refactoring a particular method, ensure that you
+have assigned yourself to the corresponding bug, to avoid duplicate
+work.
+
+Generally, when considering what to pick up to refactor, it is best to
+start with functions in ``cloudinit.net`` which are not called by
+anything else in ``cloudinit.net``. This allows you to focus only on
+refactoring that function and its callsites, rather than having to
+update other ``cloudinit.net`` functions as well.
+
+References
+~~~~~~~~~~
+
+* `Mina Galić's email to the cloud-init ML in 2018`_ (plus its thread)
+* `Mina Galić's email to the cloud-init ML in 2019`_ (plus its thread)
+* `PR #363`_, the discussion which prompted finally starting this
+  refactor (and where a lot of the above details were hashed out)
+
+.. _get_interfaces_by_mac: https://github.com/canonical/cloud-init/blob/961239749106daead88da483e7319e9268c67cde/cloudinit/net/__init__.py#L810-L818
+.. _Mina Galić's email to the cloud-init ML in 2018: https://lists.launchpad.net/cloud-init/msg00185.html
+.. _Mina Galić's email to the cloud-init ML in 2019: https://lists.launchpad.net/cloud-init/msg00237.html
+.. _PR #363: https://github.com/canonical/cloud-init/pull/363
+.. _this comment in PR #363: https://github.com/canonical/cloud-init/pull/363#issuecomment-628829489
+.. _abstractmethod: https://docs.python.org/3/library/abc.html#abc.abstractmethod
+.. _type aliases: https://docs.python.org/3/library/typing.html#type-aliases
+.. 
_the list of bugs tagged net-refactor: https://bugs.launchpad.net/cloud-init/+bugs?field.tag=net-refactor diff --git a/Makefile b/Makefile index 315e6b4..5fb0fcb 100644 --- a/Makefile +++ b/Makefile @@ -1,40 +1,22 @@ CWD=$(shell pwd) -PYVER ?= $(shell for p in python3 python2; do \ - out=$$(command -v $$p 2>&1) && echo $$p && exit; done; exit 1) - -noseopts ?= -v YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f ) YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) -PIP_INSTALL := pip install - -ifeq ($(PYVER),python3) - pyflakes = pyflakes3 - unittests = unittest3 - yaml = yaml -else -ifeq ($(PYVER),python2) - pyflakes = pyflakes - unittests = unittest -else - pyflakes = pyflakes pyflakes3 - unittests = unittest unittest3 -endif -endif +PYTHON = python3 +PIP_INSTALL := pip3 install ifeq ($(distro),) distro = redhat endif -READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version || \ - echo read-version-failed) -CODE_VERSION=$(shell $(PYVER) -c "from cloudinit import version; print(version.version_string())") +READ_VERSION=$(shell $(PYTHON) $(CWD)/tools/read-version || echo read-version-failed) +CODE_VERSION=$(shell $(PYTHON) -c "from cloudinit import version; print(version.version_string())") all: check -check: check_version test $(yaml) +check: check_version test yaml style-check: pep8 $(pyflakes) @@ -44,20 +26,14 @@ pep8: pyflakes: @$(CWD)/tools/run-pyflakes -pyflakes3: - @$(CWD)/tools/run-pyflakes3 - unittest: clean_pyc - nosetests $(noseopts) tests/unittests cloudinit - -unittest3: clean_pyc - nosetests3 $(noseopts) tests/unittests cloudinit + python3 -m pytest -v tests/unittests cloudinit ci-deps-ubuntu: - @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro + @$(PYTHON) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro ci-deps-centos: - @$(PYVER) $(CWD)/tools/read-dependencies --distro centos --test-distro + @$(PYTHON) $(CWD)/tools/read-dependencies --distro centos --test-distro pip-requirements: @echo "Installing cloud-init dependencies..." @@ -67,7 +43,7 @@ pip-test-requirements: @echo "Installing cloud-init test dependencies..." $(PIP_INSTALL) -r "$@.txt" -q -test: $(unittests) +test: unittest check_version: @if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \ @@ -76,7 +52,7 @@ check_version: else true; fi config/cloud.cfg: - $(PYVER) ./tools/render-cloudcfg config/cloud.cfg.tmpl config/cloud.cfg + $(PYTHON) ./tools/render-cloudcfg config/cloud.cfg.tmpl config/cloud.cfg clean_pyc: @find . -type f -name "*.pyc" -delete @@ -86,30 +62,30 @@ clean: clean_pyc rm -rf doc/rtd_html .tox .coverage yaml: - @$(PYVER) $(CWD)/tools/validate-yaml.py $(YAML_FILES) + @$(PYTHON) $(CWD)/tools/validate-yaml.py $(YAML_FILES) rpm: - $(PYVER) ./packages/brpm --distro=$(distro) + $(PYTHON) ./packages/brpm --distro=$(distro) srpm: - $(PYVER) ./packages/brpm --srpm --distro=$(distro) + $(PYTHON) ./packages/brpm --srpm --distro=$(distro) deb: @which debuild || \ { echo "Missing devscripts dependency. Install with:"; \ echo sudo apt-get install devscripts; exit 1; } - $(PYVER) ./packages/bddeb + $(PYTHON) ./packages/bddeb deb-src: @which debuild || \ { echo "Missing devscripts dependency. 
Install with:"; \ echo sudo apt-get install devscripts; exit 1; } - $(PYVER) ./packages/bddeb -S -d + $(PYTHON) ./packages/bddeb -S -d doc: tox -e doc -.PHONY: test pyflakes pyflakes3 clean pep8 rpm srpm deb deb-src yaml +.PHONY: test pyflakes clean pep8 rpm srpm deb deb-src yaml .PHONY: check_version pip-test-requirements pip-requirements clean_pyc -.PHONY: unittest unittest3 style-check doc +.PHONY: unittest style-check doc diff --git a/README.md b/README.md index d648e42..435405d 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # cloud-init -[![Build Status](https://travis-ci.org/canonical/cloud-init.svg?branch=master)](https://travis-ci.org/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org) +[![Build Status](https://travis-ci.com/canonical/cloud-init.svg?branch=master)](https://travis-ci.com/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org) Cloud-init is the *industry standard* multi-distribution method for cross-platform cloud instance initialization. It is supported across all @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Ubuntu
SLES/openSUSE<br />RHEL/CentOS<br />Fedora<br />Gentoo Linux<br />Debian<br />ArchLinux<br />FreeBSD<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />
| ## To start developing cloud-init diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index 1f3060d..62ad51f 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -4,6 +4,7 @@ import calendar from datetime import datetime import sys +from cloudinit import subp from cloudinit import util stage_to_description = { @@ -51,7 +52,7 @@ def parse_timestamp(timestampstr): def parse_timestamp_from_date(timestampstr): - out, _ = util.subp(['date', '+%s.%3N', '-d', timestampstr]) + out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr]) timestamp = out.strip() return float(timestamp) @@ -74,8 +75,12 @@ def parse_ci_logline(line): # # 2017-05-22 18:02:01,088 - util.py[DEBUG]: Cloud-init v. 0.7.9 running \ # 'init-local' at Mon, 22 May 2017 18:02:01 +0000. Up 2.0 seconds. + # + # Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \ + # init-local/check-cache: attempting to read from cache [check] - separators = [' - ', ' [CLOUDINIT] '] + amazon_linux_2_sep = ' cloud-init[' + separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep] found = False for sep in separators: if sep in line: @@ -98,7 +103,14 @@ def parse_ci_logline(line): hostname = extra.split()[-1] else: hostname = timehost.split()[-1] - timestampstr = timehost.split(hostname)[0].strip() + if sep == amazon_linux_2_sep: + # This is an Amazon Linux style line, with no hostname and a PID. + # Use the whole of timehost as timestampstr, and strip off the PID + # from the start of eventstr. + timestampstr = timehost.strip() + eventstr = eventstr.split(maxsplit=1)[1] + else: + timestampstr = timehost.split(hostname)[0].strip() if 'Cloud-init v.' in eventstr: event_type = 'start' if 'running' in eventstr: diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index fb152b1..01a4d3e 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -11,31 +11,29 @@ import os import time import sys +from cloudinit import subp from cloudinit import util from cloudinit.distros import uses_systemd -# An event: -''' -{ - "description": "executing late commands", - "event_type": "start", - "level": "INFO", - "name": "cmd-install/stage-late" - "origin": "cloudinit", - "timestamp": 1461164249.1590767, -}, - - { - "description": "executing late commands", - "event_type": "finish", - "level": "INFO", - "name": "cmd-install/stage-late", - "origin": "cloudinit", - "result": "SUCCESS", - "timestamp": 1461164249.1590767 - } - -''' +# Example events: +# { +# "description": "executing late commands", +# "event_type": "start", +# "level": "INFO", +# "name": "cmd-install/stage-late" +# "origin": "cloudinit", +# "timestamp": 1461164249.1590767, +# } +# { +# "description": "executing late commands", +# "event_type": "finish", +# "level": "INFO", +# "name": "cmd-install/stage-late", +# "origin": "cloudinit", +# "result": "SUCCESS", +# "timestamp": 1461164249.1590767 +# } + format_key = { '%d': 'delta', '%D': 'description', @@ -155,7 +153,7 @@ class SystemctlReader(object): :return: whether the subp call failed or not ''' try: - value, err = util.subp(self.args, capture=True) + value, err = subp.subp(self.args, capture=True) if err: return err self.epoch = value @@ -215,7 +213,7 @@ def gather_timestamps_using_dmesg(): with gather_timestamps_using_systemd ''' try: - data, _ = util.subp(['dmesg'], capture=True) + data, _ = subp.subp(['dmesg'], capture=True) split_entries = data[0].splitlines() for i in split_entries: if i.decode('UTF-8').find('user') != -1: @@ -269,7 +267,7 @@ def 
gather_timestamps_using_systemd(): except OSError as err: raise RuntimeError('Could not determine container boot ' 'time from /proc/1/cmdline. ({})' - .format(err)) + .format(err)) from err status = CONTAINER_CODE else: status = FAIL_CODE diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py index 706e2cc..f69423c 100644 --- a/cloudinit/analyze/tests/test_boot.py +++ b/cloudinit/analyze/tests/test_boot.py @@ -12,20 +12,20 @@ class TestDistroChecker(CiTestCase): @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '', ''), 'system': ''}) - @mock.patch('platform.linux_distribution', return_value=('', '', '')) + @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', '')) @mock.patch('cloudinit.util.is_FreeBSD', return_value=False) - def test_blank_distro(self, m_sys_info, m_linux_distribution, m_free_bsd): + def test_blank_distro(self, m_sys_info, m_get_linux_distro, m_free_bsd): self.assertEqual(err_code, dist_check_timestamp()) @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '', '')}) - @mock.patch('platform.linux_distribution', return_value=('', '', '')) + @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', '')) @mock.patch('cloudinit.util.is_FreeBSD', return_value=True) def test_freebsd_gentoo_cant_find(self, m_sys_info, - m_linux_distribution, m_is_FreeBSD): + m_get_linux_distro, m_is_FreeBSD): self.assertEqual(err_code, dist_check_timestamp()) - @mock.patch('cloudinit.util.subp', return_value=(0, 1)) + @mock.patch('cloudinit.subp.subp', return_value=(0, 1)) def test_subp_fails(self, m_subp): self.assertEqual(err_code, dist_check_timestamp()) @@ -42,7 +42,7 @@ class TestSystemCtlReader(CiTestCase): with self.assertRaises(RuntimeError): reader.parse_epoch_as_float() - @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None)) + @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) def test_systemctl_works_correctly_threshold(self, m_subp): reader = SystemctlReader('dummyProperty', 'dummyParameter') self.assertEqual(1.0, reader.parse_epoch_as_float()) @@ -50,12 +50,12 @@ class TestSystemCtlReader(CiTestCase): self.assertTrue(thresh < 1e-6) self.assertTrue(thresh > (-1 * 1e-6)) - @mock.patch('cloudinit.util.subp', return_value=('U=0', None)) + @mock.patch('cloudinit.subp.subp', return_value=('U=0', None)) def test_systemctl_succeed_zero(self, m_subp): reader = SystemctlReader('dummyProperty', 'dummyParameter') self.assertEqual(0.0, reader.parse_epoch_as_float()) - @mock.patch('cloudinit.util.subp', return_value=('U=1', None)) + @mock.patch('cloudinit.subp.subp', return_value=('U=1', None)) def test_systemctl_succeed_distinct(self, m_subp): reader = SystemctlReader('dummyProperty', 'dummyParameter') val1 = reader.parse_epoch_as_float() @@ -64,13 +64,13 @@ class TestSystemCtlReader(CiTestCase): val2 = reader2.parse_epoch_as_float() self.assertNotEqual(val1, val2) - @mock.patch('cloudinit.util.subp', return_value=('100', None)) + @mock.patch('cloudinit.subp.subp', return_value=('100', None)) def test_systemctl_epoch_not_splittable(self, m_subp): reader = SystemctlReader('dummyProperty', 'dummyParameter') with self.assertRaises(IndexError): reader.parse_epoch_as_float() - @mock.patch('cloudinit.util.subp', return_value=('U=foobar', None)) + @mock.patch('cloudinit.subp.subp', return_value=('U=foobar', None)) def test_systemctl_cannot_convert_epoch_to_float(self, m_subp): reader = SystemctlReader('dummyProperty', 'dummyParameter') with self.assertRaises(ValueError): @@ 
-130,7 +130,7 @@ class TestAnalyzeBoot(CiTestCase): self.assertEqual(err_string, data) @mock.patch("cloudinit.util.is_container", return_value=True) - @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None)) + @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) def test_container_no_ci_log_line(self, m_is_container, m_subp): path = os.path.dirname(os.path.abspath(__file__)) log_path = path + '/boot-test.log' @@ -148,7 +148,7 @@ class TestAnalyzeBoot(CiTestCase): self.assertEqual(FAIL_CODE, finish_code) @mock.patch("cloudinit.util.is_container", return_value=True) - @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None)) + @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) @mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{ 'name': 'init-local', 'description': 'starting search', 'timestamp': 100000}]) diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py index db2a667..dac1efb 100644 --- a/cloudinit/analyze/tests/test_dump.py +++ b/cloudinit/analyze/tests/test_dump.py @@ -5,7 +5,8 @@ from textwrap import dedent from cloudinit.analyze.dump import ( dump_events, parse_ci_logline, parse_timestamp) -from cloudinit.util import which, write_file +from cloudinit.util import write_file +from cloudinit.subp import which from cloudinit.tests.helpers import CiTestCase, mock, skipIf @@ -119,6 +120,23 @@ class TestParseCILogLine(CiTestCase): m_parse_from_date.assert_has_calls( [mock.call("2016-08-30 21:53:25.972325+00:00")]) + def test_parse_logline_returns_event_for_amazon_linux_2_line(self): + line = ( + "Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:" + " init-local/check-cache: attempting to read from cache [check]") + # Generate the expected value using `datetime`, so that TZ + # determination is consistent with the code under test. 
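+    # (Note: the "%b %d %H:%M:%S" format has no year field, so a bare
+    # strptime() would default the year to 1900; substituting the current
+    # year below mirrors what the parser does for year-less syslog
+    # timestamps, keeping both epoch values comparable.)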
+ timestamp_dt = datetime.strptime( + "Apr 30 19:39:11", "%b %d %H:%M:%S" + ).replace(year=datetime.now().year) + expected = { + 'description': 'attempting to read from cache [check]', + 'event_type': 'start', + 'name': 'init-local/check-cache', + 'origin': 'cloudinit', + 'timestamp': timestamp_dt.timestamp()} + self.assertEqual(expected, parse_ci_logline(line)) + SAMPLE_LOGS = dedent("""\ Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\ diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 1f2c2e7..9bded16 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -36,6 +36,7 @@ KNOWN_CLOUD_NAMES = [ 'OVF', 'RbxCloud - (HyperOne, Rootbox, Rubikon)', 'OpenTelekomCloud', + 'SAP Converged Cloud', 'Scaleway', 'SmartOS', 'VMware', diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index 1f61faa..485ff92 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -11,10 +11,10 @@ LOG = logging.getLogger(__name__) def write_file(filename, content, mode=_DEF_PERMS, - omode="wb", copy_mode=False): + omode="wb", preserve_mode=False): # open filename in mode 'omode', write content, set permissions to 'mode' - if copy_mode: + if preserve_mode: try: file_stat = os.stat(filename) mode = stat.S_IMODE(file_stat.st_mode) diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 30e49de..928a8ee 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -10,9 +10,8 @@ import os import sys from cloudinit.stages import Init -from cloudinit.util import ( - ProcessExecutionError, del_dir, del_file, get_config_logfiles, - is_link, subp) +from cloudinit.subp import (ProcessExecutionError, subp) +from cloudinit.util import (del_dir, del_file, get_config_logfiles, is_link) def error(msg): diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index 4c086b5..51c61cc 100644 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -12,8 +12,8 @@ import sys from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE from cloudinit.temp_utils import tempdir -from cloudinit.util import ( - ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file) +from cloudinit.subp import (ProcessExecutionError, subp) +from cloudinit.util import (chdir, copy, ensure_dir, write_file) CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py new file mode 100755 index 0000000..4e6a577 --- /dev/null +++ b/cloudinit/cmd/devel/make_mime.py @@ -0,0 +1,114 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Generate multi-part mime messages for user-data """ + +import argparse +import sys +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText + +from cloudinit import log +from cloudinit.handlers import INCLUSION_TYPES_MAP +from . import addLogHandlerCLI + +NAME = 'make-mime' +LOG = log.getLogger(NAME) +EPILOG = ("Example: make-mime -a config.yaml:cloud-config " + "-a script.sh:x-shellscript > user-data") + + +def file_content_type(text): + """ Return file content type by reading the first line of the input. """ + try: + filename, content_type = text.split(":", 1) + return (open(filename, 'r'), filename, content_type.strip()) + except ValueError as e: + raise argparse.ArgumentError( + text, "Invalid value for %r" % (text) + ) from e + + +def get_parser(parser=None): + """Build or extend and arg parser for make-mime utility. 
+ + @param parser: Optional existing ArgumentParser instance representing the + subcommand which will be extended to support the args of this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser() + # update the parser's doc and add an epilog to show an example + parser.description = __doc__ + parser.epilog = EPILOG + parser.add_argument("-a", "--attach", dest="files", type=file_content_type, + action='append', default=[], + metavar=":", + help=("attach the given file as the specified " + "content-type")) + parser.add_argument('-l', '--list-types', action='store_true', + default=False, + help='List support cloud-init content types.') + parser.add_argument('-f', '--force', action='store_true', + default=False, + help='Ignore unknown content-type warnings') + return parser + + +def get_content_types(strip_prefix=False): + """ Return a list of cloud-init supported content types. Optionally + strip out the leading 'text/' of the type if strip_prefix=True. + """ + return sorted([ctype.replace("text/", "") if strip_prefix else ctype + for ctype in INCLUSION_TYPES_MAP.values()]) + + +def handle_args(name, args): + """Create a multi-part MIME archive for use as user-data. Optionally + print out the list of supported content types of cloud-init. + + Also setup CLI log handlers to report to stderr since this is a development + utility which should be run by a human on the CLI. + + @return 0 on success, 1 on failure. + """ + addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) + if args.list_types: + print("\n".join(get_content_types(strip_prefix=True))) + return 0 + + sub_messages = [] + errors = [] + for i, (fh, filename, format_type) in enumerate(args.files): + contents = fh.read() + sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) + sub_message.add_header('Content-Disposition', + 'attachment; filename="%s"' % (filename)) + content_type = sub_message.get_content_type().lower() + if content_type not in get_content_types(): + level = "WARNING" if args.force else "ERROR" + msg = (level + ": content type %r for attachment %s " + "may be incorrect!") % (content_type, i + 1) + sys.stderr.write(msg + '\n') + errors.append(msg) + sub_messages.append(sub_message) + if len(errors) and not args.force: + sys.stderr.write("Invalid content-types, override with --force\n") + return 1 + combined_message = MIMEMultipart() + for msg in sub_messages: + combined_message.attach(msg) + print(combined_message) + return 0 + + +def main(): + args = get_parser().parse_args() + return(handle_args(NAME, args)) + + +if __name__ == '__main__': + sys.exit(main()) + + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 2d27a76..80d217c 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -95,7 +95,7 @@ def handle_args(name, args): ns = network_state.parse_net_config_data(pre_ns) if not ns: raise RuntimeError("No valid network_state object created from" - "input data") + " input data") if args.debug: sys.stderr.write('\n'.join( diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py index 99a234c..1a3c46a 100644 --- a/cloudinit/cmd/devel/parser.py +++ b/cloudinit/cmd/devel/parser.py @@ -9,6 +9,7 @@ from cloudinit.config import schema from . import net_convert from . import render +from . 
import make_mime def get_parser(parser=None): @@ -25,7 +26,9 @@ def get_parser(parser=None): (net_convert.NAME, net_convert.__doc__, net_convert.get_parser, net_convert.handle_args), (render.NAME, render.__doc__, - render.get_parser, render.handle_args) + render.get_parser, render.handle_args), + (make_mime.NAME, make_mime.__doc__, + make_mime.get_parser, make_mime.handle_args), ] for (subcmd, helpmsg, get_parser, handler) in subcmds: parser = subparsers.add_parser(subcmd, help=helpmsg) diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py index 1bc2240..1090aa1 100755 --- a/cloudinit/cmd/devel/render.py +++ b/cloudinit/cmd/devel/render.py @@ -57,8 +57,9 @@ def handle_args(name, args): paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) if not os.path.exists(instance_data_fn): LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - instance_data_fn, redacted_data_fn) + 'Missing root-readable %s. Using redacted %s instead.', + instance_data_fn, redacted_data_fn + ) instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py index 4951797..ddfd58e 100644 --- a/cloudinit/cmd/devel/tests/test_logs.py +++ b/cloudinit/cmd/devel/tests/test_logs.py @@ -2,13 +2,14 @@ from datetime import datetime import os -from six import StringIO +from io import StringIO from cloudinit.cmd.devel import logs from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE from cloudinit.tests.helpers import ( FilesystemMockingTestCase, mock, wrap_and_call) -from cloudinit.util import ensure_dir, load_file, subp, write_file +from cloudinit.subp import subp +from cloudinit.util import ensure_dir, load_file, write_file @mock.patch('cloudinit.cmd.devel.logs.os.getuid') diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py index 988bba0..a7fcf2c 100644 --- a/cloudinit/cmd/devel/tests/test_render.py +++ b/cloudinit/cmd/devel/tests/test_render.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. -from six import StringIO import os +from io import StringIO from collections import namedtuple from cloudinit.cmd.devel import render diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index 1d888b9..07db955 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -1,11 +1,21 @@ # This file is part of cloud-init. See LICENSE file for license information. -"""Query standardized instance metadata from the command line.""" +"""Query standardized instance metadata provided to machine, returning a JSON +structure. + +Some instance-data values may be binary on some platforms, such as userdata and +vendordata. Attempt to decompress and decode UTF-8 any binary values. + +Any binary values in the instance metadata will be base64-encoded and prefixed +with "ci-b64:" in the output. userdata and, where applicable, vendordata may +be provided to the machine gzip-compressed (and therefore as binary data). +query will attempt to decompress these to a string before emitting the JSON +output; if this fails, they are treated as binary. 
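Aside (editorial, not part of the patch): a minimal standalone sketch of the decode-then-decompress fallback described above, using only the stdlib. The helper name here is hypothetical; the patch's actual implementation is the load_userdata() helper added further down.

    import gzip

    def decode_or_decompress(bdata):
        # Plain UTF-8 first; then gunzip; otherwise keep the raw bytes
        # (query would emit those base64-encoded with a "ci-b64:" prefix).
        try:
            return bdata.decode('utf-8')
        except UnicodeDecodeError:
            try:
                return gzip.decompress(bdata).decode('utf-8')
            except (OSError, UnicodeDecodeError):
                return bdata

    assert decode_or_decompress(gzip.compress(b'hi mom')) == 'hi mom'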
+""" import argparse from errno import EACCES import os -import six import sys from cloudinit.handlers.jinja_template import ( @@ -31,7 +41,7 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog=NAME, description='Query cloud-init instance data') + prog=NAME, description=__doc__) parser.add_argument( '-d', '--debug', action='store_true', default=False, help='Add verbose messages during template render') @@ -53,8 +63,10 @@ def get_parser(parser=None): ' /var/lib/cloud/instance/vendor-data.txt')) parser.add_argument( 'varname', type=str, nargs='?', - help=('A dot-delimited instance data variable to query from' - ' instance-data query. For example: v2.local_hostname')) + help=('A dot-delimited specific variable to query from' + ' instance-data. For example: v1.local_hostname. If the' + ' value is not JSON serializable, it will be base64-encoded and' + ' will contain the prefix "ci-b64:". ')) parser.add_argument( '-a', '--all', action='store_true', default=False, dest='dump_all', help='Dump all available instance-data') @@ -66,6 +78,21 @@ def get_parser(parser=None): return parser +def load_userdata(ud_file_path): + """Attempt to return a string of user-data from ud_file_path + + Attempt to decode or decompress if needed. + If unable to decode the content, raw bytes will be returned. + + @returns: String of uncompressed userdata if possible, otherwise bytes. + """ + bdata = util.load_file(ud_file_path, decode=False) + try: + return bdata.decode('utf-8') + except UnicodeDecodeError: + return util.decomp_gzip(bdata, quiet=False, decode=True) + + def handle_args(name, args): """Handle calls to 'cloud-init query' as a subcommand.""" paths = None @@ -91,8 +118,9 @@ def handle_args(name, args): instance_data_fn = sensitive_data_fn else: LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - sensitive_data_fn, redacted_data_fn) + 'Missing root-readable %s. 
Using redacted %s instead.', + sensitive_data_fn, redacted_data_fn + ) instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn @@ -121,8 +149,8 @@ def handle_args(name, args): instance_data['vendordata'] = ( '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn)) else: - instance_data['userdata'] = util.load_file(user_data_fn) - instance_data['vendordata'] = util.load_file(vendor_data_fn) + instance_data['userdata'] = load_userdata(user_data_fn) + instance_data['vendordata'] = load_userdata(vendor_data_fn) if args.format: payload = '## template: jinja\n{fmt}'.format(fmt=args.format) rendered_payload = render_jinja_payload( @@ -149,7 +177,7 @@ def handle_args(name, args): response = '\n'.join(sorted(response.keys())) elif args.list_keys: response = '\n'.join(sorted(response.keys())) - if not isinstance(response, six.string_types): + if not isinstance(response, str): response = util.json_dumps(response) print(response) return 0 diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py index f092ab3..a848a81 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/cloudinit/cmd/tests/test_clean.py @@ -5,7 +5,7 @@ from cloudinit.util import ensure_dir, sym_link, write_file from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock from collections import namedtuple import os -from six import StringIO +from io import StringIO mypaths = namedtuple('MyPaths', 'cloud_dir') @@ -167,7 +167,6 @@ class TestClean(CiTestCase): wrap_and_call( 'cloudinit.cmd.clean', {'Init': {'side_effect': self.init_class}, - 'sys.exit': {'side_effect': self.sys_exit}, 'sys.argv': {'new': ['clean', '--logs']}}, clean.main) diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/cloudinit/cmd/tests/test_cloud_id.py index 7373817..3f3727f 100644 --- a/cloudinit/cmd/tests/test_cloud_id.py +++ b/cloudinit/cmd/tests/test_cloud_id.py @@ -4,7 +4,7 @@ from cloudinit import util from collections import namedtuple -from six import StringIO +from io import StringIO from cloudinit.cmd import cloud_id diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index 57b8fdf..585b3b0 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -3,7 +3,7 @@ from collections import namedtuple import copy import os -from six import StringIO +from io import StringIO from cloudinit.cmd import main from cloudinit import safeyaml @@ -18,8 +18,6 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') class TestMain(FilesystemMockingTestCase): - with_logs = True - def setUp(self): super(TestMain, self).setUp() self.new_root = self.tmp_dir() diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py index c48605a..c258d32 100644 --- a/cloudinit/cmd/tests/test_query.py +++ b/cloudinit/cmd/tests/test_query.py @@ -1,195 +1,260 @@ # This file is part of cloud-init. See LICENSE file for license information. 
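Editor's aside: the rewrite below moves this test module from CiTestCase to plain pytest, leaning on the built-in capsys, caplog and tmpdir fixtures instead of patching sys.stdout/sys.stderr with StringIO. A minimal, self-contained sketch of that pattern (function names hypothetical):

    import logging

    def emit(msg):
        print(msg)
        logging.getLogger(__name__).warning('emitted: %s', msg)

    def test_emit(capsys, caplog):
        emit('hello')
        out, _err = capsys.readouterr()
        assert out == 'hello\n'
        assert 'emitted: hello' in caplog.text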
import errno -from six import StringIO +import gzip +from io import BytesIO +import json from textwrap import dedent -import os + +import pytest from collections import namedtuple from cloudinit.cmd import query from cloudinit.helpers import Paths from cloudinit.sources import ( REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE) -from cloudinit.tests.helpers import CiTestCase, mock -from cloudinit.util import ensure_dir, write_file +from cloudinit.tests.helpers import mock + +from cloudinit.util import b64e, write_file + +def _gzip_data(data): + with BytesIO() as iobuf: + with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp: + gzfp.write(data) + return iobuf.getvalue() -class TestQuery(CiTestCase): - with_logs = True +@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "") +class TestQuery: args = namedtuple( 'queryargs', ('debug dump_all format instance_data list_keys user_data vendor_data' ' varname')) - def setUp(self): - super(TestQuery, self).setUp() - self.tmp = self.tmp_dir() - self.instance_data = self.tmp_path('instance-data', dir=self.tmp) + def _setup_paths(self, tmpdir, ud_val=None, vd_val=None): + """Write userdata and vendordata into a tmpdir. - def test_handle_args_error_on_missing_param(self): + Return: + 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path) + """ + if ud_val: + user_data = tmpdir.join('user-data') + write_file(user_data.strpath, ud_val) + else: + user_data = None + if vd_val: + vendor_data = tmpdir.join('vendor-data') + write_file(vendor_data.strpath, vd_val) + else: + vendor_data = None + run_dir = tmpdir.join('run_dir') + run_dir.ensure_dir() + return ( + Paths({'run_dir': run_dir.strpath}), + run_dir, + user_data, + vendor_data + ) + + def test_handle_args_error_on_missing_param(self, caplog, capsys): """Error when missing required parameters and print usage.""" args = self.args( debug=False, dump_all=False, format=None, instance_data=None, list_keys=False, user_data=None, vendor_data=None, varname=None) - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(1, query.handle_args('anyname', args)) + with mock.patch( + "cloudinit.cmd.query.addLogHandlerCLI", return_value="" + ) as m_cli_log: + assert 1 == query.handle_args('anyname', args) expected_error = ( - 'ERROR: Expected one of the options: --all, --format, --list-keys' + 'Expected one of the options: --all, --format, --list-keys' ' or varname\n') - self.assertIn(expected_error, self.logs.getvalue()) - self.assertIn('usage: query', m_stdout.getvalue()) - self.assertIn(expected_error, m_stderr.getvalue()) + assert expected_error in caplog.text + out, _err = capsys.readouterr() + assert 'usage: query' in out + assert 1 == m_cli_log.call_count - def test_handle_args_error_on_missing_instance_data(self): + def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir): """When instance_data file path does not exist, log an error.""" - absent_fn = self.tmp_path('absent', dir=self.tmp) + absent_fn = tmpdir.join('absent') args = self.args( - debug=False, dump_all=True, format=None, instance_data=absent_fn, + debug=False, dump_all=True, format=None, + instance_data=absent_fn.strpath, list_keys=False, user_data='ud', vendor_data='vd', varname=None) - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: - self.assertEqual(1, query.handle_args('anyname', args)) - self.assertIn( - 'ERROR: Missing instance-data file: %s' % absent_fn, - 
self.logs.getvalue()) - self.assertIn( - 'ERROR: Missing instance-data file: %s' % absent_fn, - m_stderr.getvalue()) + assert 1 == query.handle_args('anyname', args) - def test_handle_args_error_when_no_read_permission_instance_data(self): + msg = 'Missing instance-data file: %s' % absent_fn + assert msg in caplog.text + + def test_handle_args_error_when_no_read_permission_instance_data( + self, caplog, tmpdir + ): """When instance_data file is unreadable, log an error.""" - noread_fn = self.tmp_path('unreadable', dir=self.tmp) - write_file(noread_fn, 'thou shall not pass') + noread_fn = tmpdir.join('unreadable') + noread_fn.write('thou shall not pass') args = self.args( - debug=False, dump_all=True, format=None, instance_data=noread_fn, + debug=False, dump_all=True, format=None, + instance_data=noread_fn.strpath, list_keys=False, user_data='ud', vendor_data='vd', varname=None) - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: - with mock.patch('cloudinit.cmd.query.util.load_file') as m_load: - m_load.side_effect = OSError(errno.EACCES, 'Not allowed') - self.assertEqual(1, query.handle_args('anyname', args)) - self.assertIn( - "ERROR: No read permission on '%s'. Try sudo" % noread_fn, - self.logs.getvalue()) - self.assertIn( - "ERROR: No read permission on '%s'. Try sudo" % noread_fn, - m_stderr.getvalue()) + with mock.patch('cloudinit.cmd.query.util.load_file') as m_load: + m_load.side_effect = OSError(errno.EACCES, 'Not allowed') + assert 1 == query.handle_args('anyname', args) + msg = "No read permission on '%s'. Try sudo" % noread_fn + assert msg in caplog.text - def test_handle_args_defaults_instance_data(self): + def test_handle_args_defaults_instance_data(self, caplog, tmpdir): """When no instance_data argument, default to configured run_dir.""" args = self.args( debug=False, dump_all=True, format=None, instance_data=None, list_keys=False, user_data=None, vendor_data=None, varname=None) - run_dir = self.tmp_path('run_dir', dir=self.tmp) - ensure_dir(run_dir) - paths = Paths({'run_dir': run_dir}) - self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths') - self.m_paths.return_value = paths - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: - self.assertEqual(1, query.handle_args('anyname', args)) - json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) - self.assertIn( - 'ERROR: Missing instance-data file: %s' % json_file, - self.logs.getvalue()) - self.assertIn( - 'ERROR: Missing instance-data file: %s' % json_file, - m_stderr.getvalue()) + paths, run_dir, _, _ = self._setup_paths(tmpdir) + with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths + assert 1 == query.handle_args('anyname', args) + json_file = run_dir.join(INSTANCE_JSON_FILE) + msg = 'Missing instance-data file: %s' % json_file.strpath + assert msg in caplog.text - def test_handle_args_root_fallsback_to_instance_data(self): + def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir): """When no instance_data argument, root falls back to redacted json.""" args = self.args( debug=False, dump_all=True, format=None, instance_data=None, list_keys=False, user_data=None, vendor_data=None, varname=None) - run_dir = self.tmp_path('run_dir', dir=self.tmp) - ensure_dir(run_dir) - paths = Paths({'run_dir': run_dir}) - self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths') - self.m_paths.return_value = paths - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + paths, run_dir, _, _ = self._setup_paths(tmpdir) 
+ with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths with mock.patch('os.getuid') as m_getuid: m_getuid.return_value = 0 - self.assertEqual(1, query.handle_args('anyname', args)) - json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) - sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) - self.assertIn( - 'WARNING: Missing root-readable %s. Using redacted %s instead.' % ( - sensitive_file, json_file), - m_stderr.getvalue()) + assert 1 == query.handle_args('anyname', args) + json_file = run_dir.join(INSTANCE_JSON_FILE) + sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) + msg = ( + 'Missing root-readable %s. Using redacted %s instead.' % + ( + sensitive_file.strpath, json_file.strpath + ) + ) + assert msg in caplog.text - def test_handle_args_root_uses_instance_sensitive_data(self): - """When no instance_data argument, root uses semsitive json.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - vendor_data = self.tmp_path('vendor-data', dir=self.tmp) - write_file(user_data, 'ud') - write_file(vendor_data, 'vd') - run_dir = self.tmp_path('run_dir', dir=self.tmp) - sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) - write_file(sensitive_file, '{"my-var": "it worked"}') - ensure_dir(run_dir) - paths = Paths({'run_dir': run_dir}) - self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths') - self.m_paths.return_value = paths + @pytest.mark.parametrize( + 'ud_src,ud_expected,vd_src,vd_expected', + ( + ('hi mom', 'hi mom', 'hi pops', 'hi pops'), + ('ud'.encode('utf-8'), 'ud', 'vd'.encode('utf-8'), 'vd'), + (_gzip_data(b'ud'), 'ud', _gzip_data(b'vd'), 'vd'), + (_gzip_data('ud'.encode('utf-8')), 'ud', _gzip_data(b'vd'), 'vd'), + ) + ) + def test_handle_args_root_processes_user_data( + self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir + ): + """Support reading multiple user-data file content types""" + paths, run_dir, user_data, vendor_data = self._setup_paths( + tmpdir, ud_val=ud_src, vd_val=vd_src + ) + sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) + sensitive_file.write('{"my-var": "it worked"}') args = self.args( debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=vendor_data, vendor_data=vendor_data, - varname=None) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + list_keys=False, user_data=user_data.strpath, + vendor_data=vendor_data.strpath, varname=None) + with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths with mock.patch('os.getuid') as m_getuid: m_getuid.return_value = 0 - self.assertEqual(0, query.handle_args('anyname', args)) - self.assertEqual( - '{\n "my_var": "it worked",\n "userdata": "vd",\n ' - '"vendordata": "vd"\n}\n', m_stdout.getvalue()) + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + cmd_output = json.loads(out) + assert "it worked" == cmd_output['my_var'] + if ud_expected == "ci-b64:": + ud_expected = "ci-b64:{}".format(b64e(ud_src)) + if vd_expected == "ci-b64:": + vd_expected = "ci-b64:{}".format(b64e(vd_src)) + assert ud_expected == cmd_output['userdata'] + assert vd_expected == cmd_output['vendordata'] - def test_handle_args_dumps_all_instance_data(self): + def test_handle_args_root_uses_instance_sensitive_data( + self, capsys, tmpdir + ): + """When no instance_data argument, root uses sensitive json.""" + paths, run_dir, user_data, vendor_data = self._setup_paths( + tmpdir, ud_val='ud', vd_val='vd' + 
) + sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) + sensitive_file.write('{"my-var": "it worked"}') + args = self.args( + debug=False, dump_all=True, format=None, instance_data=None, + list_keys=False, user_data=user_data.strpath, + vendor_data=vendor_data.strpath, varname=None) + with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + assert 0 == query.handle_args('anyname', args) + expected = ( + '{\n "my_var": "it worked",\n "userdata": "ud",\n ' + '"vendordata": "vd"\n}\n' + ) + out, _err = capsys.readouterr() + assert expected == out + + def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir): """When --all is specified query will dump all instance data vars.""" - write_file(self.instance_data, '{"my-var": "it worked"}') + instance_data = tmpdir.join('instance-data') + instance_data.write('{"my-var": "it worked"}') args = self.args( debug=False, dump_all=True, format=None, - instance_data=self.instance_data, list_keys=False, + instance_data=instance_data.strpath, list_keys=False, user_data='ud', vendor_data='vd', varname=None) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 100 - self.assertEqual(0, query.handle_args('anyname', args)) - self.assertEqual( + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + expected = ( '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n' ' "vendordata": "<%s> file:vd"\n}\n' % ( - REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE), - m_stdout.getvalue()) + REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE + ) + ) + out, _err = capsys.readouterr() + assert expected == out - def test_handle_args_returns_top_level_varname(self): + def test_handle_args_returns_top_level_varname(self, capsys, tmpdir): """When the argument varname is passed, report its value.""" - write_file(self.instance_data, '{"my-var": "it worked"}') + instance_data = tmpdir.join('instance-data') + instance_data.write('{"my-var": "it worked"}') args = self.args( debug=False, dump_all=True, format=None, - instance_data=self.instance_data, list_keys=False, + instance_data=instance_data.strpath, list_keys=False, user_data='ud', vendor_data='vd', varname='my_var') - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 100 - self.assertEqual(0, query.handle_args('anyname', args)) - self.assertEqual('it worked\n', m_stdout.getvalue()) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + assert 'it worked\n' == out - def test_handle_args_returns_nested_varname(self): + def test_handle_args_returns_nested_varname(self, capsys, tmpdir): """If user_data file is a jinja template render instance-data vars.""" - write_file(self.instance_data, - '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}') + instance_data = tmpdir.join('instance-data') + instance_data.write( + '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}' + ) args = self.args( debug=False, dump_all=False, format=None, - instance_data=self.instance_data, user_data='ud', vendor_data='vd', - list_keys=False, varname='v1.key_2') - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with mock.patch('os.getuid') as m_getuid: - 
m_getuid.return_value = 100 - self.assertEqual(0, query.handle_args('anyname', args)) - self.assertEqual('value-2\n', m_stdout.getvalue()) + instance_data=instance_data.strpath, user_data='ud', + vendor_data='vd', list_keys=False, varname='v1.key_2') + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + assert 'value-2\n' == out - def test_handle_args_returns_standardized_vars_to_top_level_aliases(self): + def test_handle_args_returns_standardized_vars_to_top_level_aliases( + self, capsys, tmpdir + ): """Any standardized vars under v# are promoted as top-level aliases.""" - write_file( - self.instance_data, + instance_data = tmpdir.join('instance-data') + instance_data.write( '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},' ' "top": "gun"}') expected = dedent("""\ @@ -209,65 +274,68 @@ class TestQuery(CiTestCase): """) args = self.args( debug=False, dump_all=True, format=None, - instance_data=self.instance_data, user_data='ud', vendor_data='vd', - list_keys=False, varname=None) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 100 - self.assertEqual(0, query.handle_args('anyname', args)) - self.assertEqual(expected, m_stdout.getvalue()) + instance_data=instance_data.strpath, user_data='ud', + vendor_data='vd', list_keys=False, varname=None) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + assert expected == out - def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self): + def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname( + self, capsys, tmpdir + ): """Sort all top-level keys when only --list-keys provided.""" - write_file( - self.instance_data, + instance_data = tmpdir.join('instance-data') + instance_data.write( '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},' ' "top": "gun"}') expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n' args = self.args( debug=False, dump_all=False, format=None, - instance_data=self.instance_data, list_keys=True, user_data='ud', - vendor_data='vd', varname=None) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 100 - self.assertEqual(0, query.handle_args('anyname', args)) - self.assertEqual(expected, m_stdout.getvalue()) + instance_data=instance_data.strpath, list_keys=True, + user_data='ud', vendor_data='vd', varname=None) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + assert expected == out - def test_handle_args_list_keys_sorts_nested_keys_when_varname(self): + def test_handle_args_list_keys_sorts_nested_keys_when_varname( + self, capsys, tmpdir + ): """Sort all nested keys of varname object when --list-keys provided.""" - write_file( - self.instance_data, + instance_data = tmpdir.join('instance-data') + instance_data.write( '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' + ' {"v2_2": "val2.2"}, "top": "gun"}') expected = 'v1_1\nv1_2\n' args = self.args( debug=False, dump_all=False, format=None, - instance_data=self.instance_data, list_keys=True, + instance_data=instance_data.strpath, list_keys=True, user_data='ud', vendor_data='vd', varname='v1') - with mock.patch('sys.stdout', new_callable=StringIO) as 
m_stdout: - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 100 - self.assertEqual(0, query.handle_args('anyname', args)) - self.assertEqual(expected, m_stdout.getvalue()) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + assert expected == out - def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self): + def test_handle_args_list_keys_errors_when_varname_is_not_a_dict( + self, caplog, tmpdir + ): """Raise an error when --list-keys and varname specify a non-list.""" - write_file( - self.instance_data, + instance_data = tmpdir.join('instance-data') + instance_data.write( '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' + '{"v2_2": "val2.2"}, "top": "gun"}') - expected_error = "ERROR: --list-keys provided but 'top' is not a dict" + expected_error = "--list-keys provided but 'top' is not a dict" args = self.args( debug=False, dump_all=False, format=None, - instance_data=self.instance_data, list_keys=True, user_data='ud', - vendor_data='vd', varname='top') - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 100 - self.assertEqual(1, query.handle_args('anyname', args)) - self.assertEqual('', m_stdout.getvalue()) - self.assertIn(expected_error, m_stderr.getvalue()) + instance_data=instance_data.strpath, list_keys=True, + user_data='ud', vendor_data='vd', varname='top') + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 1 == query.handle_args('anyname', args) + assert expected_error in caplog.text # vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py index aded858..1c9eec3 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/cloudinit/cmd/tests/test_status.py @@ -2,7 +2,7 @@ from collections import namedtuple import os -from six import StringIO +from io import StringIO from textwrap import dedent from cloudinit.atomic_helper import write_json @@ -382,7 +382,6 @@ class TestStatus(CiTestCase): wrap_and_call( 'cloudinit.cmd.status', {'sys.argv': {'new': ['status']}, - 'sys.exit': {'side_effect': self.sys_exit}, '_is_cloudinit_disabled': (False, ''), 'Init': {'side_effect': self.init_class}}, status.main) diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py new file mode 100644 index 0000000..84d7a0b --- /dev/null +++ b/cloudinit/config/cc_apk_configure.py @@ -0,0 +1,263 @@ +# Copyright (c) 2020 Dermot Bradley +# +# Author: Dermot Bradley +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Apk Configure: Configures apk repositories file.""" + +from textwrap import dedent + +from cloudinit import log as logging +from cloudinit import temp_utils +from cloudinit import templater +from cloudinit import util +from cloudinit.config.schema import ( + get_schema_doc, validate_cloudconfig_schema) +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + + +# If no mirror is specified then use this one +DEFAULT_MIRROR = "https://alpine.global.ssl.fastly.net/alpine" + + +REPOSITORIES_TEMPLATE = """\ +## template:jinja +# +# Created by cloud-init +# +# This file is written on first boot of an instance +# + +{{ alpine_baseurl }}/{{ alpine_version }}/main +{% if community_enabled -%} +{{ alpine_baseurl }}/{{ alpine_version }}/community +{% endif -%} +{% if testing_enabled -%} +{% if alpine_version != 'edge' %} +# +# Testing - using with non-Edge installation may cause problems! +# +{% endif %} +{{ alpine_baseurl }}/edge/testing +{% endif %} +{% if local_repo != '' %} + +# +# Local repo +# +{{ local_repo }}/{{ alpine_version }} +{% endif %} + +""" + + +frequency = PER_INSTANCE +distros = ['alpine'] +schema = { + 'id': 'cc_apk_configure', + 'name': 'APK Configure', + 'title': 'Configure apk repositories file', + 'description': dedent("""\ + This module handles configuration of the /etc/apk/repositories file. + + .. note:: + To ensure that apk configuration is valid yaml, any strings + containing special characters, especially ``:`` should be quoted. + """), + 'distros': distros, + 'examples': [ + dedent("""\ + # Keep the existing /etc/apk/repositories file unaltered. + apk_repos: + preserve_repositories: true + """), + dedent("""\ + # Create repositories file for Alpine v3.12 main and community + # using default mirror site. + apk_repos: + alpine_repo: + community_enabled: true + version: 'v3.12' + """), + dedent("""\ + # Create repositories file for Alpine Edge main, community, and + # testing using a specified mirror site and also a local repo. + apk_repos: + alpine_repo: + base_url: 'https://some-alpine-mirror/alpine' + community_enabled: true + testing_enabled: true + version: 'edge' + local_repo_base_url: 'https://my-local-server/local-alpine' + """), + ], + 'frequency': frequency, + 'type': 'object', + 'properties': { + 'apk_repos': { + 'type': 'object', + 'properties': { + 'preserve_repositories': { + 'type': 'boolean', + 'default': False, + 'description': dedent("""\ + By default, cloud-init will generate a new repositories + file ``/etc/apk/repositories`` based on any valid + configuration settings specified within a apk_repos + section of cloud config. To disable this behavior and + preserve the repositories file from the pristine image, + set ``preserve_repositories`` to ``true``. + + The ``preserve_repositories`` option overrides + all other config keys that would alter + ``/etc/apk/repositories``. + """) + }, + 'alpine_repo': { + 'type': ['object', 'null'], + 'properties': { + 'base_url': { + 'type': 'string', + 'default': DEFAULT_MIRROR, + 'description': dedent("""\ + The base URL of an Alpine repository, or + mirror, to download official packages from. + If not specified then it defaults to ``{}`` + """.format(DEFAULT_MIRROR)) + }, + 'community_enabled': { + 'type': 'boolean', + 'default': False, + 'description': dedent("""\ + Whether to add the Community repo to the + repositories file. By default the Community + repo is not included. 
+ """) + }, + 'testing_enabled': { + 'type': 'boolean', + 'default': False, + 'description': dedent("""\ + Whether to add the Testing repo to the + repositories file. By default the Testing + repo is not included. It is only recommended + to use the Testing repo on a machine running + the ``Edge`` version of Alpine as packages + installed from Testing may have dependancies + that conflict with those in non-Edge Main or + Community repos." + """) + }, + 'version': { + 'type': 'string', + 'description': dedent("""\ + The Alpine version to use (e.g. ``v3.12`` or + ``edge``) + """) + }, + }, + 'required': ['version'], + 'minProperties': 1, + 'additionalProperties': False, + }, + 'local_repo_base_url': { + 'type': 'string', + 'description': dedent("""\ + The base URL of an Alpine repository containing + unofficial packages + """) + } + }, + 'required': [], + 'minProperties': 1, # Either preserve_repositories or alpine_repo + 'additionalProperties': False, + } + } +} + +__doc__ = get_schema_doc(schema) + + +def handle(name, cfg, cloud, log, _args): + """ + Call to handle apk_repos sections in cloud-config file. + + @param name: The module name "apk-configure" from cloud.cfg + @param cfg: A nested dict containing the entire cloud config contents. + @param cloud: The CloudInit object in use. + @param log: Pre-initialized Python logger object to use for logging. + @param _args: Any module arguments from cloud.cfg + """ + + # If there is no "apk_repos" section in the configuration + # then do nothing. + apk_section = cfg.get('apk_repos') + if not apk_section: + LOG.debug(("Skipping module named %s," + " no 'apk_repos' section found"), name) + return + + validate_cloudconfig_schema(cfg, schema) + + # If "preserve_repositories" is explicitly set to True in + # the configuration do nothing. + if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False): + LOG.debug(("Skipping module named %s," + " 'preserve_repositories' is set"), name) + return + + # If there is no "alpine_repo" subsection of "apk_repos" present in the + # configuration then do nothing, as at least "version" is required to + # create valid repositories entries. + alpine_repo = apk_section.get('alpine_repo') + if not alpine_repo: + LOG.debug(("Skipping module named %s," + " no 'alpine_repo' configuration found"), name) + return + + # If there is no "version" value present in configuration then do nothing. + alpine_version = alpine_repo.get('version') + if not alpine_version: + LOG.debug(("Skipping module named %s," + " 'version' not specified in alpine_repo"), name) + return + + local_repo = apk_section.get('local_repo_base_url', '') + + _write_repositories_file(alpine_repo, alpine_version, local_repo) + + +def _write_repositories_file(alpine_repo, alpine_version, local_repo): + """ + Write the /etc/apk/repositories file with the specified entries. + + @param alpine_repo: A nested dict of the alpine_repo configuration. + @param alpine_version: A string of the Alpine version to use. + @param local_repo: A string containing the base URL of a local repo. 
+ """ + + repo_file = '/etc/apk/repositories' + + alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR) + + params = {'alpine_baseurl': alpine_baseurl, + 'alpine_version': alpine_version, + 'community_enabled': alpine_repo.get('community_enabled'), + 'testing_enabled': alpine_repo.get('testing_enabled'), + 'local_repo': local_repo} + + tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + template_fn = tfile[1] # Filepath is second item in tuple + util.write_file(template_fn, content=REPOSITORIES_TEMPLATE) + + LOG.debug('Generating Alpine repository configuration file: %s', + repo_file) + templater.render_to_file(template_fn, repo_file, params) + # Clean up temporary template + util.del_file(template_fn) + + +# vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index f01e2aa..73d8719 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -6,228 +6,372 @@ # # This file is part of cloud-init. See LICENSE file for license information. -""" -Apt Configure -------------- -**Summary:** configure apt - -This module handles both configuration of apt options and adding source lists. -There are configuration options such as ``apt_get_wrapper`` and -``apt_get_command`` that control how cloud-init invokes apt-get. -These configuration options are handled on a per-distro basis, so consult -documentation for cloud-init's distro support for instructions on using -these config options. - -.. note:: - To ensure that apt configuration is valid yaml, any strings containing - special characters, especially ``:`` should be quoted. - -.. note:: - For more information about apt configuration, see the - ``Additional apt configuration`` example. - -**Preserve sources.list:** - -By default, cloud-init will generate a new sources list in -``/etc/apt/sources.list.d`` based on any changes specified in cloud config. -To disable this behavior and preserve the sources list from the pristine image, -set ``preserve_sources_list`` to ``true``. - -.. note:: - The ``preserve_sources_list`` option overrides all other config keys that - would alter ``sources.list`` or ``sources.list.d``, **except** for - additional sources to be added to ``sources.list.d``. - -**Disable source suites:** - -Entries in the sources list can be disabled using ``disable_suites``, which -takes a list of suites to be disabled. If the string ``$RELEASE`` is present in -a suite in the ``disable_suites`` list, it will be replaced with the release -name. If a suite specified in ``disable_suites`` is not present in -``sources.list`` it will be ignored. For convenience, several aliases are -provided for ``disable_suites``: - - - ``updates`` => ``$RELEASE-updates`` - - ``backports`` => ``$RELEASE-backports`` - - ``security`` => ``$RELEASE-security`` - - ``proposed`` => ``$RELEASE-proposed`` - - ``release`` => ``$RELEASE`` - -.. note:: - When a suite is disabled using ``disable_suites``, its entry in - ``sources.list`` is not deleted; it is just commented out. - -**Configure primary and security mirrors:** - -The primary and security archive mirrors can be specified using the ``primary`` -and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys -take a list of configs, allowing mirrors to be specified on a per-architecture -basis. Each config is a dictionary which must have an entry for ``arches``, -specifying which architectures that config entry is for. 
The keyword -``default`` applies to any architecture not explicitly listed. The mirror url -can be specified with the ``uri`` key, or a list of mirrors to check can be -provided in order, with the first mirror that can be resolved being selected. -This allows the same configuration to be used in different environment, with -different hosts used for a local apt mirror. If no mirror is provided by -``uri`` or ``search``, ``search_dns`` may be used to search for dns names in -the format ``-mirror`` in each of the following: - - - fqdn of this host per cloud metadata - - localdomain - - domains listed in ``/etc/resolv.conf`` - -If there is a dns entry for ``-mirror``, then it is assumed that there -is a distro mirror at ``http://-mirror./``. If the -``primary`` key is defined, but not the ``security`` key, then then -configuration for ``primary`` is also used for ``security``. If ``search_dns`` -is used for the ``security`` key, the search pattern will be. -``-security-mirror``. - -If no mirrors are specified, or all lookups fail, then default mirrors defined -in the datasource are used. If none are present in the datasource either the -following defaults are used: - - - primary: ``http://archive.ubuntu.com/ubuntu`` - - security: ``http://security.ubuntu.com/ubuntu`` - -**Specify sources.list template:** - -A custom template for rendering ``sources.list`` can be specefied with -``sources_list``. If no ``sources_list`` template is given, cloud-init will -use sane default. Within this template, the following strings will be replaced -with the appropriate values: - - - ``$MIRROR`` - - ``$RELEASE`` - - ``$PRIMARY`` - - ``$SECURITY`` - -**Pass configuration to apt:** - -Apt configuration can be specified using ``conf``. Configuration is specified -as a string. For multiline apt configuration, make sure to follow yaml syntax. - -**Configure apt proxy:** - -Proxy configuration for apt can be specified using ``conf``, but proxy config -keys also exist for convenience. The proxy config keys, ``http_proxy``, -``ftp_proxy``, and ``https_proxy`` may be used to specify a proxy for http, ftp -and https protocols respectively. The ``proxy`` key also exists as an alias for -``http_proxy``. Proxy url is specified in the format -``://[[user][:pass]@]host[:port]/``. - -**Add apt repos by regex:** +"""Apt Configure: Configure apt for the user.""" -All source entries in ``apt-sources`` that match regex in -``add_apt_repo_match`` will be added to the system using -``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults -to ``^[\\w-]+:\\w`` - -**Add source list entries:** - -Source list entries can be specified as a dictionary under the ``sources`` -config key, with key in the dict representing a different source file. The key -of each source entry will be used as an id that can be referenced in -other config entries, as well as the filename for the source's configuration -under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, -it will be appended. If there is no configuration for a key in ``sources``, no -file will be written, but the key may still be referred to as an id in other -``sources`` entries. 
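Aside: a quick standalone check of the default add_apt_repo_match pattern described above; the regex value is the same one the new code assigns to ADD_APT_REPO_MATCH.

    import re

    ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
    for candidate in ('ppa:widelands/build17', 'cloud-archive:queens',
                      'deb http://example.com/ubuntu xenial main'):
        print(candidate, '->', bool(re.match(ADD_APT_REPO_MATCH, candidate)))
    # ppa:widelands/build17 -> True
    # cloud-archive:queens -> True
    # deb http://example.com/ubuntu xenial main -> False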
- -Each entry under ``sources`` is a dictionary which may contain any of the -following optional keys: - - - ``source``: a sources.list entry (some variable replacements apply) - - ``keyid``: a key to import via shortid or fingerprint - - ``key``: a raw PGP key - - ``keyserver``: alternate keyserver to pull ``keyid`` key from - -The ``source`` key supports variable replacements for the following strings: - - - ``$MIRROR`` - - ``$PRIMARY`` - - ``$SECURITY`` - - ``$RELEASE`` - -**Internal name:** ``cc_apt_configure`` +import glob +import os +import re +from textwrap import dedent -**Module frequency:** per instance +from cloudinit.config.schema import ( + get_schema_doc, validate_cloudconfig_schema) +from cloudinit import gpg +from cloudinit import log as logging +from cloudinit import subp +from cloudinit import templater +from cloudinit import util +from cloudinit.settings import PER_INSTANCE -**Supported distros:** ubuntu, debian +LOG = logging.getLogger(__name__) -**Config keys**:: +# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') +ADD_APT_REPO_MATCH = r"^[\w-]+:\w" - apt: - preserve_sources_list: - disable_suites: +frequency = PER_INSTANCE +distros = ["ubuntu", "debian"] +mirror_property = { + 'type': 'array', + 'item': { + 'type': 'object', + 'additionalProperties': False, + 'required': ['arches'], + 'properties': { + 'arches': { + 'type': 'array', + 'item': { + 'type': 'string' + }, + 'minItems': 1 + }, + 'uri': { + 'type': 'string', + 'format': 'uri' + }, + 'search': { + 'type': 'array', + 'item': { + 'type': 'string', + 'format': 'uri' + }, + 'minItems': 1 + }, + 'search_dns': { + 'type': 'boolean', + } + } + } +} +schema = { + 'id': 'cc_apt_configure', + 'name': 'Apt Configure', + 'title': 'Configure apt for the user', + 'description': dedent("""\ + This module handles both configuration of apt options and adding + source lists. There are configuration options such as + ``apt_get_wrapper`` and ``apt_get_command`` that control how + cloud-init invokes apt-get. These configuration options are + handled on a per-distro basis, so consult documentation for + cloud-init's distro support for instructions on using + these config options. + + .. note:: + To ensure that apt configuration is valid yaml, any strings + containing special characters, especially ``:`` should be quoted. + + .. 
note:: + For more information about apt configuration, see the + ``Additional apt configuration`` example."""), + 'distros': distros, + 'examples': [dedent("""\ + apt: + preserve_sources_list: false + disable_suites: - $RELEASE-updates - backports - $RELEASE - mysuite - primary: + primary: - arches: - amd64 - i386 - default - uri: "http://us.archive.ubuntu.com/ubuntu" + uri: 'http://us.archive.ubuntu.com/ubuntu' search: - - "http://cool.but-sometimes-unreachable.com/ubuntu" - - "http://us.archive.ubuntu.com/ubuntu" + - 'http://cool.but-sometimes-unreachable.com/ubuntu' + - 'http://us.archive.ubuntu.com/ubuntu' search_dns: - arches: - s390x - arm64 - uri: "http://archive-to-use-for-arm64.example.com/ubuntu" - security: + uri: 'http://archive-to-use-for-arm64.example.com/ubuntu' + security: - arches: - default search_dns: true - sources_list: | - deb $MIRROR $RELEASE main restricted - deb-src $MIRROR $RELEASE main restricted - deb $PRIMARY $RELEASE universe restricted - deb $SECURITY $RELEASE-security multiverse - debconf_selections: - set1: the-package the-package/some-flag boolean true - conf: | - APT { - Get { - Assume-Yes "true"; - Fix-Broken "true"; + sources_list: | + deb $MIRROR $RELEASE main restricted + deb-src $MIRROR $RELEASE main restricted + deb $PRIMARY $RELEASE universe restricted + deb $SECURITY $RELEASE-security multiverse + debconf_selections: + set1: the-package the-package/some-flag boolean true + conf: | + APT { + Get { + Assume-Yes 'true'; + Fix-Broken 'true'; + } + } + proxy: 'http://[[user][:pass]@]host[:port]/' + http_proxy: 'http://[[user][:pass]@]host[:port]/' + ftp_proxy: 'ftp://[[user][:pass]@]host[:port]/' + https_proxy: 'https://[[user][:pass]@]host[:port]/' + sources: + source1: + keyid: 'keyid' + keyserver: 'keyserverurl' + source: 'deb http:/// xenial main' + source2: + source: 'ppa:' + source3: + source: 'deb $MIRROR $RELEASE multiverse' + key: | + ------BEGIN PGP PUBLIC KEY BLOCK------- + + ------END PGP PUBLIC KEY BLOCK-------""")], + 'frequency': frequency, + 'type': 'object', + 'properties': { + 'apt': { + 'type': 'object', + 'additionalProperties': False, + 'properties': { + 'preserve_sources_list': { + 'type': 'boolean', + 'default': False, + 'description': dedent("""\ + By default, cloud-init will generate a new sources + list in ``/etc/apt/sources.list.d`` based on any + changes specified in cloud config. To disable this + behavior and preserve the sources list from the + pristine image, set ``preserve_sources_list`` + to ``true``. + + The ``preserve_sources_list`` option overrides + all other config keys that would alter + ``sources.list`` or ``sources.list.d``, + **except** for additional sources to be added + to ``sources.list.d``.""") + }, + 'disable_suites': { + 'type': 'array', + 'items': { + 'type': 'string' + }, + 'uniqueItems': True, + 'description': dedent("""\ + Entries in the sources list can be disabled using + ``disable_suites``, which takes a list of suites + to be disabled. If the string ``$RELEASE`` is + present in a suite in the ``disable_suites`` list, + it will be replaced with the release name. If a + suite specified in ``disable_suites`` is not + present in ``sources.list`` it will be ignored. + For convenience, several aliases are provided for + ``disable_suites``: + + - ``updates`` => ``$RELEASE-updates`` + - ``backports`` => ``$RELEASE-backports`` + - ``security`` => ``$RELEASE-security`` + - ``proposed`` => ``$RELEASE-proposed`` + - ``release`` => ``$RELEASE``. 
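
A minimal sketch of the alias expansion listed above (illustrative only; the
names here are not the module's real helpers)::

    SUITE_ALIASES = {
        'updates': '$RELEASE-updates',
        'backports': '$RELEASE-backports',
        'security': '$RELEASE-security',
        'proposed': '$RELEASE-proposed',
        'release': '$RELEASE',
    }

    def expand_suite(suite, release):
        # Expand a convenience alias, then substitute the release name.
        return SUITE_ALIASES.get(suite, suite).replace('$RELEASE', release)

    assert expand_suite('backports', 'xenial') == 'xenial-backports'
    assert expand_suite('$RELEASE-updates', 'xenial') == 'xenial-updates'
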
+
+                        When a suite is disabled using ``disable_suites``,
+                        its entry in ``sources.list`` is not deleted; it
+                        is just commented out.""")
+                },
+                'primary': {
+                    **mirror_property,
+                    'description': dedent("""\
+                        The primary and security archive mirrors can
+                        be specified using the ``primary`` and
+                        ``security`` keys, respectively. Both the
+                        ``primary`` and ``security`` keys take a list
+                        of configs, allowing mirrors to be specified
+                        on a per-architecture basis. Each config is a
+                        dictionary which must have an entry for
+                        ``arches``, specifying which architectures
+                        that config entry is for. The keyword
+                        ``default`` applies to any architecture not
+                        explicitly listed. The mirror url can be specified
+                        with the ``uri`` key, or a list of mirrors to
+                        check can be provided in order, with the first
+                        mirror that can be resolved being selected. This
+                        allows the same configuration to be used in
+                        different environments, with different hosts used
+                        for a local apt mirror. If no mirror is provided
+                        by ``uri`` or ``search``, ``search_dns`` may be
+                        used to search for dns names in the format
+                        ``-mirror`` in each of the following:
+
+                        - fqdn of this host per cloud metadata,
+                        - localdomain,
+                        - domains listed in ``/etc/resolv.conf``.
+
+                        If there is a dns entry for ``-mirror``,
+                        then it is assumed that there is a distro mirror
+                        at ``http://-mirror./``.
+                        If the ``primary`` key is defined, but not the
+                        ``security`` key, then the configuration for
+                        ``primary`` is also used for ``security``.
+                        If ``search_dns`` is used for the ``security``
+                        key, the search pattern will be
+                        ``-security-mirror``.
+
+                        If no mirrors are specified, or all lookups fail,
+                        then default mirrors defined in the datasource
+                        are used. If none are present in the datasource
+                        either, the following defaults are used:
+
+                        - ``primary`` => \
+                        ``http://archive.ubuntu.com/ubuntu``.
+                        - ``security`` => \
+                        ``http://security.ubuntu.com/ubuntu``
+                        """)},
+                'security': {
+                    **mirror_property,
+                    'description': dedent("""\
+                        Please refer to the primary config documentation""")
+                },
+                'add_apt_repo_match': {
+                    'type': 'string',
+                    'default': ADD_APT_REPO_MATCH,
+                    'description': dedent("""\
+                        All source entries in ``apt-sources`` that match
+                        regex in ``add_apt_repo_match`` will be added to
+                        the system using ``add-apt-repository``. If
+                        ``add_apt_repo_match`` is not specified, it
+                        defaults to ``{}``""".format(ADD_APT_REPO_MATCH))
+                },
+                'debconf_selections': {
+                    'type': 'object',
+                    'items': {'type': 'string'},
+                    'description': dedent("""\
+                        Debconf additional configurations can be specified as a
+                        dictionary under the ``debconf_selections`` config
+                        key, with each key in the dict representing a
+                        different set of configurations. The value of each key
+                        must be a string containing all the debconf
+                        configurations that must be applied. We will bundle
+                        all of the values and pass them to
+                        ``debconf-set-selections``. Therefore, each value line
+                        must be a valid entry for ``debconf-set-selections``,
+                        meaning that they must possess four distinct fields:
+
+                        ``pkgname question type answer``
+
+                        Where:
+
+                        - ``pkgname`` is the name of the package.
+                        - ``question`` is the name of the question.
+                        - ``type`` is the type of question.
+                        - ``answer`` is the value used to answer the \
+                        question.
+
+                        For example: \
+                        ``ippackage ippackage/ip string 127.0.0.1``
+                        """)
+                },
+                'sources_list': {
+                    'type': 'string',
+                    'description': dedent("""\
+                        Specifies a custom template for rendering
+                        ``sources.list``. If no ``sources_list`` template
+                        is given, cloud-init will use a sane default. Within
+                        this template, the following strings will be
+                        replaced with the appropriate values:
+
+                        - ``$MIRROR``
+                        - ``$RELEASE``
+                        - ``$PRIMARY``
+                        - ``$SECURITY``""")
+                },
+                'conf': {
+                    'type': 'string',
+                    'description': dedent("""\
+                        Specify configuration for apt, such as proxy
+                        configuration. This configuration is specified as a
+                        string. For multiline apt configuration, make sure
+                        to follow yaml syntax.""")
+                },
+                'https_proxy': {
+                    'type': 'string',
+                    'description': dedent("""\
+                        A more convenient way to specify an https apt proxy.
+                        The https proxy url is specified in the format
+                        ``https://[[user][:pass]@]host[:port]/``.""")
+                },
+                'http_proxy': {
+                    'type': 'string',
+                    'description': dedent("""\
+                        A more convenient way to specify an http apt proxy.
+                        The http proxy url is specified in the format
+                        ``http://[[user][:pass]@]host[:port]/``.""")
+                },
+                'proxy': {
+                    'type': 'string',
+                    'description': 'Alias for defining an http apt proxy.'
+                },
+                'ftp_proxy': {
+                    'type': 'string',
+                    'description': dedent("""\
+                        A more convenient way to specify an ftp apt proxy.
+                        The ftp proxy url is specified in the format
+                        ``ftp://[[user][:pass]@]host[:port]/``.""")
+                },
+                'sources': {
+                    'type': 'object',
+                    'items': {'type': 'string'},
+                    'description': dedent("""\
+                        Source list entries can be specified as a
+                        dictionary under the ``sources`` config key, with
+                        each key in the dict representing a different source
+                        file. The key of each source entry will be used
+                        as an id that can be referenced in other config
+                        entries, as well as the filename for the source's
+                        configuration under ``/etc/apt/sources.list.d``.
+                        If the name does not end with ``.list``, it will
+                        be appended. If there is no configuration for a
+                        key in ``sources``, no file will be written, but
+                        the key may still be referred to as an id in other
+                        ``sources`` entries.
+
+                        Each entry under ``sources`` is a dictionary which
+                        may contain any of the following optional keys:
+
+                        - ``source``: a sources.list entry \
+                        (some variable replacements apply).
+                        - ``keyid``: a key to import via shortid or \
+                        fingerprint.
+                        - ``key``: a raw PGP key.
+                        - ``keyserver``: alternate keyserver to pull \
+                        ``keyid`` key from.
+ + The ``source`` key supports variable + replacements for the following strings: + + - ``$MIRROR`` + - ``$PRIMARY`` + - ``$SECURITY`` + - ``$RELEASE``""") } } - proxy: "http://[[user][:pass]@]host[:port]/" - http_proxy: "http://[[user][:pass]@]host[:port]/" - ftp_proxy: "ftp://[[user][:pass]@]host[:port]/" - https_proxy: "https://[[user][:pass]@]host[:port]/" - sources: - source1: - keyid: "keyid" - keyserver: "keyserverurl" - source: "deb http:/// xenial main" - source2: - source: "ppa:" - source3: - source: "deb $MIRROR $RELEASE multiverse" - key: | - ------BEGIN PGP PUBLIC KEY BLOCK------- - - ------END PGP PUBLIC KEY BLOCK------- -""" - -import glob -import os -import re - -from cloudinit import gpg -from cloudinit import log as logging -from cloudinit import templater -from cloudinit import util + } + } +} -LOG = logging.getLogger(__name__) +__doc__ = get_schema_doc(schema) -# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') -ADD_APT_REPO_MATCH = r"^[\w-]+:\w" # place where apt stores cached repository data APT_LISTS = "/var/lib/apt/lists" @@ -253,7 +397,7 @@ def get_default_mirrors(arch=None, target=None): architecture, for more see: https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" if arch is None: - arch = util.get_architecture(target) + arch = util.get_dpkg_architecture(target) if arch in PRIMARY_ARCHES: return PRIMARY_ARCH_MIRRORS.copy() if arch in PORTS_ARCHES: @@ -279,6 +423,7 @@ def handle(name, ocfg, cloud, log, _): "Expected dictionary for 'apt' config, found {config_type}".format( config_type=type(cfg))) + validate_cloudconfig_schema(cfg, schema) apply_debconf_selections(cfg, target) apply_apt(cfg, cloud, target) @@ -287,7 +432,7 @@ def _should_configure_on_empty_apt(): # if no config was provided, should apt configuration be done? if util.system_is_snappy(): return False, "system is snappy." - if not (util.which('apt-get') or util.which('apt')): + if not (subp.which('apt-get') or subp.which('apt')): return False, "no apt commands." return True, "Apt is available." 
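
With the schema in place, ``handle()`` can validate user data before acting on
it. A pared-down sketch of that check, assuming the ``jsonschema`` package
that backs ``validate_cloudconfig_schema``::

    from jsonschema import Draft4Validator

    apt_schema = {
        'type': 'object',
        'properties': {
            'preserve_sources_list': {'type': 'boolean'},
            'disable_suites': {'type': 'array', 'items': {'type': 'string'}},
        },
    }

    cfg = {'preserve_sources_list': False,
           'disable_suites': ['updates', 'backports']}

    # iter_errors() yields nothing when the config conforms to the schema.
    for error in Draft4Validator(apt_schema).iter_errors(cfg):
        print('apt config error: %s' % error.message)
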
@@ -303,13 +448,13 @@ def apply_apt(cfg, cloud, target): LOG.debug("handling apt config: %s", cfg) release = util.lsb_release(target=target)['codename'] - arch = util.get_architecture(target) + arch = util.get_dpkg_architecture(target) mirrors = find_apt_mirror_info(cfg, cloud, arch=arch) LOG.debug("Apt Mirror info: %s", mirrors) if util.is_false(cfg.get('preserve_sources_list', False)): generate_sources_list(cfg, release, mirrors, cloud) - rename_apt_lists(mirrors, target) + rename_apt_lists(mirrors, target, arch) try: apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN) @@ -334,7 +479,7 @@ def apply_apt(cfg, cloud, target): def debconf_set_selections(selections, target=None): if not selections.endswith(b'\n'): selections += b'\n' - util.subp(['debconf-set-selections'], data=selections, target=target, + subp.subp(['debconf-set-selections'], data=selections, target=target, capture=True) @@ -359,7 +504,7 @@ def dpkg_reconfigure(packages, target=None): "but cannot be unconfigured: %s", unhandled) if len(to_config): - util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] + + subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] + list(to_config), data=None, target=target, capture=True) @@ -402,7 +547,7 @@ def apply_debconf_selections(cfg, target=None): def clean_cloud_init(target): """clean out any local cloud-init config""" flist = glob.glob( - util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) + subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) LOG.debug("cleaning cloud-init config from: %s", flist) for dpkg_cfg in flist: @@ -427,11 +572,11 @@ def mirrorurl_to_apt_fileprefix(mirror): return string -def rename_apt_lists(new_mirrors, target=None): +def rename_apt_lists(new_mirrors, target, arch): """rename_apt_lists - rename apt lists to preserve old cache data""" - default_mirrors = get_default_mirrors(util.get_architecture(target)) + default_mirrors = get_default_mirrors(arch) - pre = util.target_path(target, APT_LISTS) + pre = subp.target_path(target, APT_LISTS) for (name, omirror) in default_mirrors.items(): nmirror = new_mirrors.get(name) if not nmirror: @@ -550,8 +695,8 @@ def add_apt_key_raw(key, target=None): """ LOG.debug("Adding key:\n'%s'", key) try: - util.subp(['apt-key', 'add', '-'], data=key.encode(), target=target) - except util.ProcessExecutionError: + subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target) + except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise @@ -614,13 +759,13 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, if aa_repo_match(source): try: - util.subp(["add-apt-repository", source], target=target) - except util.ProcessExecutionError: + subp.subp(["add-apt-repository", source], target=target) + except subp.ProcessExecutionError: LOG.exception("add-apt-repository failed.") raise continue - sourcefn = util.target_path(target, ent['filename']) + sourcefn = subp.target_path(target, ent['filename']) try: contents = "%s\n" % (source) util.write_file(sourcefn, contents, omode="a") @@ -763,25 +908,6 @@ def convert_to_v3_apt_format(cfg): return cfg -def search_for_mirror(candidates): - """ - Search through a list of mirror urls for one that works - This needs to return quickly. 
-    """
-    if candidates is None:
-        return None
-
-    LOG.debug("search for mirror in candidates: '%s'", candidates)
-    for cand in candidates:
-        try:
-            if util.is_resolvable_url(cand):
-                LOG.debug("found working mirror: '%s'", cand)
-                return cand
-        except Exception:
-            pass
-    return None
-
-
 def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
     """
     Try to resolve a list of predefines DNS names to pick mirrors
@@ -813,7 +939,7 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
         for post in doms:
             mirror_list.append(mirrorfmt % (post))
 
-    mirror = search_for_mirror(mirror_list)
+    mirror = util.search_for_mirror(mirror_list)
 
     return mirror
 
@@ -876,7 +1002,7 @@ def get_mirror(cfg, mirrortype, arch, cloud):
     # fallback to search if specified
     if mirror is None:
         # list of mirrors to try to resolve
-        mirror = search_for_mirror(mcfg.get("search", None))
+        mirror = util.search_for_mirror(mcfg.get("search", None))
 
     # fallback to search_dns if specified
     if mirror is None:
@@ -896,7 +1022,7 @@ def find_apt_mirror_info(cfg, cloud, arch=None):
     """
     if arch is None:
-        arch = util.get_architecture()
+        arch = util.get_dpkg_architecture()
     LOG.debug("got arch for mirror selection: %s", arch)
     pmirror = get_mirror(cfg, "primary", arch, cloud)
     LOG.debug("got primary mirror: %s", pmirror)
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 225d090..aa186ce 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -9,7 +9,7 @@ Apt Pipelining
 --------------
 **Summary:** configure apt pipelining
 
-This module configures apt's ``Acquite::http::Pipeline-Depth`` option, whcih
+This module configures apt's ``Acquire::http::Pipeline-Depth`` option, which
 controls how apt handles HTTP pipelining. It may be useful for pipelining to
 be disabled, because some web servers, such as S3 do not pipeline properly
 (LP: #948461).
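
The option itself is a one-line apt snippet; roughly what gets rendered (the
template below is illustrative, not necessarily the module's exact output)::

    APT_PIPE_TPL = 'Acquire::http::Pipeline-Depth "%s";\n'

    def render_pipelining(depth=0):
        # A depth of 0 disables pipelining entirely, which sidesteps
        # misbehaving servers such as S3.
        return APT_PIPE_TPL % int(depth)
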
The ``apt_pipelining`` config key may be set to ``false`` to disable diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 6813f53..246e449 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -16,6 +16,7 @@ from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit.settings import PER_ALWAYS from cloudinit import temp_utils +from cloudinit import subp from cloudinit import util frequency = PER_ALWAYS @@ -99,7 +100,7 @@ def handle(name, cfg, cloud, log, _args): if iid: env['INSTANCE_ID'] = str(iid) cmd = ['/bin/sh', tmpf.name] - util.subp(cmd, env=env, capture=False) + subp.subp(cmd, env=env, capture=False) except Exception: util.logexc(log, "Failed to run bootcmd module %s", name) raise diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index 0b4352c..9fdaeba 100755 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -39,6 +39,7 @@ Valid configuration options for this module are: """ from cloudinit.distros import ug_util +from cloudinit import subp from cloudinit import util distros = ['ubuntu', 'debian'] @@ -93,6 +94,6 @@ def handle(name, cfg, cloud, log, args): if len(shcmd): cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")] log.debug("Setting byobu to %s", value) - util.subp(cmd, capture=False) + subp.subp(cmd, capture=False) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 64bc900..3c453d9 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -16,11 +16,16 @@ can be removed from the system with the configuration option certificates must be specified using valid yaml. in order to specify a multiline certificate, the yaml multiline list syntax must be used +.. note:: + For Alpine Linux the "remove-defaults" functionality works if the + ca-certificates package is installed but not if the + ca-certificates-bundle package is installed. + **Internal name:** ``cc_ca_certs`` **Module frequency:** per instance -**Supported distros:** ubuntu, debian +**Supported distros:** alpine, debian, ubuntu **Config keys**:: @@ -36,6 +41,7 @@ can be removed from the system with the configuration option import os +from cloudinit import subp from cloudinit import util CA_CERT_PATH = "/usr/share/ca-certificates/" @@ -44,14 +50,14 @@ CA_CERT_CONFIG = "/etc/ca-certificates.conf" CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) -distros = ['ubuntu', 'debian'] +distros = ['alpine', 'debian', 'ubuntu'] def update_ca_certs(): """ Updates the CA certificate cache on the current machine. """ - util.subp(["update-ca-certificates"], capture=False) + subp.subp(["update-ca-certificates"], capture=False) def add_ca_certs(certs): @@ -66,17 +72,23 @@ def add_ca_certs(certs): cert_file_contents = "\n".join([str(c) for c in certs]) util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644) - # Append cert filename to CA_CERT_CONFIG file. - # We have to strip the content because blank lines in the file - # causes subsequent entries to be ignored. (LP: #1077020) - orig = util.load_file(CA_CERT_CONFIG) - cur_cont = '\n'.join([line for line in orig.splitlines() - if line != CA_CERT_FILENAME]) - out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME) + if os.stat(CA_CERT_CONFIG).st_size == 0: + # If the CA_CERT_CONFIG file is empty (i.e. 
all existing + # CA certs have been deleted) then simply output a single + # line with the cloud-init cert filename. + out = "%s\n" % CA_CERT_FILENAME + else: + # Append cert filename to CA_CERT_CONFIG file. + # We have to strip the content because blank lines in the file + # causes subsequent entries to be ignored. (LP: #1077020) + orig = util.load_file(CA_CERT_CONFIG) + cur_cont = '\n'.join([line for line in orig.splitlines() + if line != CA_CERT_FILENAME]) + out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME) util.write_file(CA_CERT_CONFIG, out, omode="wb") -def remove_default_ca_certs(): +def remove_default_ca_certs(distro_name): """ Removes all default trusted CA certificates from the system. To actually apply the change you must also call L{update_ca_certs}. @@ -84,11 +96,14 @@ def remove_default_ca_certs(): util.delete_dir_contents(CA_CERT_PATH) util.delete_dir_contents(CA_CERT_SYSTEM_PATH) util.write_file(CA_CERT_CONFIG, "", mode=0o644) - debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" - util.subp(('debconf-set-selections', '-'), debconf_sel) + + if distro_name != 'alpine': + debconf_sel = ( + "ca-certificates ca-certificates/trust_new_crts " + "select no") + subp.subp(('debconf-set-selections', '-'), debconf_sel) -def handle(name, cfg, _cloud, log, _args): +def handle(name, cfg, cloud, log, _args): """ Call to handle ca-cert sections in cloud-config file. @@ -110,7 +125,7 @@ def handle(name, cfg, _cloud, log, _args): # default trusted CA certs first. if ca_cert_cfg.get("remove-defaults", False): log.debug("Removing default certificates") - remove_default_ca_certs() + remove_default_ca_certs(cloud.distro.name) # If we are given any new trusted CA certs to add, add them. if "trusted" in ca_cert_cfg: diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 0ad6b7f..aaf7136 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -6,80 +6,22 @@ # # This file is part of cloud-init. See LICENSE file for license information. -""" -Chef ----- -**Summary:** module that configures, starts and installs chef. - -This module enables chef to be installed (from packages or -from gems, or from omnibus). Before this occurs chef configurations are -written to disk (validation.pem, client.pem, firstboot.json, client.rb), -and needed chef folders/directories are created (/etc/chef and /var/log/chef -and so-on). Then once installing proceeds correctly if configured chef will -be started (in daemon mode or in non-daemon mode) and then once that has -finished (if ran in non-daemon mode this will be when chef finishes -converging, if ran in daemon mode then no further actions are possible since -chef will have forked into its own process) then a post run function can -run that can do finishing activities (such as removing the validation pem -file). - -**Internal name:** ``cc_chef`` - -**Module frequency:** per always - -**Supported distros:** all - -**Config keys**:: - - chef: - directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef, - /var/cache/chef, /var/backups/chef, /var/run/chef) - validation_cert: (optional string to be written to file validation_key) - special value 'system' means set use existing file - validation_key: (optional the path for validation_cert. 
default - /etc/chef/validation.pem) - firstboot_path: (path to write run_list and initial_attributes keys that - should also be present in this configuration, defaults - to /etc/chef/firstboot.json) - exec: boolean to run or not run chef (defaults to false, unless - a gem installed is requested - where this will then default - to true) - - chef.rb template keys (if falsey, then will be skipped and not - written to /etc/chef/client.rb) - - chef: - client_key: - encrypted_data_bag_secret: - environment: - file_backup_path: - file_cache_path: - json_attribs: - log_level: - log_location: - node_name: - omnibus_url: - omnibus_url_retries: - omnibus_version: - pid_file: - server_url: - show_time: - ssl_verify_mode: - validation_cert: - validation_key: - validation_name: -""" +"""Chef: module that configures, starts and installs chef.""" import itertools import json import os +from textwrap import dedent +from cloudinit import subp +from cloudinit.config.schema import ( + get_schema_doc, validate_cloudconfig_schema) from cloudinit import templater +from cloudinit import temp_utils from cloudinit import url_helper from cloudinit import util +from cloudinit.settings import PER_ALWAYS -import six RUBY_VERSION_DEFAULT = "1.8" @@ -100,6 +42,8 @@ OMNIBUS_URL = "https://www.chef.io/chef/install.sh" OMNIBUS_URL_RETRIES = 5 CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem' +CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret' +CHEF_ENVIRONMENT = '_default' CHEF_FB_PATH = '/etc/chef/firstboot.json' CHEF_RB_TPL_DEFAULTS = { # These are ruby symbols... @@ -109,11 +53,11 @@ CHEF_RB_TPL_DEFAULTS = { 'log_location': '/var/log/chef/client.log', 'validation_key': CHEF_VALIDATION_PEM_PATH, 'validation_cert': None, - 'client_key': "/etc/chef/client.pem", + 'client_key': '/etc/chef/client.pem', 'json_attribs': CHEF_FB_PATH, - 'file_cache_path': "/var/cache/chef", - 'file_backup_path': "/var/backups/chef", - 'pid_file': "/var/run/chef/client.pid", + 'file_cache_path': '/var/cache/chef', + 'file_backup_path': '/var/backups/chef', + 'pid_file': '/var/run/chef/client.pid', 'show_time': True, 'encrypted_data_bag_secret': None, } @@ -124,9 +68,9 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([ 'client_key', 'file_cache_path', 'json_attribs', - 'file_cache_path', 'pid_file', 'encrypted_data_bag_secret', + 'chef_license', ]) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) @@ -143,12 +87,277 @@ CHEF_EXEC_PATH = '/usr/bin/chef-client' CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) -def is_installed(): - if not os.path.isfile(CHEF_EXEC_PATH): - return False - if not os.access(CHEF_EXEC_PATH, os.X_OK): - return False - return True +frequency = PER_ALWAYS +distros = ["all"] +schema = { + 'id': 'cc_chef', + 'name': 'Chef', + 'title': 'module that configures, starts and installs chef', + 'description': dedent("""\ + This module enables chef to be installed (from packages, + gems, or from omnibus). Before this occurs, chef configuration is + written to disk (validation.pem, client.pem, firstboot.json, + client.rb), and required directories are created (/etc/chef and + /var/log/chef and so-on). If configured, chef will be + installed and started in either daemon or non-daemon mode. 
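
The ``run_list`` and ``initial_attributes`` keys shown in the example that
follows end up in firstboot.json; a sketch of that payload (not the module's
exact serialization)::

    import json

    chef_cfg = {
        'run_list': ['recipe[apache2]', 'role[db]'],
        'initial_attributes': {'apache': {'prefork': {'maxclients': 100}}},
    }

    # Initial attributes become top-level keys, with the run list alongside.
    firstboot = dict(chef_cfg['initial_attributes'])
    firstboot['run_list'] = chef_cfg['run_list']
    print(json.dumps(firstboot, indent=2))
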
+        If run in non-daemon mode, post run actions are executed to do
+        finishing activities such as removing validation.pem."""),
+    'distros': distros,
+    'examples': [dedent("""
+        chef:
+          directories:
+            - /etc/chef
+            - /var/log/chef
+          validation_cert: system
+          install_type: omnibus
+          initial_attributes:
+            apache:
+              prefork:
+                maxclients: 100
+              keepalive: off
+          run_list:
+            - recipe[apache2]
+            - role[db]
+          encrypted_data_bag_secret: /etc/chef/encrypted_data_bag_secret
+          environment: _default
+          log_level: :auto
+          omnibus_url_retries: 2
+          server_url: https://chef.yourorg.com:4000
+          ssl_verify_mode: :verify_peer
+          validation_name: yourorg-validator""")],
+    'frequency': frequency,
+    'type': 'object',
+    'properties': {
+        'chef': {
+            'type': 'object',
+            'additionalProperties': False,
+            'properties': {
+                'directories': {
+                    'type': 'array',
+                    'items': {
+                        'type': 'string'
+                    },
+                    'uniqueItems': True,
+                    'description': dedent("""\
+                        Create the necessary directories for chef to run. By
+                        default, it creates the following directories:
+
+                        {chef_dirs}""").format(
+                        chef_dirs="\n".join(
+                            ["   - ``{}``".format(d) for d in CHEF_DIRS]
+                        )
+                    )
+                },
+                'validation_cert': {
+                    'type': 'string',
+                    'description': dedent("""\
+                        Optional string to be written to file validation_key.
+                        Special value ``system`` means use the existing file.
+                        """)
+                },
+                'validation_key': {
+                    'type': 'string',
+                    'default': CHEF_VALIDATION_PEM_PATH,
+                    'description': dedent("""\
+                        Optional path for validation_cert. Defaults to
+                        ``{}``.""".format(CHEF_VALIDATION_PEM_PATH))
+                },
+                'firstboot_path': {
+                    'type': 'string',
+                    'default': CHEF_FB_PATH,
+                    'description': dedent("""\
+                        Path to write run_list and initial_attributes keys that
+                        should also be present in this configuration, defaults
+                        to ``{}``.""".format(CHEF_FB_PATH))
+                },
+                'exec': {
+                    'type': 'boolean',
+                    'default': False,
+                    'description': dedent("""\
+                        Define if we should run or not run chef (defaults to
+                        false, unless a gem install is requested, where this
+                        will then default to true).""")
+                },
+                'client_key': {
+                    'type': 'string',
+                    'default': CHEF_RB_TPL_DEFAULTS['client_key'],
+                    'description': dedent("""\
+                        Optional path for client_cert. Defaults to
+                        ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key']))
+                },
+                'encrypted_data_bag_secret': {
+                    'type': 'string',
+                    'default': None,
+                    'description': dedent("""\
+                        Specifies the location of the secret key used by chef
+                        to encrypt data items. By default, this path is set
+                        to None, meaning that chef will have to look at the
+                        path ``{}`` for it.
+                        """.format(CHEF_ENCRYPTED_DATA_BAG_PATH))
+                },
+                'environment': {
+                    'type': 'string',
+                    'default': CHEF_ENVIRONMENT,
+                    'description': dedent("""\
+                        Specifies which environment chef will use. By default,
+                        it will use the ``{}`` configuration.
+                        """.format(CHEF_ENVIRONMENT))
+                },
+                'file_backup_path': {
+                    'type': 'string',
+                    'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'],
+                    'description': dedent("""\
+                        Specifies the location in which backup files are
+                        stored. By default, it uses the
+                        ``{}`` location.""".format(
+                        CHEF_RB_TPL_DEFAULTS['file_backup_path']))
+                },
+                'file_cache_path': {
+                    'type': 'string',
+                    'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'],
+                    'description': dedent("""\
+                        Specifies the location in which chef cache files will
+                        be saved. By default, it uses the ``{}``
+                        location.""".format(
+                        CHEF_RB_TPL_DEFAULTS['file_cache_path']))
+                },
+                'json_attribs': {
+                    'type': 'string',
+                    'default': CHEF_FB_PATH,
+                    'description': dedent("""\
+                        Specifies the location in which some chef json data is
+                        stored. By default, it uses the
+                        ``{}`` location.""".format(CHEF_FB_PATH))
+                },
+                'log_level': {
+                    'type': 'string',
+                    'default': CHEF_RB_TPL_DEFAULTS['log_level'],
+                    'description': dedent("""\
+                        Defines the level of logging to be stored in the log
+                        file. By default this value is set to ``{}``.
+                        """.format(CHEF_RB_TPL_DEFAULTS['log_level']))
+                },
+                'log_location': {
+                    'type': 'string',
+                    'default': CHEF_RB_TPL_DEFAULTS['log_location'],
+                    'description': dedent("""\
+                        Specifies the location of the chef log file. By
+                        default, the location is specified at
+                        ``{}``.""".format(
+                        CHEF_RB_TPL_DEFAULTS['log_location']))
+                },
+                'node_name': {
+                    'type': 'string',
+                    'description': dedent("""\
+                        The name of the node to run. By default, we will
+                        use the instance id as the node name.""")
+                },
+                'omnibus_url': {
+                    'type': 'string',
+                    'default': OMNIBUS_URL,
+                    'description': dedent("""\
+                        Omnibus URL if chef should be installed through
+                        Omnibus. By default, it uses the
+                        ``{}``.""".format(OMNIBUS_URL))
+                },
+                'omnibus_url_retries': {
+                    'type': 'integer',
+                    'default': OMNIBUS_URL_RETRIES,
+                    'description': dedent("""\
+                        The number of retries that will be attempted to reach
+                        the Omnibus URL.""")
+                },
+                'omnibus_version': {
+                    'type': 'string',
+                    'description': dedent("""\
+                        Optional version string to require for omnibus
+                        install.""")
+                },
+                'pid_file': {
+                    'type': 'string',
+                    'default': CHEF_RB_TPL_DEFAULTS['pid_file'],
+                    'description': dedent("""\
+                        The location in which a process identification
+                        number (pid) is saved. By default, it saves
+                        in the ``{}`` location.""".format(
+                        CHEF_RB_TPL_DEFAULTS['pid_file']))
+                },
+                'server_url': {
+                    'type': 'string',
+                    'description': 'The URL for the chef server'
+                },
+                'show_time': {
+                    'type': 'boolean',
+                    'default': True,
+                    'description': 'Show time in chef logs'
+                },
+                'ssl_verify_mode': {
+                    'type': 'string',
+                    'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'],
+                    'description': dedent("""\
+                        Set the verify mode for HTTPS requests. We can have
+                        two possible values for this parameter:
+
+                        - ``:verify_none``: No validation of SSL \
+                        certificates.
+                        - ``:verify_peer``: Validate all SSL certificates.
+
+                        By default, the parameter is set as ``{}``.
+                        """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode']))
+                },
+                'validation_name': {
+                    'type': 'string',
+                    'description': dedent("""\
+                        The name of the chef-validator key that Chef Infra
+                        Client uses to access the Chef Infra Server during
+                        the initial Chef Infra Client run.""")
+                },
+                'force_install': {
+                    'type': 'boolean',
+                    'default': False,
+                    'description': dedent("""\
+                        If set to ``True``, forces chef installation, even
+                        if it is already installed.""")
+                },
+                'initial_attributes': {
+                    'type': 'object',
+                    'items': {
+                        'type': 'string'
+                    },
+                    'description': dedent("""\
+                        Specify a list of initial attributes used by the
+                        cookbooks.""")
+                },
+                'install_type': {
+                    'type': 'string',
+                    'default': 'packages',
+                    'description': dedent("""\
+                        The type of installation for chef. It can be one of
+                        the following values:
+
+                        - ``packages``
+                        - ``gems``
+                        - ``omnibus``""")
+                },
+                'run_list': {
+                    'type': 'array',
+                    'items': {
+                        'type': 'string'
+                    },
+                    'description': 'A run list for a first boot json.'
+                },
+                "chef_license": {
+                    'type': 'string',
+                    'description': dedent("""\
+                        A string that indicates whether the user accepts or
+                        rejects the license related to some of chef's
+                        products.""")
+                }
+            }
+        }
+    }
+}
+
+__doc__ = get_schema_doc(schema)
 
 
 def post_run_chef(chef_cfg, log):
@@ -198,6 +407,8 @@ def handle(name, cfg, cloud, log, _args):
         log.debug(("Skipping module named %s,"
                    " no 'chef' key in configuration"), name)
         return
+
+    validate_cloudconfig_schema(cfg, schema)
     chef_cfg = cfg['chef']
 
     # Ensure the chef directories we use exist
@@ -225,7 +436,7 @@ def handle(name, cfg, cloud, log, _args):
     iid = str(cloud.datasource.get_instance_id())
     params = get_template_params(iid, chef_cfg, log)
     # Do a best effort attempt to ensure that the template values that
-    # are associated with paths have there parent directory created
+    # are associated with paths have their parent directory created
     # before they are used by the chef-client itself.
     param_paths = set()
     for (k, v) in params.items():
@@ -255,9 +466,10 @@ def handle(name, cfg, cloud, log, _args):
     # Try to install chef, if its not already installed...
     force_install = util.get_cfg_option_bool(chef_cfg, 'force_install',
                                              default=False)
-    if not is_installed() or force_install:
+    installed = subp.is_exe(CHEF_EXEC_PATH)
+    if not installed or force_install:
         run = install_chef(cloud, chef_cfg, log)
-    elif is_installed():
+    elif installed:
         run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
     else:
         run = False
@@ -273,7 +485,7 @@ def run_chef(chef_cfg, log):
         cmd_args = chef_cfg['exec_arguments']
         if isinstance(cmd_args, (list, tuple)):
             cmd.extend(cmd_args)
-        elif isinstance(cmd_args, six.string_types):
+        elif isinstance(cmd_args, str):
            cmd.append(cmd_args)
         else:
             log.warning("Unknown type %s provided for chef"
@@ -282,7 +494,32 @@ def run_chef(chef_cfg, log):
             cmd.extend(CHEF_EXEC_DEF_ARGS)
     else:
         cmd.extend(CHEF_EXEC_DEF_ARGS)
-    util.subp(cmd, capture=False)
+    subp.subp(cmd, capture=False)
+
+
+def subp_blob_in_tempfile(blob, *args, **kwargs):
+    """Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
+
+    'basename' as a kwarg allows providing the basename for the file.
+    The 'args' argument to subp will be updated with the full path to the
+    filename as the first argument.
+ """ + basename = kwargs.pop('basename', "subp_blob") + + if len(args) == 0 and 'args' not in kwargs: + args = [tuple()] + + # Use tmpdir over tmpfile to avoid 'text file busy' on execute + with temp_utils.tempdir(needs_exe=True) as tmpd: + tmpf = os.path.join(tmpd, basename) + if 'args' in kwargs: + kwargs['args'] = [tmpf] + list(kwargs['args']) + else: + args = list(args) + args[0] = [tmpf] + args[0] + + util.write_file(tmpf, blob, mode=0o700) + return subp.subp(*args, **kwargs) def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None): @@ -305,7 +542,7 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None): else: args = ['-v', omnibus_version] content = url_helper.readurl(url=url, retries=retries).contents - return util.subp_blob_in_tempfile( + return subp_blob_in_tempfile( blob=content, args=args, basename='chef-omnibus-install', capture=False) @@ -354,11 +591,11 @@ def install_chef_from_gems(ruby_version, chef_version, distro): if not os.path.exists('/usr/bin/ruby'): util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') if chef_version: - util.subp(['/usr/bin/gem', 'install', 'chef', + subp.subp(['/usr/bin/gem', 'install', 'chef', '-v %s' % chef_version, '--no-ri', '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) else: - util.subp(['/usr/bin/gem', 'install', 'chef', + subp.subp(['/usr/bin/gem', 'install', 'chef', '--no-ri', '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 610dbc8..4d5a6aa 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -28,8 +28,7 @@ location that this cloud-init has been configured with when running. """ import copy - -from six import StringIO +from io import StringIO from cloudinit import type_utils from cloudinit import util diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index 885b313..dff9324 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -26,6 +26,7 @@ by default. disable_ec2_metadata: """ +from cloudinit import subp from cloudinit import util from cloudinit.settings import PER_ALWAYS @@ -40,15 +41,15 @@ def handle(name, cfg, _cloud, log, _args): disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) if disabled: reject_cmd = None - if util.which('ip'): + if subp.which('ip'): reject_cmd = REJECT_CMD_IP - elif util.which('ifconfig'): + elif subp.which('ifconfig'): reject_cmd = REJECT_CMD_IF else: log.error(('Neither "route" nor "ip" command found, unable to ' 'manipulate routing table')) return - util.subp(reject_cmd, capture=False) + subp.subp(reject_cmd, capture=False) else: log.debug(("Skipping module named %s," " disabling the ec2 route not enabled"), name) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 29e192e..a7bdc70 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -99,6 +99,7 @@ specified using ``filesystem``. 
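
The cc_disk_setup hunks below also adopt explicit exception chaining. A
condensed sketch of the pattern (the runner is injected here only to keep the
example self-contained)::

    def get_hdd_size_sketch(run, device):
        # 'run' stands in for subp.subp and returns (stdout, stderr).
        try:
            size_in_bytes, _ = run(['blockdev', '--getsize64', device])
            sector_size, _ = run(['blockdev', '--getss', device])
        except Exception as e:
            # 'raise ... from e' keeps the root cause attached as
            # __cause__ instead of discarding the original traceback.
            raise Exception("Failed to get %s size\n%s" % (device, e)) from e
        return int(size_in_bytes) // int(sector_size)  # size in sectors
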
from cloudinit.settings import PER_INSTANCE from cloudinit import util +from cloudinit import subp import logging import os import shlex @@ -106,13 +107,13 @@ import shlex frequency = PER_INSTANCE # Define the commands to use -UDEVADM_CMD = util.which('udevadm') -SFDISK_CMD = util.which("sfdisk") -SGDISK_CMD = util.which("sgdisk") -LSBLK_CMD = util.which("lsblk") -BLKID_CMD = util.which("blkid") -BLKDEV_CMD = util.which("blockdev") -WIPEFS_CMD = util.which("wipefs") +UDEVADM_CMD = subp.which('udevadm') +SFDISK_CMD = subp.which("sfdisk") +SGDISK_CMD = subp.which("sgdisk") +LSBLK_CMD = subp.which("lsblk") +BLKID_CMD = subp.which("blkid") +BLKDEV_CMD = subp.which("blockdev") +WIPEFS_CMD = subp.which("wipefs") LANG_C_ENV = {'LANG': 'C'} @@ -163,7 +164,7 @@ def handle(_name, cfg, cloud, log, _args): def update_disk_setup_devices(disk_setup, tformer): # update 'disk_setup' dictionary anywhere were a device may occur # update it with the response from 'tformer' - for origname in disk_setup.keys(): + for origname in list(disk_setup): transformed = tformer(origname) if transformed is None or transformed == origname: continue @@ -248,9 +249,11 @@ def enumerate_disk(device, nodeps=False): info = None try: - info, _err = util.subp(lsblk_cmd) + info, _err = subp.subp(lsblk_cmd) except Exception as e: - raise Exception("Failed during disk check for %s\n%s" % (device, e)) + raise Exception( + "Failed during disk check for %s\n%s" % (device, e) + ) from e parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0] @@ -310,9 +313,11 @@ def check_fs(device): blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device] try: - out, _err = util.subp(blkid_cmd, rcs=[0, 2]) + out, _err = subp.subp(blkid_cmd, rcs=[0, 2]) except Exception as e: - raise Exception("Failed during disk check for %s\n%s" % (device, e)) + raise Exception( + "Failed during disk check for %s\n%s" % (device, e) + ) from e if out: if len(out.splitlines()) == 1: @@ -427,16 +432,16 @@ def get_dyn_func(*args): else: return globals()[func_name] - except KeyError: - raise Exception("No such function %s to call!" % func_name) + except KeyError as e: + raise Exception("No such function %s to call!" 
% func_name) from e def get_hdd_size(device): try: - size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device]) - sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device]) + size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device]) + sector_size, _ = subp.subp([BLKDEV_CMD, '--getss', device]) except Exception as e: - raise Exception("Failed to get %s size\n%s" % (device, e)) + raise Exception("Failed to get %s size\n%s" % (device, e)) from e return int(size_in_bytes) / int(sector_size) @@ -452,10 +457,11 @@ def check_partition_mbr_layout(device, layout): read_parttbl(device) prt_cmd = [SFDISK_CMD, "-l", device] try: - out, _err = util.subp(prt_cmd, data="%s\n" % layout) + out, _err = subp.subp(prt_cmd, data="%s\n" % layout) except Exception as e: - raise Exception("Error running partition command on %s\n%s" % ( - device, e)) + raise Exception( + "Error running partition command on %s\n%s" % (device, e) + ) from e found_layout = [] for line in out.splitlines(): @@ -482,10 +488,11 @@ def check_partition_mbr_layout(device, layout): def check_partition_gpt_layout(device, layout): prt_cmd = [SGDISK_CMD, '-p', device] try: - out, _err = util.subp(prt_cmd, update_env=LANG_C_ENV) + out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV) except Exception as e: - raise Exception("Error running partition command on %s\n%s" % ( - device, e)) + raise Exception( + "Error running partition command on %s\n%s" % (device, e) + ) from e out_lines = iter(out.splitlines()) # Skip header. Output looks like: @@ -655,9 +662,11 @@ def purge_disk(device): wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']] try: LOG.info("Purging filesystem on /dev/%s", d['name']) - util.subp(wipefs_cmd) - except Exception: - raise Exception("Failed FS purge of /dev/%s" % d['name']) + subp.subp(wipefs_cmd) + except Exception as e: + raise Exception( + "Failed FS purge of /dev/%s" % d['name'] + ) from e purge_disk_ptable(device) @@ -682,7 +691,7 @@ def read_parttbl(device): blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] util.udevadm_settle() try: - util.subp(blkdev_cmd) + subp.subp(blkdev_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) @@ -697,25 +706,27 @@ def exec_mkpart_mbr(device, layout): # Create the partitions prt_cmd = [SFDISK_CMD, "--Linux", "--unit=S", "--force", device] try: - util.subp(prt_cmd, data="%s\n" % layout) + subp.subp(prt_cmd, data="%s\n" % layout) except Exception as e: - raise Exception("Failed to partition device %s\n%s" % (device, e)) + raise Exception( + "Failed to partition device %s\n%s" % (device, e) + ) from e read_parttbl(device) def exec_mkpart_gpt(device, layout): try: - util.subp([SGDISK_CMD, '-Z', device]) + subp.subp([SGDISK_CMD, '-Z', device]) for index, (partition_type, (start, end)) in enumerate(layout): index += 1 - util.subp([SGDISK_CMD, + subp.subp([SGDISK_CMD, '-n', '{}:{}:{}'.format(index, start, end), device]) if partition_type is not None: # convert to a 4 char (or more) string right padded with 0 # 82 -> 8200. 
'Linux' -> 'Linux' pinput = str(partition_type).ljust(4, "0") - util.subp( + subp.subp( [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) except Exception: LOG.warning("Failed to partition device %s", device) @@ -825,6 +836,7 @@ def lookup_force_flag(fs): 'btrfs': '-f', 'xfs': '-f', 'reiserfs': '-f', + 'swap': '-f', } if 'ext' in fs.lower(): @@ -966,9 +978,9 @@ def mkfs(fs_cfg): fs_cmd) else: # Find the mkfs command - mkfs_cmd = util.which("mkfs.%s" % fs_type) + mkfs_cmd = subp.which("mkfs.%s" % fs_type) if not mkfs_cmd: - mkfs_cmd = util.which("mk%s" % fs_type) + mkfs_cmd = subp.which("mk%s" % fs_type) if not mkfs_cmd: LOG.warning("Cannot create fstype '%s'. No mkfs.%s command", @@ -982,7 +994,9 @@ def mkfs(fs_cfg): # File systems that support the -F flag if overwrite or device_type(device) == "disk": - fs_cmd.append(lookup_force_flag(fs_type)) + force_flag = lookup_force_flag(fs_type) + if force_flag: + fs_cmd.append(force_flag) # Add the extends FS options if fs_opts: @@ -991,8 +1005,8 @@ def mkfs(fs_cfg): LOG.debug("Creating file system %s on %s", label, device) LOG.debug(" Using cmd: %s", str(fs_cmd)) try: - util.subp(fs_cmd, shell=shell) + subp.subp(fs_cmd, shell=shell) except Exception as e: - raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) + raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index b342e04..b1d99f9 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -25,7 +25,7 @@ import os from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS -from cloudinit import util +from cloudinit import subp frequency = PER_ALWAYS @@ -43,9 +43,9 @@ def is_upstart_system(): del myenv['UPSTART_SESSION'] check_cmd = ['initctl', 'version'] try: - (out, _err) = util.subp(check_cmd, env=myenv) + (out, _err) = subp.subp(check_cmd, env=myenv) return 'upstart' in out - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: LOG.debug("'%s' returned '%s', not using upstart", ' '.join(check_cmd), e.exit_code) return False @@ -66,7 +66,7 @@ def handle(name, _cfg, cloud, log, args): for n in event_names: cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath] try: - util.subp(cmd) + subp.subp(cmd) except Exception as e: # TODO(harlowja), use log exception from utils?? 
log.warning("Emission of upstart event %s failed due to: %s", n, e) diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py index 0a135bb..77984bc 100644 --- a/cloudinit/config/cc_fan.py +++ b/cloudinit/config/cc_fan.py @@ -39,6 +39,7 @@ If cloud-init sees a ``fan`` entry in cloud-config it will: from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE +from cloudinit import subp from cloudinit import util LOG = logging.getLogger(__name__) @@ -62,8 +63,8 @@ def stop_update_start(service, config_file, content, systemd=False): def run(cmd, msg): try: - return util.subp(cmd, capture=True) - except util.ProcessExecutionError as e: + return subp.subp(cmd, capture=True) + except subp.ProcessExecutionError as e: LOG.warning("failed: %s (%s): %s", service, cmd, e) return False @@ -94,7 +95,7 @@ def handle(name, cfg, cloud, log, args): util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w") distro = cloud.distro - if not util.which('fanctl'): + if not subp.which('fanctl'): distro.install_packages(['ubuntu-fan']) stop_update_start( diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index fd14154..3441f7a 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -78,7 +78,7 @@ def handle(_name, cfg, cloud, log, args): boot_fin_fn = cloud.paths.boot_finished try: contents = "%s - %s - v. %s\n" % (uptime, ts, cver) - util.write_file(boot_fin_fn, contents) + util.write_file(boot_fin_fn, contents, ensure_dir_exists=False) except Exception: util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index aa9716e..237c3d0 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -22,11 +22,11 @@ mountpoint in the filesystem or a path to the block device in ``/dev``. The utility to use for resizing can be selected using the ``mode`` config key. If ``mode`` key is set to ``auto``, then any available utility (either -``growpart`` or ``gpart``) will be used. If neither utility is available, no -error will be raised. If ``mode`` is set to ``growpart``, then the ``growpart`` -utility will be used. If this utility is not available on the system, this will -result in an error. If ``mode`` is set to ``off`` or ``false``, then -``cc_growpart`` will take no action. +``growpart`` or BSD ``gpart``) will be used. If neither utility is available, +no error will be raised. If ``mode`` is set to ``growpart``, then the +``growpart`` utility will be used. If this utility is not available on the +system, this will result in an error. If ``mode`` is set to ``off`` or +``false``, then ``cc_growpart`` will take no action. There is some functionality overlap between this module and the ``growroot`` functionality of ``cloud-initramfs-tools``. 
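
The ``mode: auto`` behaviour described above amounts to a small selection
loop; sketched with hypothetical names standing in for the module's real
resizer table::

    def select_resizer(mode, resizers):
        # 'resizers' is an ordered list of (name, resizer) pairs, where
        # each resizer exposes an available() -> bool method.
        if mode in ('off', 'false'):
            return None
        names = [name for name, _ in resizers]
        if mode != 'auto' and mode not in names:
            raise TypeError('unknown growpart mode: %s' % mode)
        for name, resizer in resizers:
            if mode in (name, 'auto') and resizer.available():
                return resizer
        raise RuntimeError('no resizer available for mode %s' % mode)
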
However, there are some situations @@ -70,6 +70,7 @@ import stat from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS +from cloudinit import subp from cloudinit import util frequency = PER_ALWAYS @@ -131,39 +132,47 @@ class ResizeGrowPart(object): myenv['LANG'] = 'C' try: - (out, _err) = util.subp(["growpart", "--help"], env=myenv) - if re.search(r"--update\s+", out, re.DOTALL): + (out, _err) = subp.subp(["growpart", "--help"], env=myenv) + if re.search(r"--update\s+", out): return True - except util.ProcessExecutionError: + except subp.ProcessExecutionError: pass return False def resize(self, diskdev, partnum, partdev): before = get_size(partdev) try: - util.subp(["growpart", '--dry-run', diskdev, partnum]) - except util.ProcessExecutionError as e: + subp.subp(["growpart", '--dry-run', diskdev, partnum]) + except subp.ProcessExecutionError as e: if e.exit_code != 1: util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", diskdev, partnum) - raise ResizeFailedException(e) + raise ResizeFailedException(e) from e return (before, before) try: - util.subp(["growpart", diskdev, partnum]) - except util.ProcessExecutionError as e: + subp.subp(["growpart", diskdev, partnum]) + except subp.ProcessExecutionError as e: util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum) - raise ResizeFailedException(e) + raise ResizeFailedException(e) from e return (before, get_size(partdev)) class ResizeGpart(object): def available(self): - if not util.which('gpart'): - return False - return True + myenv = os.environ.copy() + myenv['LANG'] = 'C' + + try: + (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1]) + if re.search(r"gpart recover ", err): + return True + + except subp.ProcessExecutionError: + pass + return False def resize(self, diskdev, partnum, partdev): """ @@ -174,18 +183,18 @@ class ResizeGpart(object): be recovered. """ try: - util.subp(["gpart", "recover", diskdev]) - except util.ProcessExecutionError as e: + subp.subp(["gpart", "recover", diskdev]) + except subp.ProcessExecutionError as e: if e.exit_code != 0: util.logexc(LOG, "Failed: gpart recover %s", diskdev) - raise ResizeFailedException(e) + raise ResizeFailedException(e) from e before = get_size(partdev) try: - util.subp(["gpart", "resize", "-i", partnum, diskdev]) - except util.ProcessExecutionError as e: + subp.subp(["gpart", "resize", "-i", partnum, diskdev]) + except subp.ProcessExecutionError as e: util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev) - raise ResizeFailedException(e) + raise ResizeFailedException(e) from e # Since growing the FS requires a reboot, make sure we reboot # first when this module has finished. diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index a323edf..eb03c66 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -1,8 +1,9 @@ -# Copyright (C) 2009-2010 Canonical Ltd. +# Copyright (C) 2009-2010, 2020 Canonical Ltd. # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # # Author: Scott Moser # Author: Juerg Haefliger +# Author: Matthew Ruffell # # This file is part of cloud-init. See LICENSE file for license information. @@ -15,15 +16,15 @@ Configure which device is used as the target for grub installation. This module should work correctly by default without any user configuration. It can be enabled/disabled using the ``enabled`` config key in the ``grub_dpkg`` config dict. The global config key ``grub-dpkg`` is an alias for ``grub_dpkg``. 
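
For reference, the debconf payload that the handler below feeds to
``debconf-set-selections`` looks like this (the device value is an example
only)::

    idevs = '/dev/disk/by-id/scsi-example-disk'   # or '' in a container
    idevs_empty = 'false' if idevs else 'true'

    dconf_sel = (
        'grub-pc grub-pc/install_devices string %s\n'
        'grub-pc grub-pc/install_devices_empty boolean %s\n'
        % (idevs, idevs_empty))
    # subp.subp(['debconf-set-selections'], dconf_sel), as in handle() below
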
If no -installation device is specified this module will look for the first existing -device in: +installation device is specified this module will execute grub-probe to +determine which disk the /boot directory is associated with. - - ``/dev/sda`` - - ``/dev/vda`` - - ``/dev/xvda`` - - ``/dev/sda1`` - - ``/dev/vda1`` - - ``/dev/xvda1`` +The value which is placed into the debconf database is in the format which the +grub postinstall script expects. Normally, this is a /dev/disk/by-id/ value, +but we do fallback to the plain disk name if a by-id name is not present. + +If this module is executed inside a container, then the debconf database is +seeded with empty values, and install_devices_empty is set to true. **Internal name:** ``cc_grub_dpkg`` @@ -42,11 +43,68 @@ device in: import os +from cloudinit import subp from cloudinit import util +from cloudinit.subp import ProcessExecutionError distros = ['ubuntu', 'debian'] +def fetch_idevs(log): + """ + Fetches the /dev/disk/by-id device grub is installed to. + Falls back to plain disk name if no by-id entry is present. + """ + disk = "" + devices = [] + + try: + # get the root disk where the /boot directory resides. + disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'], + capture=True)[0].strip() + except ProcessExecutionError as e: + # grub-common may not be installed, especially on containers + # FileNotFoundError is a nested exception of ProcessExecutionError + if isinstance(e.reason, FileNotFoundError): + log.debug("'grub-probe' not found in $PATH") + # disks from the container host are present in /proc and /sys + # which is where grub-probe determines where /boot is. + # it then checks for existence in /dev, which fails as host disks + # are not exposed to the container. + elif "failed to get canonical path" in e.stderr: + log.debug("grub-probe 'failed to get canonical path'") + else: + # something bad has happened, continue to log the error + raise + except Exception: + util.logexc(log, "grub-probe failed to execute for grub-dpkg") + + if not disk or not os.path.exists(disk): + # If we failed to detect a disk, we can return early + return '' + + try: + # check if disk exists and use udevadm to fetch symlinks + devices = subp.subp( + ['udevadm', 'info', '--root', '--query=symlink', disk], + capture=True + )[0].strip().split() + except Exception: + util.logexc( + log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk + ) + + log.debug('considering these device symlinks: %s', ','.join(devices)) + # filter symlinks for /dev/disk/by-id entries + devices = [dev for dev in devices if 'disk/by-id' in dev] + log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices)) + # select first device if there is one, else fall back to plain name + idevs = sorted(devices)[0] if devices else disk + log.debug('selected %s', idevs) + + return idevs + + def handle(name, cfg, _cloud, log, _args): mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {})) @@ -62,22 +120,10 @@ def handle(name, cfg, _cloud, log, _args): idevs_empty = util.get_cfg_option_str( mycfg, "grub-pc/install_devices_empty", None) - if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or - (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))): - if idevs is None: - idevs = "" - if idevs_empty is None: - idevs_empty = "true" - else: - if idevs_empty is None: - idevs_empty = "false" - if idevs is None: - idevs = "/dev/sda" - for dev in ("/dev/sda", "/dev/vda", "/dev/xvda", - "/dev/sda1", "/dev/vda1", "/dev/xvda1"): - if os.path.exists(dev): 
- idevs = dev - break + if idevs is None: + idevs = fetch_idevs(log) + if idevs_empty is None: + idevs_empty = "false" if idevs else "true" # now idevs and idevs_empty are set to determined values # or, those set by user @@ -90,7 +136,7 @@ def handle(name, cfg, _cloud, log, _args): (idevs, idevs_empty)) try: - util.subp(['debconf-set-selections'], dconf_sel) + subp.subp(['debconf-set-selections'], dconf_sel) except Exception: util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg") diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index 8f8735c..0f2be52 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -9,10 +9,10 @@ """ Keys to Console --------------- -**Summary:** control which ssh keys may be written to console +**Summary:** control which SSH keys may be written to console -For security reasons it may be desirable not to write ssh fingerprints and keys -to the console. To avoid the fingerprint of types of ssh keys being written to +For security reasons it may be desirable not to write SSH fingerprints and keys +to the console. To avoid the fingerprint of types of SSH keys being written to console the ``ssh_fp_console_blacklist`` config key can be used. By default all types of keys will have their fingerprints written to console. To avoid keys of a key type being written to console the ``ssh_key_console_blacklist`` config @@ -33,6 +33,7 @@ key can be used. By default ``ssh-dss`` keys are not written to console. import os from cloudinit.settings import PER_INSTANCE +from cloudinit import subp from cloudinit import util frequency = PER_INSTANCE @@ -64,7 +65,7 @@ def handle(name, cfg, cloud, log, _args): try: cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)] - (stdout, _stderr) = util.subp(cmd) + (stdout, _stderr) = subp.subp(cmd) util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) except Exception: diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index eaf1e94..299c4d0 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -56,12 +56,12 @@ The following default client config is provided, but can be overridden:: """ import os - -from six import BytesIO +from io import BytesIO from configobj import ConfigObj from cloudinit import type_utils +from cloudinit import subp from cloudinit import util from cloudinit.settings import PER_INSTANCE @@ -117,7 +117,7 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE) util.write_file(LS_DEFAULT_FILE, "RUN=1\n") - util.subp(["service", "landscape-client", "restart"]) + subp.subp(["service", "landscape-client", "restart"]) def merge_together(objs): diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index f68c3cc..4f8b7bf 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -6,27 +6,58 @@ # # This file is part of cloud-init. See LICENSE file for license information. -""" -Locale ------- -**Summary:** set system locale +"""Locale: set system locale""" -Configure the system locale and apply it system wide. By default use the locale -specified by the datasource. 
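Taken out of context, the by-id selection that fetch_idevs() (earlier in this
patch) performs reduces to a small filter-and-sort; a standalone sketch, where
the helper name and sample symlink values are illustrative, not real udevadm
output::

    # Sketch of fetch_idevs()'s selection logic above.
    def pick_idev(disk, symlinks):
        by_id = [dev for dev in symlinks if 'disk/by-id' in dev]
        # first sorted by-id entry wins; otherwise use the plain disk name
        return sorted(by_id)[0] if by_id else disk

    print(pick_idev('/dev/sda', [
        '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_disk1',
        '/dev/disk/by-path/pci-0000:00:05.0',
    ]))                               # -> the disk/by-id symlink
    print(pick_idev('/dev/sda', []))  # -> /dev/sda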
+from textwrap import dedent -**Internal name:** ``cc_locale`` - -**Module frequency:** per instance +from cloudinit import util +from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.settings import PER_INSTANCE -**Supported distros:** all -**Config keys**:: +frequency = PER_INSTANCE +distros = ['all'] +schema = { + 'id': 'cc_locale', + 'name': 'Locale', + 'title': 'Set system locale', + 'description': dedent( + """\ + Configure the system locale and apply it system wide. By default use + the locale specified by the datasource.""" + ), + 'distros': distros, + 'examples': [ + dedent("""\ + # Set the locale to ar_AE + locale: ar_AE + """), + dedent("""\ + # Set the locale to fr_CA in /etc/alternate_path/locale + locale: fr_CA + locale_configfile: /etc/alternate_path/locale + """), + ], + 'frequency': frequency, + 'type': 'object', + 'properties': { + 'locale': { + 'type': 'string', + 'description': ( + "The locale to set as the system's locale (e.g. ar_PS)" + ), + }, + 'locale_configfile': { + 'type': 'string', + 'description': ( + "The file in which to write the locale configuration (defaults" + " to the distro's default location)" + ), + }, + }, +} - locale: - locale_configfile: -""" - -from cloudinit import util +__doc__ = get_schema_doc(schema) # Supplement python help() def handle(name, cfg, cloud, log, args): @@ -40,6 +71,8 @@ def handle(name, cfg, cloud, log, args): name, locale) return + validate_cloudconfig_schema(cfg, schema) + log.debug("Setting locale to %s", locale) locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile") cloud.distro.apply_locale(locale, locale_cfgfile) diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 151a984..7129c9c 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -48,6 +48,7 @@ lxd-bridge will be configured accordingly. 
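The schema-driven pattern cc_locale adopts here (shared with cc_ntp and
cc_snap later in this patch) behaves roughly like a plain jsonschema check; a
sketch assuming the jsonschema package, with a trimmed-down copy of the locale
schema::

    # Rough standalone equivalent of validate_cloudconfig_schema(cfg, schema).
    from jsonschema import ValidationError, validate

    locale_schema = {
        'type': 'object',
        'properties': {
            'locale': {'type': 'string'},
            'locale_configfile': {'type': 'string'},
        },
    }

    validate({'locale': 'fr_CA'}, locale_schema)      # passes silently
    try:
        validate({'locale': ['fr_CA']}, locale_schema)
    except ValidationError as e:
        print('invalid cloud-config:', e.message)     # list is not a string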
""" from cloudinit import log as logging +from cloudinit import subp from cloudinit import util import os @@ -85,16 +86,16 @@ def handle(name, cfg, cloud, log, args): # Install the needed packages packages = [] - if not util.which("lxd"): + if not subp.which("lxd"): packages.append('lxd') - if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'): + if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'): packages.append('zfsutils-linux') if len(packages): try: cloud.distro.install_packages(packages) - except util.ProcessExecutionError as exc: + except subp.ProcessExecutionError as exc: log.warning("failed to install packages %s: %s", packages, exc) return @@ -104,20 +105,20 @@ def handle(name, cfg, cloud, log, args): 'network_address', 'network_port', 'storage_backend', 'storage_create_device', 'storage_create_loop', 'storage_pool', 'trust_password') - util.subp(['lxd', 'waitready', '--timeout=300']) + subp.subp(['lxd', 'waitready', '--timeout=300']) cmd = ['lxd', 'init', '--auto'] for k in init_keys: if init_cfg.get(k): cmd.extend(["--%s=%s" % (k.replace('_', '-'), str(init_cfg[k]))]) - util.subp(cmd) + subp.subp(cmd) # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" if bridge_cfg: net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) if os.path.exists("/etc/default/lxd-bridge") \ - and util.which(dconf_comm): + and subp.which(dconf_comm): # Bridge configured through packaging debconf = bridge_to_debconf(bridge_cfg) @@ -127,7 +128,7 @@ def handle(name, cfg, cloud, log, args): log.debug("Setting lxd debconf via " + dconf_comm) data = "\n".join(["set %s %s" % (k, v) for k, v in debconf.items()]) + "\n" - util.subp(['debconf-communicate'], data) + subp.subp(['debconf-communicate'], data) except Exception: util.logexc(log, "Failed to run '%s' for lxd with" % dconf_comm) @@ -137,7 +138,7 @@ def handle(name, cfg, cloud, log, args): # Run reconfigure log.debug("Running dpkg-reconfigure for lxd") - util.subp(['dpkg-reconfigure', 'lxd', + subp.subp(['dpkg-reconfigure', 'lxd', '--frontend=noninteractive']) else: # Built-in LXD bridge support @@ -264,7 +265,7 @@ def _lxc(cmd): env = {'LC_ALL': 'C', 'HOME': os.environ.get('HOME', '/root'), 'USER': os.environ.get('USER', 'root')} - util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) + subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) def maybe_cleanup_default(net_name, did_init, create, attach, @@ -286,7 +287,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach, try: _lxc(["network", "delete", net_name]) LOG.debug(msg, net_name, succeeded) - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: if e.exit_code != 1: raise e LOG.debug(msg, net_name, fail_assume_enoent) @@ -296,7 +297,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach, try: _lxc(["profile", "device", "remove", profile, nic_name]) LOG.debug(msg, nic_name, profile, succeeded) - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: if e.exit_code != 1: raise e LOG.debug(msg, nic_name, profile, fail_assume_enoent) diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index d5f63f5..41ea4fc 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -49,15 +49,14 @@ private certificates for mcollective. 
Their values will be written to """ import errno - -import six -from six import BytesIO +import io # Used since this can maintain comments # and doesn't need a top level section from configobj import ConfigObj from cloudinit import log as logging +from cloudinit import subp from cloudinit import util PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" @@ -73,7 +72,7 @@ def configure(config, server_cfg=SERVER_CFG, # original file in order to be able to mix the rest up. try: old_contents = util.load_file(server_cfg, quiet=False, decode=False) - mcollective_config = ConfigObj(BytesIO(old_contents)) + mcollective_config = ConfigObj(io.BytesIO(old_contents)) except IOError as e: if e.errno != errno.ENOENT: raise @@ -93,7 +92,7 @@ def configure(config, server_cfg=SERVER_CFG, 'plugin.ssl_server_private'] = pricert_file mcollective_config['securityprovider'] = 'ssl' else: - if isinstance(cfg, six.string_types): + if isinstance(cfg, str): # Just set it in the 'main' section mcollective_config[cfg_name] = cfg elif isinstance(cfg, (dict)): @@ -119,7 +118,7 @@ def configure(config, server_cfg=SERVER_CFG, raise # Now we got the whole (new) file, write to disk... - contents = BytesIO() + contents = io.BytesIO() mcollective_config.write(contents) util.write_file(server_cfg, contents.getvalue(), mode=0o644) @@ -142,6 +141,6 @@ def handle(name, cfg, cloud, log, _args): configure(config=mcollective_cfg['conf']) # restart mcollective to handle updated config - util.subp(['service', 'mcollective', 'restart'], capture=False) + subp.subp(['service', 'mcollective', 'restart'], capture=False) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index c741c74..54f2f87 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -25,7 +25,7 @@ mountpoint (i.e. ``[ sda1 ]`` or ``[ sda1, null ]``). The ``mount_default_fields`` config key allows default options to be specified for the values in a ``mounts`` entry that are not specified, aside from the -``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 7 +``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 6 values. It defaults to:: mount_default_fields: [none, none, "auto", "defaults,nobootwait", "0", "2"] @@ -65,15 +65,19 @@ swap file is created. 
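The mcollective configure() shown above round-trips the existing config
through ConfigObj so comments survive; the core of that flow, assuming the
configobj package (the contents and key below are illustrative, not
mcollective's real server.cfg)::

    # ConfigObj read-modify-write as in configure() above.
    import io
    from configobj import ConfigObj

    old_contents = b"loglevel = info\n"
    cfg = ConfigObj(io.BytesIO(old_contents))   # comments are preserved
    cfg['securityprovider'] = 'ssl'

    out = io.BytesIO()
    cfg.write(out)                              # serialize back to bytes
    print(out.getvalue().decode())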
from string import whitespace import logging -import os.path +import os import re from cloudinit import type_utils +from cloudinit import subp from cloudinit import util # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0 DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) +# Name matches 'server:/path' +NETWORK_NAME_FILTER = r"^.+:.*" +NETWORK_NAME_RE = re.compile(NETWORK_NAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) FSTAB_PATH = "/etc/fstab" MNT_COMMENT = "comment=cloudconfig" @@ -93,6 +97,13 @@ def is_meta_device_name(name): return False +def is_network_device(name): + # return true if this is a network device + if NETWORK_NAME_RE.match(name): + return True + return False + + def _get_nth_partition_for_device(device_path, partition_number): potential_suffixes = [str(partition_number), 'p%s' % (partition_number,), '-part%s' % (partition_number,)] @@ -122,6 +133,9 @@ def sanitize_devname(startname, transformer, log): devname = "ephemeral0" log.debug("Adjusted mount option from ephemeral to ephemeral0") + if is_network_device(startname): + return startname + device_path, partition_number = util.expand_dotted_devname(devname) if is_meta_device_name(device_path): @@ -223,13 +237,59 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): return size +def create_swapfile(fname: str, size: str) -> None: + """Size is in MiB.""" + + errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s" + + def create_swap(fname, size, method): + LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'", + fname, fstype, method) + + if method == "fallocate": + cmd = ['fallocate', '-l', '%sM' % size, fname] + elif method == "dd": + cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M', + 'count=%s' % size] + + try: + subp.subp(cmd, capture=True) + except subp.ProcessExecutionError as e: + LOG.warning(errmsg, fname, size, method, e) + util.del_file(fname) + + swap_dir = os.path.dirname(fname) + util.ensure_dir(swap_dir) + + fstype = util.get_mount_info(swap_dir)[1] + + if (fstype == "xfs" and + util.kernel_version() < (4, 18)) or fstype == "btrfs": + create_swap(fname, size, "dd") + else: + try: + create_swap(fname, size, "fallocate") + except subp.ProcessExecutionError as e: + LOG.warning(errmsg, fname, size, "dd", e) + LOG.warning("Will attempt with dd.") + create_swap(fname, size, "dd") + + if os.path.exists(fname): + util.chmod(fname, 0o600) + try: + subp.subp(['mkswap', fname]) + except subp.ProcessExecutionError: + util.del_file(fname) + raise + + def setup_swapfile(fname, size=None, maxsize=None): """ fname: full path string of filename to setup size: the size to create. 
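create_swapfile() above falls back to 'dd' for btrfs, and for xfs on kernels
older than 4.18, and otherwise tries 'fallocate' first; just that decision, as
a standalone sketch (the helper name is illustrative)::

    # Standalone sketch of create_swapfile()'s method selection above.
    def swap_method(fstype, kernel_version):
        if fstype == "btrfs":
            return "dd"
        if fstype == "xfs" and kernel_version < (4, 18):
            return "dd"
        return "fallocate"

    assert swap_method("ext4", (5, 4)) == "fallocate"
    assert swap_method("btrfs", (5, 4)) == "dd"
    assert swap_method("xfs", (4, 15)) == "dd"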
set to "auto" for recommended maxsize: the maximum size """ - tdir = os.path.dirname(fname) + swap_dir = os.path.dirname(fname) if str(size).lower() == "auto": try: memsize = util.read_meminfo()['total'] @@ -237,28 +297,17 @@ def setup_swapfile(fname, size=None, maxsize=None): LOG.debug("Not creating swap: failed to read meminfo") return - util.ensure_dir(tdir) - size = suggested_swapsize(fsys=tdir, maxsize=maxsize, + util.ensure_dir(swap_dir) + size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize, memsize=memsize) + mibsize = str(int(size / (2 ** 20))) if not size: LOG.debug("Not creating swap: suggested size was 0") return - mbsize = str(int(size / (2 ** 20))) - msg = "creating swap file '%s' of %sMB" % (fname, mbsize) - try: - util.ensure_dir(tdir) - util.log_time(LOG.debug, msg, func=util.subp, - args=[['sh', '-c', - ('rm -f "$1" && umask 0066 && ' - '{ fallocate -l "${2}M" "$1" || ' - 'dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' - 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), - 'setup_swap', fname, mbsize]]) - - except Exception as e: - raise IOError("Failed %s: %s" % (msg, e)) + util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile, + args=[fname, mibsize]) return fname @@ -332,17 +381,18 @@ def handle(_name, cfg, cloud, log, _args): fstab_devs = {} fstab_removed = [] - for line in util.load_file(FSTAB_PATH).splitlines(): - if MNT_COMMENT in line: - fstab_removed.append(line) - continue + if os.path.exists(FSTAB_PATH): + for line in util.load_file(FSTAB_PATH).splitlines(): + if MNT_COMMENT in line: + fstab_removed.append(line) + continue - try: - toks = WS.split(line) - except Exception: - pass - fstab_devs[toks[0]] = line - fstab_lines.append(line) + try: + toks = WS.split(line) + except Exception: + pass + fstab_devs[toks[0]] = line + fstab_lines.append(line) for i in range(len(cfgmnt)): # skip something that wasn't a list @@ -492,9 +542,9 @@ def handle(_name, cfg, cloud, log, _args): for cmd in activate_cmds: fmt = "Activate mounts: %s:" + ' '.join(cmd) try: - util.subp(cmd) + subp.subp(cmd) log.debug(fmt, "PASS") - except util.ProcessExecutionError: + except subp.ProcessExecutionError: log.warning(fmt, "FAIL") util.logexc(log, fmt, "FAIL") diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 9e074bd..3d7279d 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -6,26 +6,26 @@ """NTP: enable and configure ntp""" -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +import copy +import os +from textwrap import dedent + from cloudinit import log as logging -from cloudinit.settings import PER_INSTANCE from cloudinit import temp_utils from cloudinit import templater from cloudinit import type_utils +from cloudinit import subp from cloudinit import util - -import copy -import os -import six -from textwrap import dedent +from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 -distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu'] +distros = ['alpine', 'centos', 'debian', 'fedora', 'opensuse', 'rhel', + 'sles', 'ubuntu'] NTP_CLIENT_CONFIG = { 'chrony': { @@ -64,6 +64,17 @@ NTP_CLIENT_CONFIG = { # This is Distro-specific configuration overrides of the base config DISTRO_CLIENT_CONFIG = { + 'alpine': { + 'chrony': { + 'confpath': '/etc/chrony/chrony.conf', + 'service_name': 'chronyd', 
+ }, + 'ntp': { + 'confpath': '/etc/ntp.conf', + 'packages': [], + 'service_name': 'ntpd', + }, + }, 'debian': { 'chrony': { 'confpath': '/etc/chrony/chrony.conf', @@ -115,11 +126,11 @@ schema = { Handle ntp configuration. If ntp is not installed on the system and ntp configuration is specified, ntp will be installed. If there is a default ntp config file in the image or one is present in the - distro's ntp package, it will be copied to ``/etc/ntp.conf.dist`` - before any changes are made. A list of ntp pools and ntp servers can - be provided under the ``ntp`` config key. If no ntp ``servers`` or - ``pools`` are provided, 4 pools will be used in the format - ``{0-3}.{distro}.pool.ntp.org``."""), + distro's ntp package, it will be copied to a file with ``.dist`` + appended to the filename before any changes are made. A list of ntp + pools and ntp servers can be provided under the ``ntp`` config key. + If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used + in the format ``{0-3}.{distro}.pool.ntp.org``."""), 'distros': distros, 'examples': [ dedent("""\ @@ -171,8 +182,11 @@ schema = { 'uniqueItems': True, 'description': dedent("""\ List of ntp pools. If both pools and servers are - empty, 4 default pool servers will be provided of - the format ``{0-3}.{distro}.pool.ntp.org``.""") + empty, 4 default pool servers will be provided of + the format ``{0-3}.{distro}.pool.ntp.org``. NOTE: + for Alpine Linux when using the Busybox NTP client + this setting will be ignored due to the limited + functionality of Busybox's ntpd.""") }, 'servers': { 'type': 'array', @@ -183,46 +197,46 @@ schema = { 'uniqueItems': True, 'description': dedent("""\ List of ntp servers. If both pools and servers are - empty, 4 default pool servers will be provided with - the format ``{0-3}.{distro}.pool.ntp.org``.""") + empty, 4 default pool servers will be provided with + the format ``{0-3}.{distro}.pool.ntp.org``.""") }, 'ntp_client': { 'type': 'string', 'default': 'auto', 'description': dedent("""\ Name of an NTP client to use to configure system NTP. - When unprovided or 'auto' the default client preferred - by the distribution will be used. The following - built-in client names can be used to override existing - configuration defaults: chrony, ntp, ntpdate, - systemd-timesyncd."""), + When unprovided or 'auto' the default client preferred + by the distribution will be used. The following + built-in client names can be used to override existing + configuration defaults: chrony, ntp, ntpdate, + systemd-timesyncd."""), }, 'enabled': { 'type': 'boolean', 'default': True, 'description': dedent("""\ Attempt to enable ntp clients if set to True. If set - to False, ntp client will not be configured or - installed"""), + to False, ntp client will not be configured or + installed"""), }, 'config': { 'description': dedent("""\ Configuration settings or overrides for the - ``ntp_client`` specified."""), + ``ntp_client`` specified."""), 'type': ['object'], 'properties': { 'confpath': { 'type': 'string', 'description': dedent("""\ The path to where the ``ntp_client`` - configuration is written."""), + configuration is written."""), }, 'check_exe': { 'type': 'string', 'description': dedent("""\ The executable name for the ``ntp_client``. 
- For example, ntp service ``check_exe`` is - 'ntpd' because it runs the ntpd binary."""), + For example, ntp service ``check_exe`` is + 'ntpd' because it runs the ntpd binary."""), }, 'packages': { 'type': 'array', @@ -232,22 +246,22 @@ schema = { 'uniqueItems': True, 'description': dedent("""\ List of packages needed to be installed for the - selected ``ntp_client``."""), + selected ``ntp_client``."""), }, 'service_name': { 'type': 'string', 'description': dedent("""\ The systemd or sysvinit service name used to - start and stop the ``ntp_client`` - service."""), + start and stop the ``ntp_client`` + service."""), }, 'template': { 'type': 'string', 'description': dedent("""\ Inline template allowing users to define their - own ``ntp_client`` configuration template. - The value must start with '## template:jinja' - to enable use of templating support. + own ``ntp_client`` configuration template. + The value must start with '## template:jinja' + to enable use of templating support. """), }, }, @@ -309,7 +323,7 @@ def select_ntp_client(ntp_client, distro): if distro_ntp_client == "auto": for client in distro.preferred_ntp_clients: cfg = distro_cfg.get(client) - if util.which(cfg.get('check_exe')): + if subp.which(cfg.get('check_exe')): LOG.debug('Selected NTP client "%s", already installed', client) clientcfg = cfg @@ -338,7 +352,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"): @param check_exe: string. The name of a binary that indicates the package the specified package is already installed. """ - if util.which(check_exe): + if subp.which(check_exe): return if packages is None: packages = ['ntp'] @@ -365,21 +379,30 @@ def generate_server_names(distro): """ names = [] pool_distro = distro - # For legal reasons x.pool.sles.ntp.org does not exist, - # use the opensuse pool + if distro == 'sles': + # For legal reasons x.pool.sles.ntp.org does not exist, + # use the opensuse pool pool_distro = 'opensuse' + elif distro == 'alpine': + # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist + # so use general x.pool.ntp.org instead. + pool_distro = '' + for x in range(0, NR_POOL_SERVERS): - name = "%d.%s.pool.ntp.org" % (x, pool_distro) - names.append(name) + names.append(".".join( + [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n])) + return names -def write_ntp_config_template(distro_name, servers=None, pools=None, - path=None, template_fn=None, template=None): +def write_ntp_config_template(distro_name, service_name=None, servers=None, + pools=None, path=None, template_fn=None, + template=None): """Render a ntp client configuration for the specified client. @param distro_name: string. The distro class name. + @param service_name: string. The name of the NTP client service. @param servers: A list of strings specifying ntp servers. Defaults to empty list. @param pools: A list of strings specifying ntp pools. Defaults to empty @@ -398,7 +421,14 @@ def write_ntp_config_template(distro_name, servers=None, pools=None, if not pools: pools = [] - if len(servers) == 0 and len(pools) == 0: + if (len(servers) == 0 and distro_name == 'alpine' and + service_name == 'ntpd'): + # Alpine's Busybox ntpd only understands "servers" configuration + # and not "pool" configuration. 
+ servers = generate_server_names(distro_name) + LOG.debug( + 'Adding distro default ntp servers: %s', ','.join(servers)) + elif len(servers) == 0 and len(pools) == 0: pools = generate_server_names(distro_name) LOG.debug( 'Adding distro default ntp pool servers: %s', ','.join(pools)) @@ -433,7 +463,7 @@ def reload_ntp(service, systemd=False): cmd = ['systemctl', 'reload-or-restart', service] else: cmd = ['service', service, 'restart'] - util.subp(cmd, capture=True) + subp.subp(cmd, capture=True) def supplemental_schema_validation(ntp_config): @@ -460,7 +490,7 @@ def supplemental_schema_validation(ntp_config): for key, value in sorted(ntp_config.items()): keypath = 'ntp:config:' + key if key == 'confpath': - if not all([value, isinstance(value, six.string_types)]): + if not all([value, isinstance(value, str)]): errors.append( 'Expected a config file path {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) @@ -472,11 +502,11 @@ def supplemental_schema_validation(ntp_config): elif key in ('template', 'template_name'): if value is None: # Either template or template_name can be none continue - if not isinstance(value, six.string_types): + if not isinstance(value, str): errors.append( 'Expected a string type for {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) - elif not isinstance(value, six.string_types): + elif not isinstance(value, str): errors.append( 'Expected a string type for {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) @@ -533,6 +563,8 @@ def handle(name, cfg, cloud, log, _args): raise RuntimeError(msg) write_ntp_config_template(cloud.distro.name, + service_name=ntp_client_config.get( + 'service_name'), servers=ntp_cfg.get('servers', []), pools=ntp_cfg.get('pools', []), path=ntp_client_config.get('confpath'), @@ -545,7 +577,7 @@ def handle(name, cfg, cloud, log, _args): try: reload_ntp(ntp_client_config['service_name'], systemd=cloud.distro.uses_systemd()) - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: LOG.exception("Failed to reload/start ntp service: %s", e) raise diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index 86afffe..036baf8 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -43,6 +43,7 @@ import os import time from cloudinit import log as logging +from cloudinit import subp from cloudinit import util REBOOT_FILE = "/var/run/reboot-required" @@ -57,7 +58,7 @@ def _multi_cfg_bool_get(cfg, *keys): def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): - util.subp(REBOOT_CMD) + subp.subp(REBOOT_CMD) start = time.time() wait_time = initial_sleep for _i in range(0, wait_attempts): diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index b8e2709..733c391 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -19,6 +19,7 @@ keys to post. 
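generate_server_names() above drops the distro component entirely for Alpine
(and remaps sles to opensuse), which the join-and-filter idiom handles in one
place; the same logic in isolation (helper name illustrative)::

    # Sketch of generate_server_names() above: empty name parts are
    # filtered out of the join, so Alpine gets plain pool.ntp.org names.
    def server_names(distro, count=4):
        pool_distro = {'sles': 'opensuse', 'alpine': ''}.get(distro, distro)
        return ['.'.join(p for p in (str(x), pool_distro, 'pool.ntp.org') if p)
                for x in range(count)]

    print(server_names('debian'))  # ['0.debian.pool.ntp.org', ...]
    print(server_names('alpine'))  # ['0.pool.ntp.org', ...]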
Available keys are: - ``pub_key_dsa`` - ``pub_key_rsa`` - ``pub_key_ecdsa`` + - ``pub_key_ed25519`` - ``instance_id`` - ``hostname`` - ``fdqn`` @@ -52,6 +53,7 @@ POST_LIST_ALL = [ 'pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', + 'pub_key_ed25519', 'instance_id', 'hostname', 'fqdn' @@ -105,6 +107,7 @@ def handle(name, cfg, cloud, log, args): 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub', 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub', 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub', + 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub', } for (n, path) in pubkeys.items(): diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 43a479c..6fcb8a7 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -22,9 +22,8 @@ The ``delay`` key specifies a duration to be added onto any shutdown command used. Therefore, if a 5 minute delay and a 120 second shutdown are specified, the maximum amount of time between cloud-init starting and the system shutting down is 7 minutes, and the minimum amount of time is 5 minutes. The ``delay`` -key must have an argument in a form that the ``shutdown`` utility recognizes. -The most common format is the form ``+5`` for 5 minutes. See ``man shutdown`` -for more options. +key must have an argument in either the form ``+5`` for 5 minutes or ``now`` +for immediate shutdown. Optionally, a command can be run to determine whether or not the system should shut down. The command to be run should be specified in the @@ -33,6 +32,10 @@ the system should shut down. The command to be run should be specified in the ``condition`` key is omitted or the command specified by the ``condition`` key returns 0. +.. note:: + With Alpine Linux any message value specified is ignored as Alpine's halt, + poweroff, and reboot commands do not support broadcasting a message. + **Internal name:** ``cc_power_state_change`` **Module frequency:** per instance @@ -49,16 +52,16 @@ key returns 0. condition: """ -from cloudinit.settings import PER_INSTANCE -from cloudinit import util - import errno import os import re -import six import subprocess import time +from cloudinit.settings import PER_INSTANCE +from cloudinit import subp +from cloudinit import util + frequency = PER_INSTANCE EXIT_FAIL = 254 @@ -72,7 +75,7 @@ def givecmdline(pid): # PID COMM ARGS # 1 init /bin/init -- if util.is_FreeBSD(): - (output, _err) = util.subp(['procstat', '-c', str(pid)]) + (output, _err) = subp.subp(['procstat', '-c', str(pid)]) line = output.splitlines()[1] m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) return m.group(2) @@ -112,9 +115,9 @@ def check_condition(cond, log=None): return False -def handle(_name, cfg, _cloud, log, _args): +def handle(_name, cfg, cloud, log, _args): try: - (args, timeout, condition) = load_power_state(cfg) + (args, timeout, condition) = load_power_state(cfg, cloud.distro.name) if args is None: log.debug("no power_state provided. 
doing nothing") return @@ -141,7 +144,19 @@ def handle(_name, cfg, _cloud, log, _args): condition, execmd, [args, devnull_fp]) -def load_power_state(cfg): +def convert_delay(delay, fmt=None, scale=None): + if not fmt: + fmt = "+%s" + if not scale: + scale = 1 + + if delay != "now": + delay = fmt % int(int(delay) * int(scale)) + + return delay + + +def load_power_state(cfg, distro_name): # returns a tuple of shutdown_command, timeout # shutdown_command is None if no config found pstate = cfg.get('power_state') @@ -161,29 +176,45 @@ def load_power_state(cfg): (','.join(opt_map.keys()), mode)) delay = pstate.get("delay", "now") - # convert integer 30 or string '30' to '+30' - try: - delay = "+%s" % int(delay) - except ValueError: - pass + message = pstate.get("message") + scale = 1 + fmt = "+%s" + command = ["shutdown", opt_map[mode]] + + if distro_name == 'alpine': + # Convert integer 30 or string '30' to '1800' (seconds) as Alpine's + # halt/poweroff/reboot commands take seconds rather than minutes. + scale = 60 + # No "+" in front of delay value as not supported by Alpine's commands. + fmt = "%s" + if delay == "now": + # Alpine's commands do not understand "now". + delay = "0" + command = [mode, "-d"] + # Alpine's commands don't support a message. + message = None - if delay != "now" and not re.match(r"\+[0-9]+", delay): + try: + delay = convert_delay(delay, fmt=fmt, scale=scale) + except ValueError as e: raise TypeError( "power_state[delay] must be 'now' or '+m' (minutes)." - " found '%s'." % delay) + " found '%s'." % delay + ) from e - args = ["shutdown", opt_map[mode], delay] - if pstate.get("message"): - args.append(pstate.get("message")) + args = command + [delay] + if message: + args.append(message) try: timeout = float(pstate.get('timeout', 30.0)) - except ValueError: - raise ValueError("failed to convert timeout '%s' to float." % - pstate['timeout']) + except ValueError as e: + raise ValueError( + "failed to convert timeout '%s' to float." % pstate['timeout'] + ) from e condition = pstate.get("condition", True) - if not isinstance(condition, six.string_types + (list, bool)): + if not isinstance(condition, (str, list, bool)): raise TypeError("condition type %s invalid. 
must be list, bool, str") return (args, timeout, condition) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index b088db6..bc981cf 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -77,13 +77,13 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 """ -from six import StringIO - import os import socket import yaml +from io import StringIO from cloudinit import helpers +from cloudinit import subp from cloudinit import util PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' @@ -106,14 +106,14 @@ class PuppetConstants(object): def _autostart_puppet(log): # Set puppet to automatically start if os.path.exists('/etc/default/puppet'): - util.subp(['sed', '-i', + subp.subp(['sed', '-i', '-e', 's/^START=.*/START=yes/', '/etc/default/puppet'], capture=False) elif os.path.exists('/bin/systemctl'): - util.subp(['/bin/systemctl', 'enable', 'puppet.service'], + subp.subp(['/bin/systemctl', 'enable', 'puppet.service'], capture=False) elif os.path.exists('/sbin/chkconfig'): - util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) + subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) else: log.warning(("Sorry we do not know how to enable" " puppet services on this system")) @@ -160,9 +160,9 @@ def handle(name, cfg, cloud, log, _args): cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) # Move to puppet_config.read_file when dropping py2.7 - puppet_config.readfp( # pylint: disable=W1505 + puppet_config.read_file( StringIO(cleaned_contents), - filename=p_constants.conf_path) + source=p_constants.conf_path) for (cfg_name, cfg) in puppet_cfg['conf'].items(): # Cert configuration is a special case # Dump the puppet master ca certificate in the correct place @@ -204,6 +204,6 @@ def handle(name, cfg, cloud, log, _args): _autostart_puppet(log) # Start puppetd - util.subp(['service', 'puppet', 'start'], capture=False) + subp.subp(['service', 'puppet', 'start'], capture=False) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 01dfc12..978d2ee 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -19,6 +19,7 @@ from textwrap import dedent from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit.settings import PER_ALWAYS +from cloudinit import subp from cloudinit import util NOBLOCK = "noblock" @@ -88,11 +89,11 @@ def _resize_zfs(mount_point, devpth): def _get_dumpfs_output(mount_point): - return util.subp(['dumpfs', '-m', mount_point])[0] + return subp.subp(['dumpfs', '-m', mount_point])[0] def _get_gpart_output(part): - return util.subp(['gpart', 'show', part])[0] + return subp.subp(['gpart', 'show', part])[0] def _can_skip_resize_ufs(mount_point, devpth): @@ -117,14 +118,12 @@ def _can_skip_resize_ufs(mount_point, devpth): if o == "-f": frag_sz = int(a) # check the current partition size - """ - # gpart show /dev/da0 -=> 40 62914480 da0 GPT (30G) - 40 1024 1 freebsd-boot (512K) - 1064 58719232 2 freebsd-ufs (28G) - 58720296 3145728 3 freebsd-swap (1.5G) - 61866024 1048496 - free - (512M) - """ + # Example output from `gpart show /dev/da0`: + # => 40 62914480 da0 GPT (30G) + # 40 1024 1 freebsd-boot (512K) + # 1064 58719232 2 freebsd-ufs (28G) + # 58720296 3145728 3 freebsd-swap (1.5G) + # 61866024 1048496 - free - (512M) expect_sz = None m = re.search('^(/dev/.+)p([0-9])$', devpth) 
gpart_res = _get_gpart_output(m.group(1)) @@ -306,8 +305,8 @@ def handle(name, cfg, _cloud, log, args): def do_resize(resize_cmd, log): try: - util.subp(resize_cmd) - except util.ProcessExecutionError: + subp.subp(resize_cmd) + except subp.ProcessExecutionError: util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd) raise # TODO(harlowja): Should we add a fsck check after this to make diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 69f4768..519e66e 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -30,7 +30,7 @@ are configured correctly. **Module frequency:** per instance -**Supported distros:** fedora, rhel, sles +**Supported distros:** alpine, fedora, rhel, sles **Config keys**:: @@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__) frequency = PER_INSTANCE -distros = ['fedora', 'opensuse', 'rhel', 'sles'] +distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles'] def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"): diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 28c79b8..28d62e9 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -39,6 +39,7 @@ Subscription`` example config. """ from cloudinit import log as logging +from cloudinit import subp from cloudinit import util LOG = logging.getLogger(__name__) @@ -173,7 +174,7 @@ class SubscriptionManager(object): try: _sub_man_cli(cmd) - except util.ProcessExecutionError: + except subp.ProcessExecutionError: return False return True @@ -200,7 +201,7 @@ class SubscriptionManager(object): try: return_out = _sub_man_cli(cmd, logstring_val=True)[0] - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " "to: {0}".format(e.stderr)) @@ -223,7 +224,7 @@ class SubscriptionManager(object): # Attempting to register the system only try: return_out = _sub_man_cli(cmd, logstring_val=True)[0] - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " "to: {0}".format(e.stderr)) @@ -246,7 +247,7 @@ class SubscriptionManager(object): try: return_out = _sub_man_cli(cmd)[0] - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: if e.stdout.rstrip() != '': for line in e.stdout.split("\n"): if line != '': @@ -264,7 +265,7 @@ class SubscriptionManager(object): cmd = ['attach', '--auto'] try: return_out = _sub_man_cli(cmd)[0] - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: self.log_warn("Auto-attach failed with: {0}".format(e)) return False for line in return_out.split("\n"): @@ -341,7 +342,7 @@ class SubscriptionManager(object): "system: %s", (", ".join(pool_list)) .replace('--pool=', '')) return True - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: self.log_warn("Unable to attach pool {0} " "due to {1}".format(pool, e)) return False @@ -414,7 +415,7 @@ class SubscriptionManager(object): try: _sub_man_cli(cmd) - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: self.log_warn("Unable to alter repos due to {0}".format(e)) return False @@ -432,11 +433,11 @@ class SubscriptionManager(object): def _sub_man_cli(cmd, logstring_val=False): ''' - Uses the prefered cloud-init subprocess def of util.subp + Uses the prefered cloud-init subprocess def of 
subp.subp and runs subscription-manager. Breaking this to a separate function for later use in mocking and unittests ''' - return util.subp(['subscription-manager'] + cmd, + return subp.subp(['subscription-manager'] + cmd, logstring=logstring_val) diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index bd8ee89..a5aca03 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -50,13 +50,12 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``. # import os +from urllib.parse import parse_qs from cloudinit.settings import PER_INSTANCE from cloudinit import url_helper as uhelp from cloudinit import util -from six.moves.urllib_parse import parse_qs - frequency = PER_INSTANCE MY_NAME = "cc_rightscale_userdata" diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index ff211f6..2a2bc93 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -180,9 +180,9 @@ config entries. Legacy to new mappings are as follows: import os import re -import six from cloudinit import log as logging +from cloudinit import subp from cloudinit import util DEF_FILENAME = "20-cloud-config.conf" @@ -216,7 +216,7 @@ def reload_syslog(command=DEF_RELOAD, systemd=False): cmd = ['service', service, 'restart'] else: cmd = command - util.subp(cmd, capture=True) + subp.subp(cmd, capture=True) def load_config(cfg): @@ -233,9 +233,9 @@ def load_config(cfg): fillup = ( (KEYNAME_CONFIGS, [], list), - (KEYNAME_DIR, DEF_DIR, six.string_types), - (KEYNAME_FILENAME, DEF_FILENAME, six.string_types), - (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)), + (KEYNAME_DIR, DEF_DIR, str), + (KEYNAME_FILENAME, DEF_FILENAME, str), + (KEYNAME_RELOAD, DEF_RELOAD, (str, list)), (KEYNAME_REMOTES, DEF_REMOTES, dict)) for key, default, vtypes in fillup: @@ -347,8 +347,10 @@ class SyslogRemotesLine(object): if self.port: try: int(self.port) - except ValueError: - raise ValueError("port '%s' is not an integer" % self.port) + except ValueError as e: + raise ValueError( + "port '%s' is not an integer" % self.port + ) from e if not self.addr: raise ValueError("address is required") @@ -430,7 +432,7 @@ def handle(name, cfg, cloud, log, _args): restarted = reload_syslog( command=mycfg[KEYNAME_RELOAD], systemd=cloud.distro.uses_systemd()), - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: restarted = False log.warning("Failed to reload syslog", e) diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index 1c991d8..b61876a 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -45,7 +45,9 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``. import os -from cloudinit import safeyaml, util +from cloudinit import safeyaml, subp, util +from cloudinit.distros import rhel_util + # Note: see https://docs.saltstack.com/en/latest/topics/installation/ # Note: see https://docs.saltstack.com/en/latest/ref/configuration/ @@ -123,10 +125,11 @@ def handle(name, cfg, cloud, log, _args): # we need to have the salt minion service enabled in rc in order to be # able to start the service. this does only apply on FreeBSD servers. if cloud.distro.osfamily == 'freebsd': - cloud.distro.updatercconf('salt_minion_enable', 'YES') + rhel_util.update_sysconfig_file( + '/etc/rc.conf', {'salt_minion_enable': 'YES'}) # restart salt-minion. 'service' will start even if not started. 
if it # was started, it needs to be restarted for config change. - util.subp(['service', const.srv_name, 'restart'], capture=False) + subp.subp(['service', const.srv_name, 'restart'], capture=False) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py index 588e1b0..1e3f419 100644 --- a/cloudinit/config/cc_scripts_per_boot.py +++ b/cloudinit/config/cc_scripts_per_boot.py @@ -24,7 +24,7 @@ module does not accept any config keys. import os -from cloudinit import util +from cloudinit import subp from cloudinit.settings import PER_ALWAYS @@ -38,7 +38,7 @@ def handle(name, _cfg, cloud, log, _args): # https://forums.aws.amazon.com/thread.jspa?threadID=96918 runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR) try: - util.runparts(runparts_path) + subp.runparts(runparts_path) except Exception: log.warning("Failed to run module %s (%s in %s)", name, SCRIPT_SUBDIR, runparts_path) diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py index 75549b5..5966fb9 100644 --- a/cloudinit/config/cc_scripts_per_instance.py +++ b/cloudinit/config/cc_scripts_per_instance.py @@ -27,7 +27,7 @@ the system. As a result per-instance scripts will run again. import os -from cloudinit import util +from cloudinit import subp from cloudinit.settings import PER_INSTANCE @@ -41,7 +41,7 @@ def handle(name, _cfg, cloud, log, _args): # https://forums.aws.amazon.com/thread.jspa?threadID=96918 runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR) try: - util.runparts(runparts_path) + subp.runparts(runparts_path) except Exception: log.warning("Failed to run module %s (%s in %s)", name, SCRIPT_SUBDIR, runparts_path) diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py index 259bdfa..bcca859 100644 --- a/cloudinit/config/cc_scripts_per_once.py +++ b/cloudinit/config/cc_scripts_per_once.py @@ -25,7 +25,7 @@ be run in alphabetical order. This module does not accept any config keys. import os -from cloudinit import util +from cloudinit import subp from cloudinit.settings import PER_ONCE @@ -39,7 +39,7 @@ def handle(name, _cfg, cloud, log, _args): # https://forums.aws.amazon.com/thread.jspa?threadID=96918 runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR) try: - util.runparts(runparts_path) + subp.runparts(runparts_path) except Exception: log.warning("Failed to run module %s (%s in %s)", name, SCRIPT_SUBDIR, runparts_path) diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py index d940dbd..215703e 100644 --- a/cloudinit/config/cc_scripts_user.py +++ b/cloudinit/config/cc_scripts_user.py @@ -27,7 +27,7 @@ This module does not accept any config keys. import os -from cloudinit import util +from cloudinit import subp from cloudinit.settings import PER_INSTANCE @@ -42,7 +42,7 @@ def handle(name, _cfg, cloud, log, _args): # go here... runparts_path = os.path.join(cloud.get_ipath_cur(), SCRIPT_SUBDIR) try: - util.runparts(runparts_path) + subp.runparts(runparts_path) except Exception: log.warning("Failed to run module %s (%s in %s)", name, SCRIPT_SUBDIR, runparts_path) diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py index faac924..e0a4bff 100644 --- a/cloudinit/config/cc_scripts_vendor.py +++ b/cloudinit/config/cc_scripts_vendor.py @@ -28,6 +28,7 @@ entry under the ``vendor_data`` config key. 
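All four scripts modules above now delegate to subp.runparts(); conceptually
it executes every executable file in a directory in sorted order and raises if
any fail. A rough sketch of that behavior, not cloud-init's exact
implementation::

    # Approximation of what subp.runparts(path) does for the modules above.
    import os
    import subprocess

    def runparts_sketch(dirpath):
        failed = []
        for name in sorted(os.listdir(dirpath)):
            path = os.path.join(dirpath, name)
            if os.path.isfile(path) and os.access(path, os.X_OK):
                if subprocess.call([path]) != 0:
                    failed.append(name)
        if failed:
            raise RuntimeError("failed scripts: %s" % ','.join(failed))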
 import os
 
+from cloudinit import subp
 from cloudinit import util
 from cloudinit.settings import PER_INSTANCE
 
@@ -46,7 +47,7 @@ def handle(name, cfg, cloud, log, _args):
     prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
 
     try:
-        util.runparts(runparts_path, exe_prefix=prefix)
+        subp.runparts(runparts_path, exe_prefix=prefix)
     except Exception:
         log.warning("Failed to run module %s (%s in %s)",
                     name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index a5d7c73..4fb9b44 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -61,11 +61,11 @@ used::
 
 import base64
 import os
-
-from six import BytesIO
+from io import BytesIO
 
 from cloudinit import log as logging
 from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
 from cloudinit import util
 
 frequency = PER_INSTANCE
@@ -93,14 +93,14 @@ def handle_random_seed_command(command, required, env=None):
         return
 
     cmd = command[0]
-    if not util.which(cmd):
+    if not subp.which(cmd):
         if required:
             raise ValueError(
                 "command '{cmd}' not found but required=true".format(cmd=cmd))
         else:
             LOG.debug("command '%s' not found for seed_command", cmd)
             return
-    util.subp(command, env=env, capture=False)
+    subp.subp(command, env=env, capture=False)
 
 
 def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index a0febc3..1d23d80 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -21,9 +21,17 @@ key, and the fqdn of the cloud will be used. If a fqdn specified with the
 the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn``
 will be used.
 
+This module will run in the init-local stage before networking is configured
+if the hostname is set by metadata or user data on the local system.
+
+This will occur on datasources like nocloud and ovf where metadata and user
+data are available locally. This ensures that the desired hostname is applied
+before any DHCP requests are performed on these platforms where dynamic DNS is
+based on initial hostname.
+
 **Internal name:** ``cc_set_hostname``
 
-**Module frequency:** per instance
+**Module frequency:** per always
 
 **Supported distros:** all
 
@@ -47,7 +55,6 @@ class SetHostnameError(Exception):
     This may happen if we attempt to set the hostname early in cloud-init's
     init-local timeframe as certain services may not be running yet.
     """
-    pass
 
 
 def handle(name, cfg, cloud, log, _args):
@@ -78,7 +85,7 @@ def handle(name, cfg, cloud, log, _args):
     except Exception as e:
         msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
         util.logexc(log, msg)
-        raise SetHostnameError("%s: %s" % (msg, e))
+        raise SetHostnameError("%s: %s" % (msg, e)) from e
     write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
 
 # vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index c3c5b0f..d6b5682 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -83,6 +83,7 @@ import sys
 from cloudinit.distros import ug_util
 from cloudinit import log as logging
 from cloudinit.ssh_util import update_ssh_config
+from cloudinit import subp
 from cloudinit import util
 
 from string import ascii_letters, digits
@@ -112,7 +113,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
     elif util.is_false(pw_auth):
         cfg_val = 'no'
     else:
-        bmsg = "Leaving ssh config '%s' unchanged."
% cfg_name + bmsg = "Leaving SSH config '%s' unchanged." % cfg_name if pw_auth is None or pw_auth.lower() == 'unchanged': LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) else: @@ -121,15 +122,15 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): updated = update_ssh_config({cfg_name: cfg_val}) if not updated: - LOG.debug("No need to restart ssh service, %s not updated.", cfg_name) + LOG.debug("No need to restart SSH service, %s not updated.", cfg_name) return if 'systemctl' in service_cmd: cmd = list(service_cmd) + ["restart", service_name] else: cmd = list(service_cmd) + [service_name, "restart"] - util.subp(cmd) - LOG.debug("Restarted the ssh daemon.") + subp.subp(cmd) + LOG.debug("Restarted the SSH daemon.") def handle(_name, cfg, cloud, log, args): @@ -236,17 +237,17 @@ def handle(_name, cfg, cloud, log, args): raise errors[-1] -def rand_user_password(pwlen=9): +def rand_user_password(pwlen=20): return util.rand_str(pwlen, select_from=PW_SET) def chpasswd(distro, plist_in, hashed=False): - if util.is_FreeBSD(): + if util.is_BSD(): for pentry in plist_in.splitlines(): u, p = pentry.split(":") distro.set_passwd(u, p, hashed=hashed) else: cmd = ['chpasswd'] + (['-e'] if hashed else []) - util.subp(cmd, plist_in) + subp.subp(cmd, plist_in) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index 90724b8..20ed7d2 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -12,6 +12,7 @@ from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit.settings import PER_INSTANCE from cloudinit.subp import prepend_base_command +from cloudinit import subp from cloudinit import util @@ -61,9 +62,9 @@ schema = { snap: assertions: 00: | - signed_assertion_blob_here + signed_assertion_blob_here 02: | - signed_assertion_blob_here + signed_assertion_blob_here commands: 00: snap create-user --sudoer --known @mydomain.com 01: snap install canonical-livepatch @@ -85,6 +86,21 @@ schema = { 01: ['snap', 'install', 'vlc'] 02: snap install vlc 03: 'snap install vlc' + """), dedent("""\ + # You can use a list of commands + snap: + commands: + - ['install', 'vlc'] + - ['snap', 'install', 'vlc'] + - snap install vlc + - 'snap install vlc' + """), dedent("""\ + # You can use a list of assertions + snap: + assertions: + - signed_assertion_blob_here + - | + signed_assertion_blob_here """)], 'frequency': PER_INSTANCE, 'type': 'object', @@ -98,7 +114,8 @@ schema = { 'additionalItems': False, # Reject items non-string 'minItems': 1, 'minProperties': 1, - 'uniqueItems': True + 'uniqueItems': True, + 'additionalProperties': {'type': 'string'}, }, 'commands': { 'type': ['object', 'array'], # Array of strings or dict @@ -110,6 +127,12 @@ schema = { 'additionalItems': False, # Reject non-string & non-list 'minItems': 1, 'minProperties': 1, + 'additionalProperties': { + 'oneOf': [ + {'type': 'string'}, + {'type': 'array', 'items': {'type': 'string'}}, + ], + }, }, 'squashfuse_in_container': { 'type': 'boolean' @@ -122,10 +145,6 @@ schema = { } } -# TODO schema for 'assertions' and 'commands' are too permissive at the moment. -# Once python-jsonschema supports schema draft 6 add support for arbitrary -# object keys with 'patternProperties' constraint to validate string values. 
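The chpasswd() helper above now branches on util.is_BSD() and otherwise
streams "user:password" lines to chpasswd on stdin, with -e signalling
pre-hashed values; the non-BSD branch in isolation (requires root and an
existing account, so the call is left commented)::

    # Non-BSD branch of chpasswd() above; assumes cloud-init's subp module.
    from cloudinit import subp

    def set_passwords_via_chpasswd(plist_in, hashed=False):
        cmd = ['chpasswd'] + (['-e'] if hashed else [])
        subp.subp(cmd, plist_in)   # "user:pass\n" lines fed on stdin

    # set_passwords_via_chpasswd("dummyuser:s3cret\n")  # needs root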
-
 __doc__ = get_schema_doc(schema)  # Supplement python help()
 
 SNAP_CMD = "snap"
@@ -157,7 +176,7 @@ def add_assertions(assertions):
         LOG.debug('Snap acking: %s', asrt.split('\n')[0:2])
 
     util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
-    util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
+    subp.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
 
 
 def run_commands(commands):
@@ -186,8 +205,8 @@ def run_commands(commands):
     for command in fixed_snap_commands:
         shell = isinstance(command, str)
         try:
-            util.subp(command, shell=shell, status_cb=sys.stderr.write)
-        except util.ProcessExecutionError as e:
+            subp.subp(command, shell=shell, status_cb=sys.stderr.write)
+        except subp.ProcessExecutionError as e:
             cmd_failures.append(str(e))
     if cmd_failures:
         msg = 'Failures running snap commands:\n{cmd_failures}'.format(
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 1020e94..9508360 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -27,7 +27,7 @@ For more information about spacewalk see: https://fedorahosted.org/spacewalk/
         activation_key: 
 """
 
-from cloudinit import util
+from cloudinit import subp
 
 
 distros = ['redhat', 'fedora']
@@ -41,9 +41,9 @@ def is_registered():
     # assume we aren't registered; which is sorta ghetto...
     already_registered = False
     try:
-        util.subp(['rhn-profile-sync', '--verbose'], capture=False)
+        subp.subp(['rhn-profile-sync', '--verbose'], capture=False)
         already_registered = True
-    except util.ProcessExecutionError as e:
+    except subp.ProcessExecutionError as e:
         if e.exit_code != 1:
             raise
     return already_registered
@@ -65,7 +65,7 @@ def do_register(server, profile_name,
         cmd.extend(['--sslCACert', str(ca_cert_path)])
     if activation_key:
         cmd.extend(['--activationkey', str(activation_key)])
-    util.subp(cmd, capture=False)
+    subp.subp(cmd, capture=False)
 
 
 def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 050285a..9b2a333 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -9,43 +9,23 @@
 """
 SSH
 ---
-**Summary:** configure ssh and ssh keys
+**Summary:** configure SSH and SSH keys (host and authorized)
 
-This module handles most configuration for ssh and ssh keys. Many images have
-default ssh keys, which can be removed using ``ssh_deletekeys``. Since removing
-default keys is usually the desired behavior this option is enabled by default.
+This module handles most configuration for SSH and both host and authorized SSH
+keys.
 
-Keys can be added using the ``ssh_keys`` configuration key. The argument to
-this config key should be a dictionary entries for the public and private keys
-of each desired key type. Entries in the ``ssh_keys`` config dict should
-have keys in the format ``_private`` and ``_public``, e.g.
-``rsa_private: `` and ``rsa_public: ``. See below for supported key
-types. Not all key types have to be specified, ones left unspecified will not
-be used. If this config option is used, then no keys will be generated.
+Authorized Keys
+^^^^^^^^^^^^^^^
 
-.. note::
-    when specifying private keys in cloud-config, care should be taken to
-    ensure that the communication between the data source and the instance is
-    secure
+Authorized keys are a list of public SSH keys that are allowed to connect to
+a user account on a system. They are stored in `.ssh/authorized_keys` in that
+account's home directory. Authorized keys for the default user defined in
+``users`` can be specified using ``ssh_authorized_keys``.
Keys +should be specified as a list of public keys. .. note:: - to specify multiline private keys, use yaml multiline syntax - -If no keys are specified using ``ssh_keys``, then keys will be generated using -``ssh-keygen``. By default one public/private pair of each supported key type -will be generated. The key types to generate can be specified using the -``ssh_genkeytypes`` config flag, which accepts a list of key types to use. For -each key type for which this module has been instructed to create a keypair, if -a key of the same type is already present on the system (i.e. if -``ssh_deletekeys`` was false), no key will be generated. - -Supported key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` config -flags are: - - - rsa - - dsa - - ecdsa - - ed25519 + see the ``cc_set_passwords`` module documentation to enable/disable SSH + password authentication Root login can be enabled/disabled using the ``disable_root`` config key. Root login options can be manually specified with ``disable_root_opts``. If @@ -55,17 +35,82 @@ root login is disabled, and root login opts are set to:: no-port-forwarding,no-agent-forwarding,no-X11-forwarding -Authorized keys for the default user/first user defined in ``users`` can be -specified using ``ssh_authorized_keys``. Keys should be specified as a list of -public keys. +Supported public key types for the ``ssh_authorized_keys`` are: -Importing ssh public keys for the default user (defined in ``users``)) is -enabled by default. This feature may be disabled by setting -``allow_publish_ssh_keys: false``. + - dsa + - rsa + - ecdsa + - ed25519 + - ecdsa-sha2-nistp256-cert-v01@openssh.com + - ecdsa-sha2-nistp256 + - ecdsa-sha2-nistp384-cert-v01@openssh.com + - ecdsa-sha2-nistp384 + - ecdsa-sha2-nistp521-cert-v01@openssh.com + - ecdsa-sha2-nistp521 + - sk-ecdsa-sha2-nistp256-cert-v01@openssh.com + - sk-ecdsa-sha2-nistp256@openssh.com + - sk-ssh-ed25519-cert-v01@openssh.com + - sk-ssh-ed25519@openssh.com + - ssh-dss-cert-v01@openssh.com + - ssh-dss + - ssh-ed25519-cert-v01@openssh.com + - ssh-ed25519 + - ssh-rsa-cert-v01@openssh.com + - ssh-rsa + - ssh-xmss-cert-v01@openssh.com + - ssh-xmss@openssh.com .. note:: - see the ``cc_set_passwords`` module documentation to enable/disable ssh - password authentication + this list has been filtered out from the supported keytypes of + `OpenSSH`_ source, where the sigonly keys are removed. Please see + ``ssh_util`` for more information. + + ``dsa``, ``rsa``, ``ecdsa`` and ``ed25519`` are added for legacy, + as they are valid public keys in some old distros. They can possibly + be removed in the future when support for the older distros are dropped + +.. _OpenSSH: https://github.com/openssh/openssh-portable/blob/master/sshkey.c + +Host Keys +^^^^^^^^^ + +Host keys are for authenticating a specific instance. Many images have default +host SSH keys, which can be removed using ``ssh_deletekeys``. This prevents +re-use of a private host key from an image on multiple machines. Since +removing default host keys is usually the desired behavior this option is +enabled by default. + +Host keys can be added using the ``ssh_keys`` configuration key. The argument +to this config key should be a dictionary entries for the public and private +keys of each desired key type. Entries in the ``ssh_keys`` config dict should +have keys in the format ``_private`` and ``_public``, +e.g. ``rsa_private: `` and ``rsa_public: ``. See below for supported +key types. Not all key types have to be specified, ones left unspecified will +not be used. 
If the ``ssh_keys`` config option is used, then no host keys will be generated. + +.. note:: + when specifying private host keys in cloud-config, care should be taken to + ensure that the communication between the data source and the instance is + secure + +.. note:: + to specify multiline private host keys, use yaml multiline syntax + +If no host keys are specified using ``ssh_keys``, then keys will be generated +using ``ssh-keygen``. By default, one public/private pair of each supported +host key type will be generated. The key types to generate can be specified +using the ``ssh_genkeytypes`` config flag, which accepts a list of host key +types to use. For each host key type for which this module has been instructed +to create a keypair, if a key of the same type is already present on the +system (i.e. if ``ssh_deletekeys`` was false), no key will be generated. + +Supported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` +config flags are: + + - rsa + - dsa + - ecdsa + - ed25519 **Internal name:** ``cc_ssh`` @@ -107,6 +152,7 @@ import sys from cloudinit.distros import ug_util from cloudinit import ssh_util +from cloudinit import subp from cloudinit import util @@ -155,7 +201,7 @@ def handle(_name, cfg, cloud, log, _args): try: # TODO(harlowja): Is this guard needed? with util.SeLinuxGuard("/etc/ssh", recursive=True): - util.subp(cmd, capture=False) + subp.subp(cmd, capture=False) log.debug("Generated a key for %s from %s", pair[0], pair[1]) except Exception: util.logexc(log, "Failed generating a key for %s from %s", @@ -177,9 +223,9 @@ def handle(_name, cfg, cloud, log, _args): # TODO(harlowja): Is this guard needed? with util.SeLinuxGuard("/etc/ssh", recursive=True): try: - out, err = util.subp(cmd, capture=True, env=lang_c) + out, err = subp.subp(cmd, capture=True, env=lang_c) sys.stdout.write(util.decode_binary(out)) - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: err = util.decode_binary(e.stderr).lower() if (e.exit_code == 1 and err.lower().startswith("unknown key")): @@ -216,7 +262,7 @@ def handle(_name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True): keys = cloud.get_public_ssh_keys() or [] else: - log.debug('Skipping import of publish ssh keys per ' + log.debug('Skipping import of public SSH keys per ' 'config setting: allow_public_ssh_keys=False') if "ssh_authorized_keys" in cfg: @@ -225,7 +271,7 @@ def handle(_name, cfg, cloud, log, _args): apply_credentials(keys, user, disable_root, disable_root_opts) except Exception: - util.logexc(log, "Applying ssh credentials failed!") + util.logexc(log, "Applying SSH credentials failed!") def apply_credentials(keys, user, disable_root, disable_root_opts): diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 98b0e66..05d30ad 100755 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -7,13 +7,13 @@ """ SSH Authkey Fingerprints ------------------------ -**Summary:** log fingerprints of user ssh keys +**Summary:** log fingerprints of user SSH keys Write fingerprints of authorized keys for each user to log. This is enabled by default, but can be disabled using ``no_ssh_fingerprints``. The hash type for -the keys can be specified, but defaults to ``md5``. +the keys can be specified, but defaults to ``sha256``.
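+For example, fingerprint logging can be tuned from cloud-config (a small
+sketch; the values shown here are simply the defaults)::
+
+    no_ssh_fingerprints: false
+    authkey_hash: sha256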
-**Internal name:** `` cc_ssh_authkey_fingerprints`` +**Internal name:** ``cc_ssh_authkey_fingerprints`` **Module frequency:** per instance @@ -42,7 +42,7 @@ def _split_hash(bin_hash): return split_up -def _gen_fingerprint(b64_text, hash_meth='md5'): +def _gen_fingerprint(b64_text, hash_meth='sha256'): if not b64_text: return '' # TBD(harlowja): Maybe we should feed this into 'ssh -lf'? @@ -59,16 +59,16 @@ def _gen_fingerprint(b64_text, hash_meth='md5'): def _is_printable_key(entry): if any([entry.keytype, entry.base64, entry.comment, entry.options]): - if (entry.keytype and - entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']): + if (entry.keytype and entry.keytype.lower().strip() + in ssh_util.VALID_KEY_TYPES): return True return False -def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', +def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256', prefix='ci-info: '): if not key_entries: - message = ("%sno authorized ssh keys fingerprints found for user %s.\n" + message = ("%sno authorized SSH keys fingerprints found for user %s.\n" % (prefix, user)) util.multi_log(message) return @@ -98,10 +98,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', def handle(name, cfg, cloud, log, _args): if util.is_true(cfg.get('no_ssh_fingerprints', False)): log.debug(("Skipping module named %s, " - "logging of ssh fingerprints disabled"), name) + "logging of SSH fingerprints disabled"), name) return - hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") + hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256") (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name) diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 6b46daf..856e5a9 100755 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -9,9 +9,9 @@ """ SSH Import Id ------------- -**Summary:** import ssh id +**Summary:** import SSH id -This module imports ssh keys from either a public keyserver, usually launchpad +This module imports SSH keys from either a public keyserver, usually launchpad or github using ``ssh-import-id``. Keys are referenced by the username they are associated with on the keyserver. The keyserver can be specified by prepending either ``lp:`` for launchpad or ``gh:`` for github to the username. @@ -31,6 +31,7 @@ either ``lp:`` for launchpad or ``gh:`` for github to the username. 
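+A short sketch of the documented form (the usernames are placeholders)::
+
+    ssh_import_id:
+      - lp:my_launchpad_user
+      - gh:my_github_user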
""" from cloudinit.distros import ug_util +from cloudinit import subp from cloudinit import util import pwd @@ -98,12 +99,12 @@ def import_ssh_ids(ids, user, log): raise exc cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids - log.debug("Importing ssh ids for user %s.", user) + log.debug("Importing SSH ids for user %s.", user) try: - util.subp(cmd, capture=False) - except util.ProcessExecutionError as exc: - util.logexc(log, "Failed to run command to import %s ssh ids", user) + subp.subp(cmd, capture=False) + except subp.ProcessExecutionError as exc: + util.logexc(log, "Failed to run command to import %s SSH ids", user) raise exc # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index f846e9a..d61dc65 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -4,12 +4,11 @@ from textwrap import dedent -import six - from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE +from cloudinit import subp from cloudinit import util @@ -98,7 +97,7 @@ def configure_ua(token=None, enable=None): if enable is None: enable = [] - elif isinstance(enable, six.string_types): + elif isinstance(enable, str): LOG.warning('ubuntu_advantage: enable should be a list, not' ' a string; treating as a single enable') enable = [enable] @@ -111,18 +110,18 @@ def configure_ua(token=None, enable=None): attach_cmd = ['ua', 'attach', token] LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd)) try: - util.subp(attach_cmd) - except util.ProcessExecutionError as e: + subp.subp(attach_cmd) + except subp.ProcessExecutionError as e: msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format( error=str(e)) util.logexc(LOG, msg) - raise RuntimeError(msg) + raise RuntimeError(msg) from e enable_errors = [] for service in enable: try: cmd = ['ua', 'enable', service] - util.subp(cmd, capture=True) - except util.ProcessExecutionError as e: + subp.subp(cmd, capture=True) + except subp.ProcessExecutionError as e: enable_errors.append((service, e)) if enable_errors: for service, error in enable_errors: @@ -137,7 +136,7 @@ def configure_ua(token=None, enable=None): def maybe_install_ua_tools(cloud): """Install ubuntu-advantage-tools if not present.""" - if util.which('ua'): + if subp.which('ua'): return try: cloud.distro.update_package_sources() diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py index 297451d..2d1d2b3 100644 --- a/cloudinit/config/cc_ubuntu_drivers.py +++ b/cloudinit/config/cc_ubuntu_drivers.py @@ -9,6 +9,7 @@ from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE +from cloudinit import subp from cloudinit import temp_utils from cloudinit import type_utils from cloudinit import util @@ -108,7 +109,7 @@ def install_drivers(cfg, pkg_install_func): LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc) return - if not util.which('ubuntu-drivers'): + if not subp.which('ubuntu-drivers'): LOG.debug("'ubuntu-drivers' command not available. 
" "Installing ubuntu-drivers-common") pkg_install_func(['ubuntu-drivers-common']) @@ -131,7 +132,7 @@ def install_drivers(cfg, pkg_install_func): debconf_script, util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT), mode=0o755) - util.subp([debconf_script, debconf_file]) + subp.subp([debconf_script, debconf_file]) except Exception as e: util.logexc( LOG, "Failed to register NVIDIA debconf template: %s", str(e)) @@ -141,8 +142,8 @@ def install_drivers(cfg, pkg_install_func): util.del_dir(tdir) try: - util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg]) - except util.ProcessExecutionError as exc: + subp.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg]) + except subp.ProcessExecutionError as exc: if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr: LOG.warning('the available version of ubuntu-drivers is' ' too old to perform requested driver installation') diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index c32a743..426498a 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -51,14 +51,14 @@ config keys for an entry in ``users`` are as follows: a Snappy user through ``snap create-user``. If an Ubuntu SSO account is associated with the address, username and SSH keys will be requested from there. Default: none - - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's + - ``ssh_authorized_keys``: Optional. List of SSH keys to add to user's authkeys file. Default: none. This key can not be combined with ``ssh_redirect_user``. - ``ssh_import_id``: Optional. SSH id to import for user. Default: none. This key can not be combined with ``ssh_redirect_user``. - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH - logins for this user. When specified, all cloud meta-data public ssh - keys will be set up in a disabled state for this username. Any ssh login + logins for this user. When specified, all cloud meta-data public SSH + keys will be set up in a disabled state for this username. Any SSH login as this username will timeout and prompt with a message to login instead as the configured for this instance. Default: false. This key can not be combined with ``ssh_import_id`` or @@ -78,6 +78,13 @@ config keys for an entry in ``users`` are as follows: If specifying a sudo rule for a user, ensure that the syntax for the rule is valid, as it is not checked by cloud-init. +.. note:: + Most of these configuration options will not be honored if the user + already exists. Following options are the exceptions and they are + applicable on already-existing users: + - 'plain_text_passwd', 'hashed_passwd', 'lock_passwd', 'sudo', + 'ssh_authorized_keys', 'ssh_redirect_user'. + **Internal name:** ``cc_users_groups`` **Module frequency:** per instance diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 0b6546e..8601e70 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -4,61 +4,14 @@ # # This file is part of cloud-init. See LICENSE file for license information. -""" -Write Files ------------ -**Summary:** write arbitrary files - -Write out arbitrary content to files, optionally setting permissions. Content -can be specified in plain text or binary. Data encoded with either base64 or -binary gzip data can be specified and will be decoded before being written. - -.. note:: - if multiline data is provided, care should be taken to ensure that it - follows yaml formatting standards. 
to specify binary data, use the yaml - option ``!!binary`` - -.. note:: - Do not write files under /tmp during boot because of a race with - systemd-tmpfiles-clean that can cause temp files to get cleaned during - the early boot process. Use /run/somedir instead to avoid race LP:1707222. - -**Internal name:** ``cc_write_files`` - -**Module frequency:** per instance - -**Supported distros:** all - -**Config keys**:: - - write_files: - - encoding: b64 - content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4... - owner: root:root - path: /etc/sysconfig/selinux - permissions: '0644' - - content: | - # My new /etc/sysconfig/samba file - - SMDBOPTIONS="-D" - path: /etc/sysconfig/samba - - content: !!binary | - f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAA - AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAA - AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAAB - ... - path: /bin/arch - permissions: '0555' - - content: | - 15 * * * * root ship_logs - path: /etc/crontab - append: true -""" +"""Write Files: write arbitrary files""" import base64 import os -import six +from textwrap import dedent +from cloudinit.config.schema import ( + get_schema_doc, validate_cloudconfig_schema) from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import util @@ -72,6 +25,142 @@ UNKNOWN_ENC = 'text/plain' LOG = logging.getLogger(__name__) +distros = ['all'] + +# The schema definition for each cloud-config module is a strict contract for +# describing supported configuration parameters for each cloud-config section. +# It allows cloud-config to validate and alert users to invalid or ignored +# configuration options before actually attempting to deploy with said +# configuration. + +supported_encoding_types = [ + 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64', + 'base64'] + +schema = { + 'id': 'cc_write_files', + 'name': 'Write Files', + 'title': 'write arbitrary files', + 'description': dedent("""\ + Write out arbitrary content to files, optionally setting permissions. + Parent folders in the path are created if absent. + Content can be specified in plain text or binary. Data encoded with + either base64 or binary gzip data can be specified and will be decoded + before being written. For empty file creation, content can be omitted. + + .. note:: + if multiline data is provided, care should be taken to ensure that it + follows yaml formatting standards. to specify binary data, use the yaml + option ``!!binary`` + + .. note:: + Do not write files under /tmp during boot because of a race with + systemd-tmpfiles-clean that can cause temp files to get cleaned during + the early boot process. Use /run/somedir instead to avoid race + LP:1707222."""), + 'distros': distros, + 'examples': [ + dedent("""\ + # Write out base64 encoded content to /etc/sysconfig/selinux + write_files: + - encoding: b64 + content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4... 
+ owner: root:root + path: /etc/sysconfig/selinux + permissions: '0644' + """), + dedent("""\ + # Appending content to an existing file + write_files: + - content: | + 15 * * * * root ship_logs + path: /etc/crontab + append: true + """), + dedent("""\ + # Provide gzipped binary content + write_files: + - encoding: gzip + content: !!binary | + H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA= + path: /usr/bin/hello + permissions: '0755' + """), + dedent("""\ + # Create an empty file on the system + write_files: + - path: /root/CLOUD_INIT_WAS_HERE + """)], + 'frequency': frequency, + 'type': 'object', + 'properties': { + 'write_files': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'path': { + 'type': 'string', + 'description': dedent("""\ + Path of the file to which ``content`` is decoded + and written + """), + }, + 'content': { + 'type': 'string', + 'default': '', + 'description': dedent("""\ + Optional content to write to the provided ``path``. + When content is present and encoding is not '%s', + decode the content prior to writing. Default: + **''** + """ % UNKNOWN_ENC), + }, + 'owner': { + 'type': 'string', + 'default': DEFAULT_OWNER, + 'description': dedent("""\ + Optional owner:group to chown on the file. Default: + **{owner}** + """.format(owner=DEFAULT_OWNER)), + }, + 'permissions': { + 'type': 'string', + 'default': oct(DEFAULT_PERMS).replace('o', ''), + 'description': dedent("""\ + Optional file permissions to set on ``path`` + represented as an octal string '0###'. Default: + **'{perms}'** + """.format(perms=oct(DEFAULT_PERMS).replace('o', ''))), + }, + 'encoding': { + 'type': 'string', + 'default': UNKNOWN_ENC, + 'enum': supported_encoding_types, + 'description': dedent("""\ + Optional encoding type of the content. Default is + **text/plain** and no content decoding is + performed. Supported encoding types are: + %s.""" % ", ".join(supported_encoding_types)), + }, + 'append': { + 'type': 'boolean', + 'default': False, + 'description': dedent("""\ + Whether to append ``content`` to existing file if + ``path`` exists. Default: **false**. + """), + }, + }, + 'required': ['path'], + 'additionalProperties': False + }, + } + } +} + +__doc__ = get_schema_doc(schema) # Supplement python help() + def handle(name, cfg, _cloud, log, _args): files = cfg.get('write_files') @@ -79,6 +168,7 @@ def handle(name, cfg, _cloud, log, _args): log.debug(("Skipping module named %s," " no/empty 'write_files' key in configuration"), name) return + validate_cloudconfig_schema(cfg, schema) write_files(name, files) @@ -126,7 +216,7 @@ def decode_perms(perm, default): if perm is None: return default try: - if isinstance(perm, six.integer_types + (float,)): + if isinstance(perm, (int, float)): # Just 'downcast' it (if a float) return int(perm) else: diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 3b354a7..01fe683 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -18,7 +18,7 @@ entry, the config entry will be skipped. **Module frequency:** per always -**Supported distros:** fedora, rhel +**Supported distros:** centos, fedora, rhel **Config keys**:: @@ -30,17 +30,13 @@ entry, the config entry will be skipped.
# any repository configuration options (see man yum.conf) """ +import io import os - -try: - from configparser import ConfigParser -except ImportError: - from ConfigParser import ConfigParser -import six +from configparser import ConfigParser from cloudinit import util -distros = ['fedora', 'rhel'] +distros = ['centos', 'fedora', 'rhel'] def _canonicalize_id(repo_id): @@ -57,7 +53,7 @@ def _format_repo_value(val): # Can handle 'lists' in certain cases # See: https://linux.die.net/man/5/yum.conf return "\n".join([_format_repo_value(v) for v in val]) - if not isinstance(val, six.string_types): + if not isinstance(val, str): return str(val) return val @@ -72,7 +68,7 @@ def _format_repository_config(repo_id, repo_config): # For now assume that people using this know # the format of yum and don't verify keys/values further to_be.set(repo_id, k, _format_repo_value(v)) - to_be_stream = six.StringIO() + to_be_stream = io.StringIO() to_be.write(to_be_stream) to_be_stream.seek(0) lines = to_be_stream.readlines() diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py index aba2695..05855b0 100644 --- a/cloudinit/config/cc_zypper_add_repo.py +++ b/cloudinit/config/cc_zypper_add_repo.py @@ -7,7 +7,6 @@ import configobj import os -from six import string_types from textwrap import dedent from cloudinit.config.schema import get_schema_doc @@ -110,7 +109,7 @@ def _format_repo_value(val): return 1 if val else 0 if isinstance(val, (list, tuple)): return "\n ".join([_format_repo_value(v) for v in val]) - if not isinstance(val, string_types): + if not isinstance(val, str): return str(val) return val diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 807c3ee..8a966ae 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -1,8 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. """schema.py: Set of module functions for processing cloud-config schema.""" -from __future__ import print_function - from cloudinit import importer from cloudinit.util import find_modules, load_file @@ -36,6 +34,8 @@ SCHEMA_DOC_TMPL = """ {examples} """ SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}' +SCHEMA_LIST_ITEM_TMPL = ( + '{prefix}Each item in **{prop_name}** list supports the following keys:') SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n' SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---' @@ -58,6 +58,19 @@ class SchemaValidationError(ValueError): super(SchemaValidationError, self).__init__(message) +def is_schema_byte_string(checker, instance): + """TYPE_CHECKER override allowing bytes for string type + + For jsonschema v. 3.0.0+ + """ + try: + from jsonschema import Draft4Validator + except ImportError: + return False + return (Draft4Validator.TYPE_CHECKER.is_type(instance, "string") or + isinstance(instance, (bytes,))) + + def validate_cloudconfig_schema(config, schema, strict=False): """Validate provided config meets the schema definition. @@ -73,11 +86,31 @@ def validate_cloudconfig_schema(config, schema, strict=False): """ try: from jsonschema import Draft4Validator, FormatChecker + from jsonschema.validators import create, extend except ImportError: logging.debug( 'Ignoring schema validation. python-jsonschema is not present') return - validator = Draft4Validator(schema, format_checker=FormatChecker()) + + # Allow for bytes to be presented as an acceptable valid value for string + # type jsonschema attributes in cloud-init's schema. 
+ # This allows #cloud-config to provide valid yaml "content: !!binary | ..." + if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+ + type_checker = Draft4Validator.TYPE_CHECKER.redefine( + 'string', is_schema_byte_string) + cloudinitValidator = extend(Draft4Validator, type_checker=type_checker) + else: # jsonschema 2.6 workaround + types = Draft4Validator.DEFAULT_TYPES + # Allow bytes as well as string (and disable a spurious + # unsupported-assignment-operation pylint warning which appears because + # this code path isn't written against the latest jsonschema). + types['string'] = (str, bytes) # pylint: disable=E1137 + cloudinitValidator = create( + meta_schema=Draft4Validator.META_SCHEMA, + validators=Draft4Validator.VALIDATORS, + version="draft4", + default_types=types) + validator = cloudinitValidator(schema, format_checker=FormatChecker()) errors = () for error in sorted(validator.iter_errors(config), key=lambda e: e.path): path = '.'.join([str(p) for p in error.path]) @@ -106,7 +139,6 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): schemapaths = _schemapath_for_cloudconfig( cloudconfig, original_content) errors_by_line = defaultdict(list) - error_count = 1 error_footer = [] annotated_content = [] for path, msg in schema_errors: @@ -120,18 +152,17 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): if col is not None: msg = 'Line {line} column {col}: {msg}'.format( line=line, col=col, msg=msg) - error_footer.append('# E{0}: {1}'.format(error_count, msg)) - error_count += 1 lines = original_content.decode().split('\n') - error_count = 1 - for line_number, line in enumerate(lines): - errors = errors_by_line[line_number + 1] + error_index = 1 + for line_number, line in enumerate(lines, 1): + errors = errors_by_line[line_number] if errors: - error_label = ','.join( - ['E{0}'.format(count + error_count) - for count in range(0, len(errors))]) - error_count += len(errors) - annotated_content.append(line + '\t\t# ' + error_label) + error_label = [] + for error in errors: + error_label.append('E{0}'.format(error_index)) + error_footer.append('# E{0}: {1}'.format(error_index, error)) + error_index += 1 + annotated_content.append(line + '\t\t# ' + ','.join(error_label)) else: annotated_content.append(line) annotated_content.append( @@ -179,7 +210,7 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): error = SchemaValidationError(errors) if annotate: print(annotated_cloudconfig_file({}, content, error.schema_errors)) - raise error + raise error from e try: validate_cloudconfig_schema( cloudconfig, schema, strict=True) @@ -213,20 +244,34 @@ def _schemapath_for_cloudconfig(config, original_content): previous_depth = -1 path_prefix = '' if line.startswith('- '): + # Process list items adding a list_index to the path prefix + previous_list_idx = '.%d' % (list_index - 1) + if path_prefix and path_prefix.endswith(previous_list_idx): + path_prefix = path_prefix[:-len(previous_list_idx)] key = str(list_index) - value = line[1:] + schema_line_numbers[key] = line_number + item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0]) + item_indent += 1 # For the leading '-' character + previous_depth = indent_depth + indent_depth += item_indent + line = line[item_indent:] # Strip leading list item + whitespace list_index += 1 else: + # Process non-list lines setting value if present list_index = 0 key, value = line.split(':', 1) + if path_prefix: + # Append any existing path_prefix for a fully-pathed key + key = 
path_prefix + '.' + key while indent_depth <= previous_depth: if scopes: previous_depth, path_prefix = scopes.pop() + if list_index > 0 and indent_depth == previous_depth: + path_prefix = '.'.join(path_prefix.split('.')[:-1]) + break else: previous_depth = -1 path_prefix = '' - if path_prefix: - key = path_prefix + '.' + key scopes.append((indent_depth, key)) if value: value = value.strip() @@ -259,6 +304,28 @@ def _get_property_type(property_dict): return property_type +def _parse_description(description, prefix): + """Parse description from the schema in a format that we can better + display in our docs. This parser does three things: + + - Guarantee that a paragraph will be in a single line + - Guarantee that each new paragraph will be aligned with + the first paragraph + - Properly align lists of items + + @param description: The original description in the schema. + @param prefix: The number of spaces used to align the current description + """ + list_paragraph = prefix * 3 + description = re.sub(r"(\S)\n(\S)", r"\1 \2", description) + description = re.sub( + r"\n\n", r"\n\n{}".format(prefix), description) + description = re.sub( + r"\n( +)-", r"\n{}-".format(list_paragraph), description) + + return description + + def _get_property_doc(schema, prefix=' '): """Return restructured text describing the supported schema properties.""" new_prefix = prefix + ' ' @@ -266,11 +333,23 @@ for prop_key, prop_config in schema.get('properties', {}).items(): # Define prop_name and description for SCHEMA_PROPERTY_TMPL description = prop_config.get('description', '') + properties.append(SCHEMA_PROPERTY_TMPL.format( prefix=prefix, prop_name=prop_key, type=_get_property_type(prop_config), - description=description.replace('\n', ''))) + description=_parse_description(description, prefix))) + items = prop_config.get('items') + if items: + if isinstance(items, list): + for item in items: + properties.append( + _get_property_doc(item, prefix=new_prefix)) + elif isinstance(items, dict) and items.get('properties'): + properties.append(SCHEMA_LIST_ITEM_TMPL.format( + prefix=new_prefix, prop_name=prop_key)) + new_prefix += ' ' + properties.append(_get_property_doc(items, prefix=new_prefix)) if 'properties' in prop_config: properties.append( _get_property_doc(prop_config, prefix=new_prefix)) @@ -346,8 +425,9 @@ def get_parser(parser=None): description='Validate cloud-config files or document schema') parser.add_argument('-c', '--config-file', help='Path of the cloud-config yaml file to validate') - parser.add_argument('-d', '--doc', action="store_true", default=False, - help='Print schema documentation') + parser.add_argument('-d', '--docs', nargs='+', + help=('Print schema module docs. 
Choices: all or' + ' space-delimited cc_names.')) parser.add_argument('--annotate', action="store_true", default=False, help='Annotate existing cloud-config file with errors') return parser @@ -355,9 +435,9 @@ def get_parser(parser=None): def handle_schema_args(name, args): """Handle provided schema args and perform the appropriate actions.""" - exclusive_args = [args.config_file, args.doc] + exclusive_args = [args.config_file, args.docs] if not any(exclusive_args) or all(exclusive_args): - error('Expected either --config-file argument or --doc') + error('Expected either --config-file argument or --docs') full_schema = get_schema() if args.config_file: try: @@ -370,9 +450,16 @@ def handle_schema_args(name, args): error(str(e)) else: print("Valid cloud-config file {0}".format(args.config_file)) - if args.doc: + elif args.docs: + schema_ids = [subschema['id'] for subschema in full_schema['allOf']] + schema_ids += ['all'] + invalid_docs = set(args.docs).difference(set(schema_ids)) + if invalid_docs: + error('Invalid --docs value {0}. Must be one of: {1}'.format( + list(invalid_docs), ', '.join(schema_ids))) for subschema in full_schema['allOf']: - print(get_schema_doc(subschema)) + if 'all' in args.docs or subschema['id'] in args.docs: + print(get_schema_doc(subschema)) def main(): diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py index 67646b0..b00f208 100644 --- a/cloudinit/config/tests/test_disable_ec2_metadata.py +++ b/cloudinit/config/tests/test_disable_ec2_metadata.py @@ -15,10 +15,8 @@ DISABLE_CFG = {'disable_ec2_metadata': 'true'} class TestEC2MetadataRoute(CiTestCase): - with_logs = True - - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') def test_disable_ifconfig(self, m_subp, m_which): """Set the route if ifconfig command is available""" m_which.side_effect = lambda x: x if x == 'ifconfig' else None @@ -27,8 +25,8 @@ class TestEC2MetadataRoute(CiTestCase): ['route', 'add', '-host', '169.254.169.254', 'reject'], capture=False) - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') def test_disable_ip(self, m_subp, m_which): """Set the route if ip command is available""" m_which.side_effect = lambda x: x if x == 'ip' else None @@ -37,8 +35,8 @@ class TestEC2MetadataRoute(CiTestCase): ['ip', 'route', 'add', 'prohibit', '169.254.169.254'], capture=False) - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') def test_disable_no_tool(self, m_subp, m_which): """Log error when neither route nor ip commands are available""" m_which.return_value = None # Find neither ifconfig nor ip diff --git a/cloudinit/config/tests/test_final_message.py b/cloudinit/config/tests/test_final_message.py new file mode 100644 index 0000000..46ba99b --- /dev/null +++ b/cloudinit/config/tests/test_final_message.py @@ -0,0 +1,46 @@ +# This file is part of cloud-init. 
See LICENSE file for license information. +import logging +from unittest import mock + +import pytest + +from cloudinit.config.cc_final_message import handle + + +class TestHandle: + # TODO: Expand these tests to cover full functionality; currently they only + # cover the logic around how the boot-finished file is written (and not its + # contents). + + @pytest.mark.parametrize( + "instance_dir_exists,file_is_written,expected_log_substring", + [ + (True, True, None), + (False, False, "Failed to write boot finished file "), + ], + ) + def test_boot_finished_written( + self, + instance_dir_exists, + file_is_written, + expected_log_substring, + caplog, + tmpdir, + ): + instance_dir = tmpdir.join("var/lib/cloud/instance") + if instance_dir_exists: + instance_dir.ensure_dir() + boot_finished = instance_dir.join("boot-finished") + + m_cloud = mock.Mock( + paths=mock.Mock(boot_finished=boot_finished.strpath) + ) + + handle(None, {}, m_cloud, logging.getLogger(), []) + + # We should not change the status of the instance directory + assert instance_dir_exists == instance_dir.exists() + assert file_is_written == boot_finished.exists() + + if expected_log_substring: + assert expected_log_substring in caplog.text diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/cloudinit/config/tests/test_grub_dpkg.py new file mode 100644 index 0000000..99c05bb --- /dev/null +++ b/cloudinit/config/tests/test_grub_dpkg.py @@ -0,0 +1,176 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import pytest + +from unittest import mock +from logging import Logger +from cloudinit.subp import ProcessExecutionError +from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle + + +class TestFetchIdevs: + """Tests cc_grub_dpkg.fetch_idevs()""" + + # Note: udevadm info returns devices in a large single line string + @pytest.mark.parametrize( + "grub_output,path_exists,expected_log_call,udevadm_output" + ",expected_idevs", + [ + # Inside a container, grub not installed + ( + ProcessExecutionError(reason=FileNotFoundError()), + False, + mock.call("'grub-probe' not found in $PATH"), + '', + '', + ), + # Inside a container, grub installed + ( + ProcessExecutionError(stderr="failed to get canonical path"), + False, + mock.call("grub-probe 'failed to get canonical path'"), + '', + '', + ), + # KVM Instance + ( + ['/dev/vda'], + True, + None, + ( + '/dev/disk/by-path/pci-0000:00:00.0 ', + '/dev/disk/by-path/virtio-pci-0000:00:00.0 ' + ), + '/dev/vda', + ), + # Xen Instance + ( + ['/dev/xvda'], + True, + None, + '', + '/dev/xvda', + ), + # NVMe Hardware Instance + ( + ['/dev/nvme1n1'], + True, + None, + ( + '/dev/disk/by-id/nvme-Company_hash000 ', + '/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ', + '/dev/disk/by-path/pci-0000:00:00.0-nvme-0 ' + ), + '/dev/disk/by-id/nvme-Company_hash000', + ), + # SCSI Hardware Instance + ( + ['/dev/sda'], + True, + None, + ( + '/dev/disk/by-id/company-user-1 ', + '/dev/disk/by-id/scsi-0Company_user-1 ', + '/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 ' + ), + '/dev/disk/by-id/company-user-1', + ), + ], + ) + @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc") + @mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists") + @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp") + def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output, + path_exists, expected_log_call, udevadm_output, + expected_idevs): + """Tests outputs from grub-probe and udevadm info against grub-dpkg""" + m_subp.side_effect = [ + grub_output, + ["".join(udevadm_output)] + ] 
+ m_exists.return_value = path_exists + log = mock.Mock(spec=Logger) + idevs = fetch_idevs(log) + assert expected_idevs == idevs + if expected_log_call is not None: + assert expected_log_call in log.debug.call_args_list + + +class TestHandle: + """Tests cc_grub_dpkg.handle()""" + + @pytest.mark.parametrize( + "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output", + [ + ( + # No configuration + None, + None, + '/dev/disk/by-id/nvme-Company_hash000', + ( + "Setting grub debconf-set-selections with ", + "'/dev/disk/by-id/nvme-Company_hash000','false'" + ), + ), + ( + # idevs set, idevs_empty unset + '/dev/sda', + None, + '/dev/sda', + ( + "Setting grub debconf-set-selections with ", + "'/dev/sda','false'" + ), + ), + ( + # idevs unset, idevs_empty set + None, + 'true', + '/dev/xvda', + ( + "Setting grub debconf-set-selections with ", + "'/dev/xvda','true'" + ), + ), + ( + # idevs set, idevs_empty set + '/dev/vda', + 'false', + '/dev/disk/by-id/company-user-1', + ( + "Setting grub debconf-set-selections with ", + "'/dev/vda','false'" + ), + ), + ( + # idevs set, idevs_empty set + # Respect what the user defines, even if it's logically wrong + '/dev/nvme0n1', + 'true', + '', + ( + "Setting grub debconf-set-selections with ", + "'/dev/nvme0n1','true'" + ), + ) + ], + ) + @mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs") + @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str") + @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc") + @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp") + def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs, + cfg_idevs, cfg_idevs_empty, fetch_idevs_output, + expected_log_output): + """Test setting of correct debconf database entries""" + m_get_cfg_str.side_effect = [ + cfg_idevs, + cfg_idevs_empty + ] + m_fetch_idevs.return_value = fetch_idevs_output + log = mock.Mock(spec=Logger) + handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock()) + log.debug.assert_called_with("".join(expected_log_output)) + + +# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py new file mode 100644 index 0000000..764a33e --- /dev/null +++ b/cloudinit/config/tests/test_mounts.py @@ -0,0 +1,28 @@ +# This file is part of cloud-init. See LICENSE file for license information. +from unittest import mock + +import pytest + +from cloudinit.config.cc_mounts import create_swapfile + + +M_PATH = 'cloudinit.config.cc_mounts.'
+ + +class TestCreateSwapfile: + + @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other')) + @mock.patch(M_PATH + 'util.get_mount_info') + @mock.patch(M_PATH + 'subp.subp') + def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir): + swap_file = tmpdir.join("swap-file") + fname = str(swap_file) + + # Some of the calls to subp.subp should create the swap file; this + # roughly approximates that + m_subp.side_effect = lambda *args, **kwargs: swap_file.write('') + + m_get_mount_info.return_value = (mock.ANY, fstype) + + create_swapfile(fname, '') + assert mock.call(['mkswap', fname]) in m_subp.call_args_list diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py new file mode 100644 index 0000000..6546a0b --- /dev/null +++ b/cloudinit/config/tests/test_resolv_conf.py @@ -0,0 +1,86 @@ +from unittest import mock + +import pytest + +from cloudinit.config.cc_resolv_conf import generate_resolv_conf + + +EXPECTED_HEADER = """\ +# Your system has been configured with 'manage-resolv-conf' set to true. +# As a result, cloud-init has written this file with configuration data +# that it has been provided. Cloud-init, by default, will write this file +# a single time (PER_ONCE). +#\n\n""" + + +class TestGenerateResolvConf: + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file): + generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock()) + + assert [ + mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY) + ] == m_render_to_file.call_args_list + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_target_fname_is_used_if_passed(self, m_render_to_file): + generate_resolv_conf( + "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path" + ) + + assert [ + mock.call(mock.ANY, "/use/this/path", mock.ANY) + ] == m_render_to_file.call_args_list + + # Patch in templater so we can assert on the actual generated content + @mock.patch("cloudinit.templater.util.write_file") + # Parameterise with the value to be passed to generate_resolv_conf as the + # params parameter, and the expected line after the header as + # expected_extra_line. 
+ @pytest.mark.parametrize( + "params,expected_extra_line", + [ + # No options + ({}, None), + # Just a true flag + ({"options": {"foo": True}}, "options foo"), + # Just a false flag + ({"options": {"foo": False}}, None), + # Just an option + ({"options": {"foo": "some_value"}}, "options foo:some_value"), + # A true flag and an option + ( + {"options": {"foo": "some_value", "bar": True}}, + "options bar foo:some_value", + ), + # Two options + ( + {"options": {"foo": "some_value", "bar": "other_value"}}, + "options bar:other_value foo:some_value", + ), + # Everything + ( + { + "options": { + "foo": "some_value", + "bar": "other_value", + "baz": False, + "spam": True, + } + }, + "options spam bar:other_value foo:some_value", + ), + ], + ) + def test_flags_and_options( + self, m_write_file, params, expected_extra_line + ): + generate_resolv_conf("templates/resolv.conf.tmpl", params) + + expected_content = EXPECTED_HEADER + if expected_extra_line is not None: + # If we have any extra lines, expect a trailing newline + expected_content += "\n".join([expected_extra_line, ""]) + assert [ + mock.call(mock.ANY, expected_content, mode=mock.ANY) + ] == m_write_file.call_args_list diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index 639fb9e..daa1ef5 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -import mock +from unittest import mock from cloudinit.config import cc_set_passwords as setpass from cloudinit.tests.helpers import CiTestCase @@ -14,7 +14,7 @@ class TestHandleSshPwauth(CiTestCase): with_logs = True - @mock.patch(MODPATH + "util.subp") + @mock.patch(MODPATH + "subp.subp") def test_unknown_value_logs_warning(self, m_subp): setpass.handle_ssh_pwauth("floo") self.assertIn("Unrecognized value: ssh_pwauth=floo", @@ -22,7 +22,7 @@ m_subp.assert_not_called() @mock.patch(MODPATH + "update_ssh_config", return_value=True) - @mock.patch(MODPATH + "util.subp") + @mock.patch(MODPATH + "subp.subp") def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config): """If systemctl in service cmd: systemctl restart name.""" setpass.handle_ssh_pwauth( @@ -31,7 +31,7 @@ m_subp.call_args) @mock.patch(MODPATH + "update_ssh_config", return_value=True) - @mock.patch(MODPATH + "util.subp") + @mock.patch(MODPATH + "subp.subp") def test_service_as_service_cmd(self, m_subp, m_update_ssh_config): """If service in service cmd: service name restart.""" setpass.handle_ssh_pwauth( @@ -40,15 +40,15 @@ m_subp.call_args) @mock.patch(MODPATH + "update_ssh_config", return_value=False) - @mock.patch(MODPATH + "util.subp") + @mock.patch(MODPATH + "subp.subp") def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config): """If config is not updated, then no system restart should be done.""" setpass.handle_ssh_pwauth(True) m_subp.assert_not_called() - self.assertIn("No need to restart ssh", self.logs.getvalue()) + self.assertIn("No need to restart SSH", self.logs.getvalue()) @mock.patch(MODPATH + "update_ssh_config", return_value=True) - @mock.patch(MODPATH + "util.subp") + @mock.patch(MODPATH + "subp.subp") def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config): """If 'unchanged', then no updates to config and no restart.""" setpass.handle_ssh_pwauth( @@ -56,7 +56,7 @@ class 
TestHandleSshPwauth(CiTestCase): m_update_ssh_config.assert_not_called() m_subp.assert_not_called() - @mock.patch(MODPATH + "util.subp") + @mock.patch(MODPATH + "subp.subp") def test_valid_change_values(self, m_subp): """If value is a valid change value, then update should be called.""" upname = MODPATH + "update_ssh_config" @@ -74,17 +74,21 @@ class TestSetPasswordsHandle(CiTestCase): with_logs = True + def setUp(self): + super(TestSetPasswordsHandle, self).setUp() + self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err') + def test_handle_on_empty_config(self, *args): """handle logs that no password has changed when config is empty.""" cloud = self.tmp_cloud(distro='ubuntu') setpass.handle( 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[]) self.assertEqual( - "DEBUG: Leaving ssh config 'PasswordAuthentication' unchanged. " + "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. " 'ssh_pwauth=None\n', self.logs.getvalue()) - @mock.patch(MODPATH + "util.subp") + @mock.patch(MODPATH + "subp.subp") def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp): """handle parses command password hashes.""" cloud = self.tmp_cloud(distro='ubuntu') @@ -94,7 +98,7 @@ class TestSetPasswordsHandle(CiTestCase): 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q' 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1'] cfg = {'chpasswd': {'list': valid_hashed_pwds}} - with mock.patch(MODPATH + 'util.subp') as m_subp: + with mock.patch(MODPATH + 'subp.subp') as m_subp: setpass.handle( 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) self.assertIn( @@ -108,12 +112,12 @@ class TestSetPasswordsHandle(CiTestCase): '\n'.join(valid_hashed_pwds) + '\n')], m_subp.call_args_list) - @mock.patch(MODPATH + "util.is_FreeBSD") - @mock.patch(MODPATH + "util.subp") - def test_freebsd_calls_custom_pw_cmds_to_set_and_expire_passwords( - self, m_subp, m_is_freebsd): - """FreeBSD calls custom pw commands instead of chpasswd and passwd""" - m_is_freebsd.return_value = True + @mock.patch(MODPATH + "util.is_BSD") + @mock.patch(MODPATH + "subp.subp") + def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords( + self, m_subp, m_is_bsd): + """BSD doesn't use chpasswd""" + m_is_bsd.return_value = True cloud = self.tmp_cloud(distro='freebsd') valid_pwds = ['ubuntu:passw0rd'] cfg = {'chpasswd': {'list': valid_pwds}} @@ -125,18 +129,18 @@ class TestSetPasswordsHandle(CiTestCase): mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])], m_subp.call_args_list) - @mock.patch(MODPATH + "util.is_FreeBSD") - @mock.patch(MODPATH + "util.subp") + @mock.patch(MODPATH + "util.is_BSD") + @mock.patch(MODPATH + "subp.subp") def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp, - m_is_freebsd): + m_is_bsd): """handle parses command set random passwords.""" - m_is_freebsd.return_value = False + m_is_bsd.return_value = False cloud = self.tmp_cloud(distro='ubuntu') valid_random_pwds = [ 'root:R', 'ubuntu:RANDOM'] cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}} - with mock.patch(MODPATH + 'util.subp') as m_subp: + with mock.patch(MODPATH + 'subp.subp') as m_subp: setpass.handle( 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) self.assertIn( diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py index 3c47289..6d4c014 100644 --- a/cloudinit/config/tests/test_snap.py +++ b/cloudinit/config/tests/test_snap.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import re -from six import StringIO +from io import StringIO from cloudinit.config.cc_snap import ( ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse, @@ -92,7 +92,7 @@ class TestAddAssertions(CiTestCase): super(TestAddAssertions, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('cloudinit.config.cc_snap.util.subp') + @mock.patch('cloudinit.config.cc_snap.subp.subp') def test_add_assertions_on_empty_list(self, m_subp): """When provided with an empty list, add_assertions does nothing.""" add_assertions([]) @@ -107,7 +107,7 @@ "assertion parameter was not a list or dict: I'm Not Valid", str(context_manager.exception)) - @mock.patch('cloudinit.config.cc_snap.util.subp') + @mock.patch('cloudinit.config.cc_snap.subp.subp') def test_add_assertions_adds_assertions_as_list(self, m_subp): """When provided with a list, add_assertions adds all assertions.""" self.assertEqual( @@ -130,7 +130,7 @@ self.assertEqual( util.load_file(compare_file), util.load_file(assert_file)) - @mock.patch('cloudinit.config.cc_snap.util.subp') + @mock.patch('cloudinit.config.cc_snap.subp.subp') def test_add_assertions_adds_assertions_as_dict(self, m_subp): """When provided with a dict, add_assertions adds all assertions.""" self.assertEqual( @@ -168,7 +168,7 @@ class TestRunCommands(CiTestCase): super(TestRunCommands, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('cloudinit.config.cc_snap.util.subp') + @mock.patch('cloudinit.config.cc_snap.subp.subp') def test_run_commands_on_empty_list(self, m_subp): """When provided with an empty list, run_commands does nothing.""" run_commands([]) @@ -310,6 +310,52 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin): {'snap': {'commands': {'01': 'also valid'}}}, schema) self.assertEqual('', self.logs.getvalue()) + @mock.patch('cloudinit.config.cc_snap.run_commands') + def test_schema_when_commands_values_are_invalid_type(self, _): + """Warnings when snap:commands values are invalid type (e.g. int)""" + validate_cloudconfig_schema( + {'snap': {'commands': [123]}}, schema) + validate_cloudconfig_schema( + {'snap': {'commands': {'01': 123}}}, schema) + self.assertEqual( + "WARNING: Invalid config:\n" + "snap.commands.0: 123 is not valid under any of the given" + " schemas\n" + "WARNING: Invalid config:\n" + "snap.commands.01: 123 is not valid under any of the given" + " schemas\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.run_commands') + def test_schema_when_commands_list_values_are_invalid_type(self, _): + """Warnings when snap:commands list values are wrong type (e.g. int)""" + validate_cloudconfig_schema( + {'snap': {'commands': [["snap", "install", 123]]}}, schema) + validate_cloudconfig_schema( + {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema) + self.assertEqual( + "WARNING: Invalid config:\n" + "snap.commands.0: ['snap', 'install', 123] is not valid under any" + " of the given schemas\n" + "WARNING: Invalid config:\n" + "snap.commands.01: ['snap', 'install', 123] is not valid under any" + " of the given schemas\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.run_commands') + def test_schema_when_assertions_values_are_invalid_type(self, _): + """Warnings when snap:assertions values are invalid type (e.g. 
int)""" + validate_cloudconfig_schema( + {'snap': {'assertions': [123]}}, schema) + validate_cloudconfig_schema( + {'snap': {'assertions': {'01': 123}}}, schema) + self.assertEqual( + "WARNING: Invalid config:\n" + "snap.assertions.0: 123 is not of type 'string'\n" + "WARNING: Invalid config:\n" + "snap.assertions.01: 123 is not of type 'string'\n", + self.logs.getvalue()) + @mock.patch('cloudinit.config.cc_snap.add_assertions') def test_warn_schema_assertions_is_not_list_or_dict(self, _): """Warn when snap:assertions config is not a list or dict.""" @@ -345,7 +391,7 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin): def test_duplicates_are_fine_array_array(self): """Duplicated commands array/array entries are allowed.""" self.assertSchemaValid( - {'commands': [["echo", "bye"], ["echo" "bye"]]}, + {'commands': [["echo", "bye"], ["echo", "bye"]]}, "command entries can be duplicate.") def test_duplicates_are_fine_array_string(self): @@ -431,7 +477,7 @@ class TestHandle(CiTestCase): self.assertEqual('HI\nMOM\n', util.load_file(outfile)) - @mock.patch('cloudinit.config.cc_snap.util.subp') + @mock.patch('cloudinit.config.cc_snap.subp.subp') def test_handle_adds_assertions(self, m_subp): """Any configured snap assertions are provided to add_assertions.""" assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) @@ -447,7 +493,7 @@ class TestHandle(CiTestCase): self.assertEqual( util.load_file(compare_file), util.load_file(assert_file)) - @mock.patch('cloudinit.config.cc_snap.util.subp') + @mock.patch('cloudinit.config.cc_snap.subp.subp') @skipUnlessJsonSchema() def test_handle_validates_schema(self, m_subp): """Any provided configuration is runs validate_cloudconfig_schema.""" diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py index 8c4161e..db7fb72 100644 --- a/cloudinit/config/tests/test_ubuntu_advantage.py +++ b/cloudinit/config/tests/test_ubuntu_advantage.py @@ -3,7 +3,7 @@ from cloudinit.config.cc_ubuntu_advantage import ( configure_ua, handle, maybe_install_ua_tools, schema) from cloudinit.config.schema import validate_cloudconfig_schema -from cloudinit import util +from cloudinit import subp from cloudinit.tests.helpers import ( CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) @@ -26,10 +26,10 @@ class TestConfigureUA(CiTestCase): super(TestConfigureUA, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('%s.util.subp' % MPATH) + @mock.patch('%s.subp.subp' % MPATH) def test_configure_ua_attach_error(self, m_subp): """Errors from ua attach command are raised.""" - m_subp.side_effect = util.ProcessExecutionError( + m_subp.side_effect = subp.ProcessExecutionError( 'Invalid token SomeToken') with self.assertRaises(RuntimeError) as context_manager: configure_ua(token='SomeToken') @@ -39,7 +39,7 @@ class TestConfigureUA(CiTestCase): 'Stdout: Invalid token SomeToken\nStderr: -', str(context_manager.exception)) - @mock.patch('%s.util.subp' % MPATH) + @mock.patch('%s.subp.subp' % MPATH) def test_configure_ua_attach_with_token(self, m_subp): """When token is provided, attach the machine to ua using the token.""" configure_ua(token='SomeToken') @@ -48,7 +48,7 @@ class TestConfigureUA(CiTestCase): 'DEBUG: Attaching to Ubuntu Advantage. 
ua attach SomeToken\n', self.logs.getvalue()) - @mock.patch('%s.util.subp' % MPATH) + @mock.patch('%s.subp.subp' % MPATH) def test_configure_ua_attach_on_service_error(self, m_subp): """all services should be enabled and then any failures raised""" @@ -56,7 +56,7 @@ class TestConfigureUA(CiTestCase): fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']] if cmd in fail_cmds and capture: svc = cmd[-1] - raise util.ProcessExecutionError( + raise subp.ProcessExecutionError( 'Invalid {} credentials'.format(svc.upper())) m_subp.side_effect = fake_subp @@ -83,7 +83,7 @@ class TestConfigureUA(CiTestCase): 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"', str(context_manager.exception)) - @mock.patch('%s.util.subp' % MPATH) + @mock.patch('%s.subp.subp' % MPATH) def test_configure_ua_attach_with_empty_services(self, m_subp): """When services is an empty list, do not auto-enable attach.""" configure_ua(token='SomeToken', enable=[]) @@ -92,7 +92,7 @@ class TestConfigureUA(CiTestCase): 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', self.logs.getvalue()) - @mock.patch('%s.util.subp' % MPATH) + @mock.patch('%s.subp.subp' % MPATH) def test_configure_ua_attach_with_specific_services(self, m_subp): """When services is a list, only enable specific services.""" configure_ua(token='SomeToken', enable=['fips']) @@ -105,7 +105,7 @@ class TestConfigureUA(CiTestCase): self.logs.getvalue()) @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) - @mock.patch('%s.util.subp' % MPATH) + @mock.patch('%s.subp.subp' % MPATH) def test_configure_ua_attach_with_string_services(self, m_subp): """When services is a string, treat it as a singleton list and warn""" configure_ua(token='SomeToken', enable='fips') @@ -119,7 +119,7 @@ class TestConfigureUA(CiTestCase): 'DEBUG: Attaching to Ubuntu Advantage. 
ua attach SomeToken\n', self.logs.getvalue()) - @mock.patch('%s.util.subp' % MPATH) + @mock.patch('%s.subp.subp' % MPATH) def test_configure_ua_attach_with_weird_services(self, m_subp): """When services is not a string or list, warn but still attach""" configure_ua(token='SomeToken', enable={'deffo': 'wont work'}) @@ -285,7 +285,7 @@ class TestMaybeInstallUATools(CiTestCase): super(TestMaybeInstallUATools, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('%s.util.which' % MPATH) + @mock.patch('%s.subp.which' % MPATH) def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): """Do nothing if ubuntu-advantage-tools already exists.""" m_which.return_value = '/usr/bin/ua' # already installed @@ -294,7 +294,7 @@ class TestMaybeInstallUATools(CiTestCase): 'Some apt error') maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError - @mock.patch('%s.util.which' % MPATH) + @mock.patch('%s.subp.which' % MPATH) def test_maybe_install_ua_tools_raises_update_errors(self, m_which): """maybe_install_ua_tools logs and raises apt update errors.""" m_which.return_value = None @@ -306,7 +306,7 @@ class TestMaybeInstallUATools(CiTestCase): self.assertEqual('Some apt error', str(context_manager.exception)) self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) - @mock.patch('%s.util.which' % MPATH) + @mock.patch('%s.subp.which' % MPATH) def test_maybe_install_ua_raises_install_errors(self, m_which): """maybe_install_ua_tools logs and raises package install errors.""" m_which.return_value = None @@ -320,7 +320,7 @@ class TestMaybeInstallUATools(CiTestCase): self.assertIn( 'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue()) - @mock.patch('%s.util.which' % MPATH) + @mock.patch('%s.subp.which' % MPATH) def test_maybe_install_ua_tools_happy_path(self, m_which): """maybe_install_ua_tools installs ubuntu-advantage-tools.""" m_which.return_value = None diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py index 4695269..504ba35 100644 --- a/cloudinit/config/tests/test_ubuntu_drivers.py +++ b/cloudinit/config/tests/test_ubuntu_drivers.py @@ -7,7 +7,7 @@ from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock from cloudinit.config.schema import ( SchemaValidationError, validate_cloudconfig_schema) from cloudinit.config import cc_ubuntu_drivers as drivers -from cloudinit.util import ProcessExecutionError +from cloudinit.subp import ProcessExecutionError MPATH = "cloudinit.config.cc_ubuntu_drivers." M_TMP_PATH = MPATH + "temp_utils.mkdtemp" @@ -16,6 +16,13 @@ OLD_UBUNTU_DRIVERS_ERROR_STDERR = ( "(choose from 'list', 'autoinstall', 'devices', 'debug')\n") +# The tests in this module call helper methods which are decorated with +# mock.patch. pylint doesn't understand that mock.patch passes parameters to +# the decorated function, so it incorrectly reports that we aren't passing +# values for all parameters. 
Instead of annotating every single call, we +# disable it for the entire module: +# pylint: disable=no-value-for-parameter + class AnyTempScriptAndDebconfFile(object): def __init__(self, tmp_dir, debconf_file): @@ -46,8 +53,8 @@ class TestUbuntuDrivers(CiTestCase): schema=drivers.schema, strict=True) @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "util.subp", return_value=('', '')) - @mock.patch(MPATH + "util.which", return_value=False) + @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.which", return_value=False) def _assert_happy_path_taken( self, config, m_which, m_subp, m_tmp): """Positive path test through handle. Package should be installed.""" @@ -73,8 +80,8 @@ class TestUbuntuDrivers(CiTestCase): self._assert_happy_path_taken(new_config) @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "util.subp") - @mock.patch(MPATH + "util.which", return_value=False) + @mock.patch(MPATH + "subp.subp") + @mock.patch(MPATH + "subp.which", return_value=False) def test_handle_raises_error_if_no_drivers_found( self, m_which, m_subp, m_tmp): """If ubuntu-drivers doesn't install any drivers, raise an error.""" @@ -102,8 +109,8 @@ class TestUbuntuDrivers(CiTestCase): self.assertIn('ubuntu-drivers found no drivers for installation', self.logs.getvalue()) - @mock.patch(MPATH + "util.subp", return_value=('', '')) - @mock.patch(MPATH + "util.which", return_value=False) + @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.which", return_value=False) def _assert_inert_with_config(self, config, m_which, m_subp): """Helper to reduce repetition when testing negative cases""" myCloud = mock.MagicMock() @@ -147,8 +154,8 @@ class TestUbuntuDrivers(CiTestCase): self.assertEqual(0, m_install_drivers.call_count) @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "util.subp", return_value=('', '')) - @mock.patch(MPATH + "util.which", return_value=True) + @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.which", return_value=True) def test_install_drivers_no_install_if_present( self, m_which, m_subp, m_tmp): """If 'ubuntu-drivers' is present, no package install should occur.""" @@ -174,8 +181,8 @@ class TestUbuntuDrivers(CiTestCase): self.assertEqual(0, pkg_install.call_count) @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "util.subp") - @mock.patch(MPATH + "util.which", return_value=False) + @mock.patch(MPATH + "subp.subp") + @mock.patch(MPATH + "subp.which", return_value=False) def test_install_drivers_handles_old_ubuntu_drivers_gracefully( self, m_which, m_subp, m_tmp): """Older ubuntu-drivers versions should emit message and raise error""" @@ -212,8 +219,8 @@ class TestUbuntuDriversWithVersion(TestUbuntuDrivers): install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123'] @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "util.subp", return_value=('', '')) - @mock.patch(MPATH + "util.which", return_value=False) + @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.which", return_value=False) def test_version_none_uses_latest(self, m_which, m_subp, m_tmp): tdir = self.tmp_dir() debconf_file = os.path.join(tdir, 'nvidia.template') diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py index f620b59..df89ddb 100644 --- a/cloudinit/config/tests/test_users_groups.py +++ b/cloudinit/config/tests/test_users_groups.py @@ -39,7 +39,7 @@ class TestHandleUsersGroups(CiTestCase): cloud = self.tmp_cloud( distro='ubuntu', sys_cfg=sys_cfg, 
metadata=metadata) cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertItemsEqual( + self.assertCountEqual( m_user.call_args_list, [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, shell='/bin/bash'), @@ -65,7 +65,7 @@ class TestHandleUsersGroups(CiTestCase): cloud = self.tmp_cloud( distro='freebsd', sys_cfg=sys_cfg, metadata=metadata) cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertItemsEqual( + self.assertCountEqual( m_fbsd_user.call_args_list, [mock.call('freebsd', groups='wheel', lock_passwd=True, shell='/bin/tcsh'), @@ -86,7 +86,7 @@ class TestHandleUsersGroups(CiTestCase): cloud = self.tmp_cloud( distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertItemsEqual( + self.assertCountEqual( m_user.call_args_list, [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, shell='/bin/bash'), @@ -107,7 +107,7 @@ class TestHandleUsersGroups(CiTestCase): cloud = self.tmp_cloud( distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertItemsEqual( + self.assertCountEqual( m_user.call_args_list, [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, shell='/bin/bash'), @@ -146,7 +146,7 @@ class TestHandleUsersGroups(CiTestCase): cloud = self.tmp_cloud( distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertItemsEqual( + self.assertCountEqual( m_user.call_args_list, [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, shell='/bin/bash'), diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py index 51c0958..8bac9c4 100644 --- a/cloudinit/cs_utils.py +++ b/cloudinit/cs_utils.py @@ -14,7 +14,7 @@ Having the server definition accessible by the VM can be useful in various ways. For example it is possible to easily determine from within the VM, which network interfaces are connected to public and which to private network. Another use is to pass some data to initial VM setup scripts, like setting the -hostname to the VM name or passing ssh public keys through server meta. +hostname to the VM name or passing SSH public keys through server meta. For more information take a look at the Server Context section of CloudSigma API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2b559fe..2537608 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -9,13 +9,13 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six -from six import StringIO - import abc import os import re import stat +import string +import urllib.parse +from io import StringIO from cloudinit import importer from cloudinit import log as logging @@ -25,9 +25,14 @@ from cloudinit.net import network_state from cloudinit.net import renderers from cloudinit import ssh_util from cloudinit import type_utils +from cloudinit import subp from cloudinit import util +from cloudinit.features import \ + ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES + from cloudinit.distros.parsers import hosts +from .networking import LinuxNetworking # Used when a cloud-config module can be run on all cloud-init distributions. 
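The hunks above are typical of the Python 2 cleanup threaded through this release: six shims give way to their stdlib equivalents, and the py2-only unittest method assertItemsEqual becomes py3's assertCountEqual. A minimal sketch of the substitution pattern, illustrative only and not part of the patch (normalize_groups is a hypothetical helper):

from io import StringIO  # previously: from six import StringIO


def normalize_groups(groups):
    # Python 3 has a single text type, so the old
    # isinstance(groups, six.string_types) test collapses to a plain
    # str check, mirroring the Distro.add_user hunk later in this patch.
    if isinstance(groups, str):
        groups = groups.split(",")
    return [group.strip() for group in groups]


buf = StringIO()
buf.write(",".join(normalize_groups("lxd, sudo")))
assert buf.getvalue() == "lxd,sudo"
assert normalize_groups(["lxd", "sudo"]) == ["lxd", "sudo"]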
@@ -35,12 +40,13 @@ from cloudinit.distros.parsers import hosts ALL_DISTROS = 'all' OSFAMILIES = { + 'alpine': ['alpine'], + 'arch': ['arch'], 'debian': ['debian', 'ubuntu'], - 'redhat': ['amazon', 'centos', 'fedora', 'rhel'], - 'gentoo': ['gentoo'], 'freebsd': ['freebsd'], + 'gentoo': ['gentoo'], + 'redhat': ['amazon', 'centos', 'fedora', 'rhel'], 'suse': ['opensuse', 'sles'], - 'arch': ['arch'], } LOG = logging.getLogger(__name__) @@ -52,9 +58,11 @@ _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') # Default NTP Client Configurations PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate'] +# Letters/Digits/Hyphen characters, for use in domain name validation +LDH_ASCII_CHARS = string.ascii_letters + string.digits + "-" -@six.add_metaclass(abc.ABCMeta) -class Distro(object): + +class Distro(metaclass=abc.ABCMeta): usr_lib_exec = "/usr/lib" hosts_fn = "/etc/hosts" @@ -64,11 +72,13 @@ class Distro(object): init_cmd = ['service'] # systemctl, service etc renderer_configs = {} _preferred_ntp_clients = None + networking_cls = LinuxNetworking def __init__(self, name, cfg, paths): self._paths = paths self._cfg = cfg self.name = name + self.networking = self.networking_cls() @abc.abstractmethod def install_packages(self, pkglist): @@ -145,7 +155,7 @@ class Distro(object): # Write it out # pylint: disable=assignment-from-no-return - # We have implementations in arch, freebsd and gentoo still + # We have implementations in arch and gentoo still dev_names = self._write_network(settings) # pylint: enable=assignment-from-no-return # Now try to bring them up @@ -223,8 +233,8 @@ class Distro(object): LOG.debug("Non-persistently setting the system hostname to %s", hostname) try: - util.subp(['hostname', hostname]) - except util.ProcessExecutionError: + subp.subp(['hostname', hostname]) + except subp.ProcessExecutionError: util.logexc(LOG, "Failed to non-persistently adjust the system " "hostname to %s", hostname) @@ -359,12 +369,12 @@ class Distro(object): LOG.debug("Attempting to run bring up interface %s using command %s", device_name, cmd) try: - (_out, err) = util.subp(cmd) + (_out, err) = subp.subp(cmd) if len(err): LOG.warning("Running %s resulted in stderr output: %s", cmd, err) return True - except util.ProcessExecutionError: + except subp.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) return False @@ -383,9 +393,12 @@ class Distro(object): def add_user(self, name, **kwargs): """ Add a user to the system using standard GNU tools + + This should be overridden on distros where useradd is not desirable or + not available. """ # XXX need to make add_user idempotent somehow as we - # still want to add groups or modify ssh keys on pre-existing + # still want to add groups or modify SSH keys on pre-existing users in the image. 
if util.is_user(name): LOG.info("User %s already exists, skipping.", name) @@ -429,7 +442,7 @@ class Distro(object): # support kwargs having groups=[list] or groups="g1,g2" groups = kwargs.get('groups') if groups: - if isinstance(groups, six.string_types): + if isinstance(groups, str): groups = groups.split(",") # remove any white spaces in group names, most likely @@ -478,7 +491,7 @@ class Distro(object): # Run the command LOG.debug("Adding user %s", name) try: - util.subp(useradd_cmd, logstring=log_useradd_cmd) + subp.subp(useradd_cmd, logstring=log_useradd_cmd) except Exception as e: util.logexc(LOG, "Failed to create user %s", name) raise e @@ -498,7 +511,7 @@ class Distro(object): # Run the command LOG.debug("Adding snap user %s", name) try: - (out, err) = util.subp(create_user_cmd, logstring=create_user_cmd, + (out, err) = subp.subp(create_user_cmd, logstring=create_user_cmd, capture=True) LOG.debug("snap create-user returned: %s:%s", out, err) jobj = util.load_json(out) @@ -511,9 +524,22 @@ class Distro(object): def create_user(self, name, **kwargs): """ - Creates users for the system using the GNU passwd tools. This - will work on an GNU system. This should be overriden on - distros where useradd is not desirable or not available. + Creates or partially updates the ``name`` user in the system. + + This defers the actual user creation to ``self.add_user`` or + ``self.add_snap_user``, and most of the keys in ``kwargs`` will be + processed there if and only if the user does not already exist. + + Once the existence of the ``name`` user has been ensured, this method + then processes these keys (for both just-created and pre-existing + users): + + * ``plain_text_passwd`` + * ``hashed_passwd`` + * ``lock_passwd`` + * ``sudo`` + * ``ssh_authorized_keys`` + * ``ssh_redirect_user`` """ # Add a snap user, if requested @@ -544,7 +570,7 @@ class Distro(object): if 'ssh_authorized_keys' in kwargs: # Try to handle this in a smart manner. keys = kwargs['ssh_authorized_keys'] - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] elif isinstance(keys, dict): keys = list(keys.values()) @@ -561,7 +587,7 @@ class Distro(object): cloud_keys = kwargs.get('cloud_public_ssh_keys', []) if not cloud_keys: LOG.warning( - 'Unable to disable ssh logins for %s given' + 'Unable to disable SSH logins for %s given' ' ssh_redirect_user: %s. No cloud public-keys present.', name, kwargs['ssh_redirect_user']) else: @@ -580,20 +606,21 @@ class Distro(object): # passwd must use short '-l' due to SLES11 lacking long form '--lock' lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name]) try: - cmd = next(l for l in lock_tools if util.which(l[0])) - except StopIteration: + cmd = next(tool for tool in lock_tools if subp.which(tool[0])) + except StopIteration as e: raise RuntimeError(( "Unable to lock user account '%s'. No tools available. 
" - " Tried: %s.") % (name, [c[0] for c in lock_tools])) + " Tried: %s.") % (name, [c[0] for c in lock_tools]) + ) from e try: - util.subp(cmd) + subp.subp(cmd) except Exception as e: util.logexc(LOG, 'Failed to disable password for user %s', name) raise e def expire_passwd(self, user): try: - util.subp(['passwd', '--expire', user]) + subp.subp(['passwd', '--expire', user]) except Exception as e: util.logexc(LOG, "Failed to set 'expire' for %s", user) raise e @@ -609,7 +636,7 @@ class Distro(object): cmd.append('-e') try: - util.subp(cmd, pass_string, logstring="chpasswd for %s" % user) + subp.subp(cmd, pass_string, logstring="chpasswd for %s" % user) except Exception as e: util.logexc(LOG, "Failed to set password for %s", user) raise e @@ -668,7 +695,7 @@ class Distro(object): if isinstance(rules, (list, tuple)): for rule in rules: lines.append("%s %s" % (user, rule)) - elif isinstance(rules, six.string_types): + elif isinstance(rules, str): lines.append("%s %s" % (user, rules)) else: msg = "Can not create sudoers rule addition with type %r" @@ -706,7 +733,7 @@ class Distro(object): LOG.warning("Skipping creation of existing group '%s'", name) else: try: - util.subp(group_add_cmd) + subp.subp(group_add_cmd) LOG.info("Created new group %s", name) except Exception: util.logexc(LOG, "Failed to create group %s", name) @@ -719,10 +746,115 @@ class Distro(object): "; user does not exist.", member, name) continue - util.subp(['usermod', '-a', '-G', name, member]) + subp.subp(['usermod', '-a', '-G', name, member]) LOG.info("Added user '%s' to group '%s'", member, name) +def _apply_hostname_transformations_to_url(url: str, transformations: list): + """ + Apply transformations to a URL's hostname, return transformed URL. + + This is a separate function because unwrapping and rewrapping only the + hostname portion of a URL is complex. + + :param url: + The URL to operate on. + :param transformations: + A list of ``(str) -> Optional[str]`` functions, which will be applied + in order to the hostname portion of the URL. If any function + (regardless of ordering) returns None, ``url`` will be returned without + any modification. + + :return: + A string whose value is ``url`` with the hostname ``transformations`` + applied, or ``None`` if ``url`` is unparseable. + """ + try: + parts = urllib.parse.urlsplit(url) + except ValueError: + # If we can't even parse the URL, we shouldn't use it for anything + return None + new_hostname = parts.hostname + if new_hostname is None: + # The URL given doesn't have a hostname component, so (a) we can't + # transform it, and (b) it won't work as a mirror; return None. + return None + + for transformation in transformations: + new_hostname = transformation(new_hostname) + if new_hostname is None: + # If a transformation returns None, that indicates we should abort + # processing and return `url` unmodified + return url + + new_netloc = new_hostname + if parts.port is not None: + new_netloc = "{}:{}".format(new_netloc, parts.port) + return urllib.parse.urlunsplit(parts._replace(netloc=new_netloc)) + + +def _sanitize_mirror_url(url: str): + """ + Given a mirror URL, replace or remove any invalid URI characters. 
+ + This performs the following actions on the URL's hostname: + * Checks if it is an IP address, returning the URL immediately if it is + * Converts it to its IDN form (see below for details) + * Replaces any non-Letters/Digits/Hyphen (LDH) characters in it with + hyphens + * Removes any leading/trailing hyphens from each domain name label + + Before we replace any invalid domain name characters, we first need to + ensure that any valid non-ASCII characters in the hostname will not be + replaced, by ensuring the hostname is in its Internationalized domain name + (IDN) representation (see RFC 5890). This conversion has to be applied to + the whole hostname (rather than just the substitution variables), because + the Punycode algorithm used by IDNA transcodes each part of the hostname as + a whole string (rather than encoding individual characters). It cannot be + applied to the whole URL, because (a) the Punycode algorithm expects to + operate on domain names so doesn't output a valid URL, and (b) non-ASCII + characters in non-hostname parts of the URL aren't encoded via Punycode. + + To put this in RFC 5890's terminology: before we remove or replace any + characters from our domain name (which we do to ensure that each label is a + valid LDH Label), we first ensure each label is in its A-label form. + + (Note that Python's builtin idna encoding is actually IDNA2003, not + IDNA2008. This changes the specifics of how some characters are encoded to + ASCII, but doesn't affect the logic here.) + + :param url: + The URL to operate on. + + :return: + A sanitized version of the URL, which will have been IDNA encoded if + necessary, or ``None`` if the generated string is not a parseable URL. + """ + # Acceptable characters are LDH characters, plus "." to separate each label + acceptable_chars = LDH_ASCII_CHARS + "." + transformations = [ + # This is an IP address, not a hostname, so no need to apply the + # transformations + lambda hostname: None if net.is_ip_address(hostname) else hostname, + + # Encode with IDNA to get the correct characters (as `bytes`), then + # decode with ASCII so we return a `str` + lambda hostname: hostname.encode('idna').decode('ascii'), + + # Replace any unacceptable characters with "-" + lambda hostname: ''.join( + c if c in acceptable_chars else "-" for c in hostname + ), + + # Drop leading/trailing hyphens from each part of the hostname + lambda hostname: '.'.join( + part.strip('-') for part in hostname.split('.') + ), + ] + + return _apply_hostname_transformations_to_url(url, transformations) + + def _get_package_mirror_info(mirror_info, data_source=None, mirror_filter=util.search_for_mirror): # given an arch specific 'mirror_info' entry (from package_mirrors) @@ -738,7 +870,12 @@ def _get_package_mirror_info(mirror_info, data_source=None, # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) # the region is us-east-1. 
so region = az[0:-1] if _EC2_AZ_RE.match(data_source.availability_zone): - subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] + ec2_region = data_source.availability_zone[0:-1] + + if ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES: + subst['ec2_region'] = "%s" % ec2_region + elif data_source.platform_type == "ec2": + subst['ec2_region'] = "%s" % ec2_region if data_source and data_source.region: subst['region'] = data_source.region @@ -751,9 +888,13 @@ def _get_package_mirror_info(mirror_info, data_source=None, mirrors = [] for tmpl in searchlist: try: - mirrors.append(tmpl % subst) + mirror = tmpl % subst except KeyError: - pass + continue + + mirror = _sanitize_mirror_url(mirror) + if mirror is not None: + mirrors.append(mirror) found = mirror_filter(mirrors) if found: diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py new file mode 100644 index 0000000..e42443f --- /dev/null +++ b/cloudinit/distros/alpine.py @@ -0,0 +1,165 @@ +# Copyright (C) 2016 Matt Dainty +# Copyright (C) 2020 Dermot Bradley +# +# Author: Matt Dainty +# Author: Dermot Bradley +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import distros +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import subp +from cloudinit import util + +from cloudinit.distros.parsers.hostname import HostnameConf + +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + +NETWORK_FILE_HEADER = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} + +""" + + +class Distro(distros.Distro): + init_cmd = ['rc-service'] # init scripts + locale_conf_fn = "/etc/profile.d/locale.sh" + network_conf_fn = "/etc/network/interfaces" + renderer_configs = { + "eni": {"eni_path": network_conf_fn, + "eni_header": NETWORK_FILE_HEADER} + } + + def __init__(self, name, cfg, paths): + distros.Distro.__init__(self, name, cfg, paths) + # This will be used to restrict certain + # calls from repeatedly happening (when they + # should only happen say once per instance...) + self._runner = helpers.Runners(paths) + self.default_locale = 'C.UTF-8' + self.osfamily = 'alpine' + cfg['ssh_svcname'] = 'sshd' + + def get_locale(self): + """The default locale for Alpine Linux is different from + cloud-init's DataSource default. 
+ """ + return self.default_locale + + def apply_locale(self, locale, out_fn=None): + # Alpine has limited locale support due to musl library limitations + + if not locale: + locale = self.default_locale + if not out_fn: + out_fn = self.locale_conf_fn + + lines = [ + "#", + "# This file is created by cloud-init once per new instance boot", + "#", + "export CHARSET=UTF-8", + "export LANG=%s" % locale, + "export LC_COLLATE=C", + "", + ] + util.write_file(out_fn, "\n".join(lines), 0o644) + + def install_packages(self, pkglist): + self.update_package_sources() + self.package_command('add', pkgs=pkglist) + + def _write_network_config(self, netconfig): + return self._supported_write_network_config(netconfig) + + def _bring_up_interfaces(self, device_names): + use_all = False + for d in device_names: + if d == 'all': + use_all = True + if use_all: + return distros.Distro._bring_up_interface(self, '-a') + else: + return distros.Distro._bring_up_interfaces(self, device_names) + + def _write_hostname(self, your_hostname, out_fn): + conf = None + try: + # Try to update the previous one + # so lets see if we can read it first. + conf = self._read_hostname_conf(out_fn) + except IOError: + pass + if not conf: + conf = HostnameConf('') + conf.set_hostname(your_hostname) + util.write_file(out_fn, str(conf), 0o644) + + def _read_system_hostname(self): + sys_hostname = self._read_hostname(self.hostname_conf_fn) + return (self.hostname_conf_fn, sys_hostname) + + def _read_hostname_conf(self, filename): + conf = HostnameConf(util.load_file(filename)) + conf.parse() + return conf + + def _read_hostname(self, filename, default=None): + hostname = None + try: + conf = self._read_hostname_conf(filename) + hostname = conf.hostname + except IOError: + pass + if not hostname: + return default + return hostname + + def _get_localhost_ip(self): + return "127.0.1.1" + + def set_timezone(self, tz): + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + cmd = ['apk'] + # Redirect output + cmd.append("--quiet") + + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + if command: + cmd.append(command) + + pkglist = util.expand_package_list('%s-%s', pkgs) + cmd.extend(pkglist) + + # Allow the output of this to flow outwards (ie not be captured) + subp.subp(cmd, capture=False) + + def update_package_sources(self): + self._runner.run("update-sources", self.package_command, + ["update"], freq=PER_INSTANCE) + + @property + def preferred_ntp_clients(self): + """Allow distro to determine the preferred ntp client list""" + if not self._preferred_ntp_clients: + self._preferred_ntp_clients = ['chrony', 'ntp'] + + return self._preferred_ntp_clients + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 9f89c5f..967be16 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -8,6 +8,7 @@ from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging from cloudinit import util +from cloudinit import subp from cloudinit.distros import net_util from cloudinit.distros.parsers.hostname import HostnameConf @@ -44,7 +45,7 @@ class Distro(distros.Distro): def apply_locale(self, locale, out_fn=None): if not out_fn: out_fn = self.locale_conf_fn - util.subp(['locale-gen', '-G', locale], capture=False) + subp.subp(['locale-gen', '-G', locale], capture=False) # "" provides trailing 
newline during join lines = [ util.make_header(), @@ -60,9 +61,9 @@ class Distro(distros.Distro): def _write_network_config(self, netconfig): try: return self._supported_write_network_config(netconfig) - except RendererNotFoundError: + except RendererNotFoundError as e: # Fall back to old _write_network - raise NotImplementedError + raise NotImplementedError from e def _write_network(self, settings): entries = net_util.translate_network(settings) @@ -76,11 +77,11 @@ class Distro(distros.Distro): def _enable_interface(self, device_name): cmd = ['netctl', 'reenable', device_name] try: - (_out, err) = util.subp(cmd) + (_out, err) = subp.subp(cmd) if len(err): LOG.warning("Running %s resulted in stderr output: %s", cmd, err) - except util.ProcessExecutionError: + except subp.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) def _bring_up_interface(self, device_name): @@ -88,12 +89,12 @@ class Distro(distros.Distro): LOG.debug("Attempting to run bring up interface %s using command %s", device_name, cmd) try: - (_out, err) = util.subp(cmd) + (_out, err) = subp.subp(cmd) if len(err): LOG.warning("Running %s resulted in stderr output: %s", cmd, err) return True - except util.ProcessExecutionError: + except subp.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) return False @@ -158,7 +159,7 @@ class Distro(distros.Distro): cmd.extend(pkglist) # Allow the output of this to flow outwards (ie not be captured) - util.subp(cmd, capture=False) + subp.subp(cmd, capture=False) def update_package_sources(self): self._runner.run("update-sources", self.package_command, @@ -173,8 +174,8 @@ def _render_network(entries, target="/", conf_dir="etc/netctl", devs = [] nameservers = [] - resolv_conf = util.target_path(target, resolv_conf) - conf_dir = util.target_path(target, conf_dir) + resolv_conf = subp.target_path(target, resolv_conf) + conf_dir = subp.target_path(target, conf_dir) for (dev, info) in entries.items(): if dev == 'lo': diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py new file mode 100644 index 0000000..2ed7a7d --- /dev/null +++ b/cloudinit/distros/bsd.py @@ -0,0 +1,129 @@ +import platform + +from cloudinit import distros +from cloudinit.distros import bsd_utils +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import net +from cloudinit import subp +from cloudinit import util +from .networking import BSDNetworking + +LOG = logging.getLogger(__name__) + + +class BSD(distros.Distro): + networking_cls = BSDNetworking + hostname_conf_fn = '/etc/rc.conf' + rc_conf_fn = "/etc/rc.conf" + + # Set in BSD distro subclasses + group_add_cmd_prefix = [] + pkg_cmd_install_prefix = [] + pkg_cmd_remove_prefix = [] + # There is no update/upgrade on OpenBSD + pkg_cmd_update_prefix = None + pkg_cmd_upgrade_prefix = None + + def __init__(self, name, cfg, paths): + super().__init__(name, cfg, paths) + # This will be used to restrict certain + # calls from repeatedly happening (when they + # should only happen say once per instance...) 
+ self._runner = helpers.Runners(paths) + cfg['ssh_svcname'] = 'sshd' + self.osfamily = platform.system().lower() + + def _read_system_hostname(self): + sys_hostname = self._read_hostname(self.hostname_conf_fn) + return (self.hostname_conf_fn, sys_hostname) + + def _read_hostname(self, filename, default=None): + return bsd_utils.get_rc_config_value('hostname') + + def _get_add_member_to_group_cmd(self, member_name, group_name): + raise NotImplementedError('Return list cmd to add member to group') + + def _write_hostname(self, hostname, filename): + bsd_utils.set_rc_config_value('hostname', hostname, fn='/etc/rc.conf') + + def create_group(self, name, members=None): + if util.is_group(name): + LOG.warning("Skipping creation of existing group '%s'", name) + else: + group_add_cmd = self.group_add_cmd_prefix + [name] + try: + subp.subp(group_add_cmd) + LOG.info("Created new group %s", name) + except Exception: + util.logexc(LOG, "Failed to create group %s", name) + + if not members: + members = [] + for member in members: + if not util.is_user(member): + LOG.warning("Unable to add group member '%s' to group '%s'" + "; user does not exist.", member, name) + continue + try: + subp.subp(self._get_add_member_to_group_cmd(member, name)) + LOG.info("Added user '%s' to group '%s'", member, name) + except Exception: + util.logexc(LOG, "Failed to add user '%s' to group '%s'", + member, name) + + def generate_fallback_config(self): + nconf = {'config': [], 'version': 1} + for mac, name in net.get_interfaces_by_mac().items(): + nconf['config'].append( + {'type': 'physical', 'name': name, + 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]}) + return nconf + + def install_packages(self, pkglist): + self.update_package_sources() + self.package_command('install', pkgs=pkglist) + + def _get_pkg_cmd_environ(self): + """Return environment vars used in *BSD package_command operations""" + raise NotImplementedError('BSD subclasses return a dict of env vars') + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + if command == 'install': + cmd = self.pkg_cmd_install_prefix + elif command == 'remove': + cmd = self.pkg_cmd_remove_prefix + elif command == 'update': + if not self.pkg_cmd_update_prefix: + return + cmd = self.pkg_cmd_update_prefix + elif command == 'upgrade': + if not self.pkg_cmd_upgrade_prefix: + return + cmd = self.pkg_cmd_upgrade_prefix + + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + pkglist = util.expand_package_list('%s-%s', pkgs) + cmd.extend(pkglist) + + # Allow the output of this to flow outwards (ie not be captured) + subp.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False) + + def _write_network_config(self, netconfig): + return self._supported_write_network_config(netconfig) + + def set_timezone(self, tz): + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) + + def apply_locale(self, locale, out_fn=None): + LOG.debug('Cannot set the locale.') + + def apply_network_config_names(self, netconfig): + LOG.debug('Cannot rename network interface.') diff --git a/cloudinit/distros/bsd_utils.py b/cloudinit/distros/bsd_utils.py new file mode 100644 index 0000000..079d0d5 --- /dev/null +++ b/cloudinit/distros/bsd_utils.py @@ -0,0 +1,50 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
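The BSD.package_command introduced above is a small template-method dispatch: each subclass declares only the command prefixes for its packaging tool (the later hunks show FreeBSD's pkg and NetBSD's pkgin), and operations whose prefix is left unset are skipped outright, since OpenBSD has no update/upgrade. A stripped-down sketch of that dispatch, with hypothetical class names, assuming the same prefix convention:

from typing import List, Optional


class PkgTool:
    # Subclasses fill these in; None marks an unsupported operation,
    # matching pkg_cmd_update_prefix = None on the BSD base class.
    install_prefix: List[str] = []
    update_prefix: Optional[List[str]] = None

    def build(self, command: str, pkgs: List[str]) -> Optional[List[str]]:
        if command == 'install':
            prefix = self.install_prefix
        elif command == 'update':
            if not self.update_prefix:
                return None  # quietly skip, as package_command does
            prefix = self.update_prefix
        else:
            raise ValueError('unknown command: %s' % command)
        return prefix + pkgs


class FreeBSDPkgTool(PkgTool):
    install_prefix = ['pkg', 'install']
    update_prefix = ['pkg', 'update']


assert FreeBSDPkgTool().build('install', ['tmux']) == ['pkg', 'install', 'tmux']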
+ +import shlex + +from cloudinit import util + +# On NetBSD, /etc/rc.conf comes with an if block: +# if [ -r /etc/defaults/rc.conf ]; then +# as a consequence, the file is not a regular key/value list +# anymore and we cannot use cloudinit.distros.parsers.sys_conf. +# This module comes with a more naive parser which is able to +# preserve these if blocks. + + +def _unquote(value): + if value[0] == value[-1] and value[0] in ['"', "'"]: + return value[1:-1] + return value + + +def get_rc_config_value(key, fn='/etc/rc.conf'): + key_prefix = '{}='.format(key) + for line in util.load_file(fn).splitlines(): + if line.startswith(key_prefix): + value = line.replace(key_prefix, '') + return _unquote(value) + + +def set_rc_config_value(key, value, fn='/etc/rc.conf'): + lines = [] + done = False + value = shlex.quote(value) + original_content = util.load_file(fn) + for line in original_content.splitlines(): + if '=' in line: + k, v = line.split('=', 1) + if k == key: + v = value + done = True + lines.append('='.join([k, v])) + else: + lines.append(line) + if not done: + lines.append('='.join([key, value])) + new_content = '\n'.join(lines) + '\n' + if new_content != original_content: + util.write_file(fn, new_content) + + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index cf082c7..844aaf2 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -13,6 +13,7 @@ import os from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging +from cloudinit import subp from cloudinit import util from cloudinit.distros.parsers.hostname import HostnameConf @@ -197,7 +198,7 @@ class Distro(distros.Distro): # Allow the output of this to flow outwards (ie not be captured) util.log_time(logfunc=LOG.debug, msg="apt-%s [%s]" % (command, ' '.join(cmd)), - func=util.subp, + func=subp.subp, args=(cmd,), kwargs={'env': e, 'capture': False}) def update_package_sources(self): @@ -205,8 +206,7 @@ class Distro(distros.Distro): ["update"], freq=PER_INSTANCE) def get_primary_arch(self): - (arch, _err) = util.subp(['dpkg', '--print-architecture']) - return str(arch).strip() + return util.get_dpkg_architecture() def _get_wrapper_prefix(cmd, mode): @@ -215,7 +215,7 @@ def _get_wrapper_prefix(cmd, mode): if (util.is_true(mode) or (str(mode).lower() == "auto" and cmd[0] and - util.which(cmd[0]))): + subp.which(cmd[0]))): return cmd else: return [] @@ -270,7 +270,7 @@ def update_locale_conf(locale, sys_path, keyname='LANG'): """Update system locale config""" LOG.debug('Updating %s with locale setting %s=%s', sys_path, keyname, locale) - util.subp( + subp.subp( ['update-locale', '--locale-file=' + sys_path, '%s=%s' % (keyname, locale)], capture=False) @@ -292,7 +292,7 @@ def regenerate_locale(locale, sys_path, keyname='LANG'): # finally, trigger regeneration LOG.debug('Generating locales for %s', locale) - util.subp(['locale-gen', locale], capture=False) + subp.subp(['locale-gen', locale], capture=False) # vi: ts=4 expandtab diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 8e5ae96..dde34d4 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -5,133 +5,28 @@ # This file is part of cloud-init. See LICENSE file for license information. 
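A short usage sketch of the bsd_utils helpers above (the file contents are invented for illustration, and the snippet assumes the patched tree is importable). The point of the line-oriented parser is that the NetBSD if block round-trips untouched:

import tempfile

from cloudinit.distros import bsd_utils

with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as f:
    f.write('if [ -r /etc/defaults/rc.conf ]; then\n')
    f.write('hostname="vm0.example.org"\n')
    rc_conf = f.name

# get_rc_config_value strips the shell quoting on the way out
assert bsd_utils.get_rc_config_value('hostname', fn=rc_conf) == 'vm0.example.org'

# set_rc_config_value rewrites only the matching key=value line, so
# the if block survives the edit
bsd_utils.set_rc_config_value('hostname', 'vm1', fn=rc_conf)
assert 'if [ -r /etc/defaults/rc.conf ]; then' in open(rc_conf).read()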
import os -import six -from six import StringIO - import re +from io import StringIO -from cloudinit import distros -from cloudinit import helpers +import cloudinit.distros.bsd from cloudinit import log as logging -from cloudinit import ssh_util +from cloudinit import subp from cloudinit import util - -from cloudinit.distros import net_util -from cloudinit.distros.parsers.resolv_conf import ResolvConf - from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) -class Distro(distros.Distro): +class Distro(cloudinit.distros.bsd.BSD): usr_lib_exec = '/usr/local/lib' - rc_conf_fn = "/etc/rc.conf" login_conf_fn = '/etc/login.conf' login_conf_fn_bak = '/etc/login.conf.orig' - resolv_conf_fn = '/etc/resolv.conf' ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users' - default_primary_nic = 'hn0' - - def __init__(self, name, cfg, paths): - distros.Distro.__init__(self, name, cfg, paths) - # This will be used to restrict certain - # calls from repeatly happening (when they - # should only happen say once per instance...) - self._runner = helpers.Runners(paths) - self.osfamily = 'freebsd' - self.ipv4_pat = re.compile(r"\s+inet\s+\d+[.]\d+[.]\d+[.]\d+") - cfg['ssh_svcname'] = 'sshd' - - # Updates a key in /etc/rc.conf. - def updatercconf(self, key, value): - LOG.debug("Checking %s for: %s = %s", self.rc_conf_fn, key, value) - conf = self.loadrcconf() - config_changed = False - if key not in conf: - LOG.debug("Adding key in %s: %s = %s", self.rc_conf_fn, key, - value) - conf[key] = value - config_changed = True - else: - for item in conf.keys(): - if item == key and conf[item] != value: - conf[item] = value - LOG.debug("Changing key in %s: %s = %s", self.rc_conf_fn, - key, value) - config_changed = True - - if config_changed: - LOG.info("Writing %s", self.rc_conf_fn) - buf = StringIO() - for keyval in conf.items(): - buf.write('%s="%s"\n' % keyval) - util.write_file(self.rc_conf_fn, buf.getvalue()) - - # Load the contents of /etc/rc.conf and store all keys in a dict. Make sure - # quotes are ignored: - # hostname="bla" - def loadrcconf(self): - RE_MATCH = re.compile(r'^(\w+)\s*=\s*(.*)\s*') - conf = {} - lines = util.load_file(self.rc_conf_fn).splitlines() - for line in lines: - m = RE_MATCH.match(line) - if not m: - LOG.debug("Skipping line from /etc/rc.conf: %s", line) - continue - key = m.group(1).rstrip() - val = m.group(2).rstrip() - # Kill them quotes (not completely correct, aka won't handle - # quoted values, but should be ok ...) - if val[0] in ('"', "'"): - val = val[1:] - if val[-1] in ('"', "'"): - val = val[0:-1] - if len(val) == 0: - LOG.debug("Skipping empty value from /etc/rc.conf: %s", line) - continue - conf[key] = val - return conf - - def readrcconf(self, key): - conf = self.loadrcconf() - try: - val = conf[key] - except KeyError: - val = None - return val - - # NOVA will inject something like eth0, rewrite that to use the FreeBSD - # adapter. Since this adapter is based on the used driver, we need to - # figure out which interfaces are available. On KVM platforms this is - # vtnet0, where Xen would use xn0. 
- def getnetifname(self, dev): - LOG.debug("Translating network interface %s", dev) - if dev.startswith('lo'): - return dev - - n = re.search(r'\d+$', dev) - index = n.group(0) - - (out, _err) = util.subp(['ifconfig', '-a']) - ifconfigoutput = [x for x in (out.strip()).splitlines() - if len(x.split()) > 0] - bsddev = 'NOT_FOUND' - for line in ifconfigoutput: - m = re.match(r'^\w+', line) - if m: - if m.group(0).startswith('lo'): - continue - # Just settle with the first non-lo adapter we find, since it's - # rather unlikely there will be multiple nicdrivers involved. - bsddev = m.group(0) - break - - # Replace the index with the one we're after. - bsddev = re.sub(r'\d+$', index, bsddev) - LOG.debug("Using network interface %s", bsddev) - return bsddev + group_add_cmd_prefix = ['pw', 'group', 'add'] + pkg_cmd_install_prefix = ["pkg", "install"] + pkg_cmd_remove_prefix = ["pkg", "remove"] + pkg_cmd_update_prefix = ["pkg", "update"] + pkg_cmd_upgrade_prefix = ["pkg", "upgrade"] def _select_hostname(self, hostname, fqdn): # Should be FQDN if available. See rc.conf(5) in FreeBSD @@ -139,47 +34,8 @@ class Distro(distros.Distro): return fqdn return hostname - def _read_system_hostname(self): - sys_hostname = self._read_hostname(filename=None) - return ('rc.conf', sys_hostname) - - def _read_hostname(self, filename, default=None): - hostname = None - try: - hostname = self.readrcconf('hostname') - except IOError: - pass - if not hostname: - return default - return hostname - - def _write_hostname(self, hostname, filename): - self.updatercconf('hostname', hostname) - - def create_group(self, name, members): - group_add_cmd = ['pw', '-n', name] - if util.is_group(name): - LOG.warning("Skipping creation of existing group '%s'", name) - else: - try: - util.subp(group_add_cmd) - LOG.info("Created new group %s", name) - except Exception as e: - util.logexc(LOG, "Failed to create group %s", name) - raise e - - if len(members) > 0: - for member in members: - if not util.is_user(member): - LOG.warning("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) - continue - try: - util.subp(['pw', 'usermod', '-n', name, '-G', member]) - LOG.info("Added user '%s' to group '%s'", member, name) - except Exception: - util.logexc(LOG, "Failed to add user '%s' to group '%s'", - member, name) + def _get_add_member_to_group_cmd(self, member_name, group_name): + return ['pw', 'usermod', '-n', member_name, '-G', group_name] def add_user(self, name, **kwargs): if util.is_user(name): @@ -204,8 +60,7 @@ class Distro(distros.Distro): } for key, val in kwargs.items(): - if (key in pw_useradd_opts and val and - isinstance(val, six.string_types)): + if key in pw_useradd_opts and val and isinstance(val, str): pw_useradd_cmd.extend([pw_useradd_opts[key], val]) elif key in pw_useradd_flags and val: @@ -224,10 +79,10 @@ class Distro(distros.Distro): # Run the command LOG.info("Adding user %s", name) try: - util.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd) - except Exception as e: + subp.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd) + except Exception: util.logexc(LOG, "Failed to create user %s", name) - raise e + raise # Set the password if it is provided # For security consideration, only hashed passwd is assumed passwd_val = kwargs.get('passwd', None) @@ -236,10 +91,10 @@ class Distro(distros.Distro): def expire_passwd(self, user): try: - util.subp(['pw', 'usermod', user, '-p', '01-Jan-1970']) - except Exception as e: + subp.subp(['pw', 'usermod', user, '-p', '01-Jan-1970']) + except 
Exception: util.logexc(LOG, "Failed to set pw expiration for %s", user) - raise e + raise def set_passwd(self, user, passwd, hashed=False): if hashed: @@ -248,346 +103,21 @@ class Distro(distros.Distro): hash_opt = "-h" try: - util.subp(['pw', 'usermod', user, hash_opt, '0'], + subp.subp(['pw', 'usermod', user, hash_opt, '0'], data=passwd, logstring="chpasswd for %s" % user) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to set password for %s", user) - raise e + raise def lock_passwd(self, name): try: - util.subp(['pw', 'usermod', name, '-h', '-']) - except Exception as e: + subp.subp(['pw', 'usermod', name, '-h', '-']) + except Exception: util.logexc(LOG, "Failed to lock user %s", name) - raise e - - def create_user(self, name, **kwargs): - self.add_user(name, **kwargs) - - # Set password if plain-text password provided and non-empty - if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']: - self.set_passwd(name, kwargs['plain_text_passwd']) - - # Default locking down the account. 'lock_passwd' defaults to True. - # lock account unless lock_password is False. - if kwargs.get('lock_passwd', True): - self.lock_passwd(name) - - # Configure sudo access - if 'sudo' in kwargs and kwargs['sudo'] is not False: - self.write_sudo_rules(name, kwargs['sudo']) - - # Import SSH keys - if 'ssh_authorized_keys' in kwargs: - keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, options=None) - - @staticmethod - def get_ifconfig_list(): - cmd = ['ifconfig', '-l'] - (nics, err) = util.subp(cmd, rcs=[0, 1]) - if len(err): - LOG.warning("Error running %s: %s", cmd, err) - return None - return nics - - @staticmethod - def get_ifconfig_ifname_out(ifname): - cmd = ['ifconfig', ifname] - (if_result, err) = util.subp(cmd, rcs=[0, 1]) - if len(err): - LOG.warning("Error running %s: %s", cmd, err) - return None - return if_result - - @staticmethod - def get_ifconfig_ether(): - cmd = ['ifconfig', '-l', 'ether'] - (nics, err) = util.subp(cmd, rcs=[0, 1]) - if len(err): - LOG.warning("Error running %s: %s", cmd, err) - return None - return nics - - @staticmethod - def get_interface_mac(ifname): - if_result = Distro.get_ifconfig_ifname_out(ifname) - for item in if_result.splitlines(): - if item.find('ether ') != -1: - mac = str(item.split()[1]) - if mac: - return mac - - @staticmethod - def get_devicelist(): - nics = Distro.get_ifconfig_list() - return nics.split() - - @staticmethod - def get_ipv6(): - ipv6 = [] - nics = Distro.get_devicelist() - for nic in nics: - if_result = Distro.get_ifconfig_ifname_out(nic) - for item in if_result.splitlines(): - if item.find("inet6 ") != -1 and item.find("scopeid") == -1: - ipv6.append(nic) - return ipv6 - - def get_ipv4(self): - ipv4 = [] - nics = Distro.get_devicelist() - for nic in nics: - if_result = Distro.get_ifconfig_ifname_out(nic) - for item in if_result.splitlines(): - print(item) - if self.ipv4_pat.match(item): - ipv4.append(nic) - return ipv4 - - def is_up(self, ifname): - if_result = Distro.get_ifconfig_ifname_out(ifname) - pat = "^" + ifname - for item in if_result.splitlines(): - if re.match(pat, item): - flags = item.split('<')[1].split('>')[0] - if flags.find("UP") != -1: - return True - - def _get_current_rename_info(self, check_downable=True): - """Collect information necessary for rename_interfaces.""" - names = Distro.get_devicelist() - bymac = {} - for n in names: - bymac[Distro.get_interface_mac(n)] = { - 'name': n, 'up': self.is_up(n), 'downable': None} - - nics_with_addresses = set() - if 
check_downable: - nics_with_addresses = set(self.get_ipv4() + self.get_ipv6()) - - for d in bymac.values(): - d['downable'] = (d['up'] is False or - d['name'] not in nics_with_addresses) - - return bymac - - def _rename_interfaces(self, renames): - if not len(renames): - LOG.debug("no interfaces to rename") - return - - current_info = self._get_current_rename_info() - - cur_bymac = {} - for mac, data in current_info.items(): - cur = data.copy() - cur['mac'] = mac - cur_bymac[mac] = cur - - def update_byname(bymac): - return dict((data['name'], data) - for data in bymac.values()) - - def rename(cur, new): - util.subp(["ifconfig", cur, "name", new], capture=True) - - def down(name): - util.subp(["ifconfig", name, "down"], capture=True) - - def up(name): - util.subp(["ifconfig", name, "up"], capture=True) - - ops = [] - errors = [] - ups = [] - cur_byname = update_byname(cur_bymac) - tmpname_fmt = "cirename%d" - tmpi = -1 - - for mac, new_name in renames: - cur = cur_bymac.get(mac, {}) - cur_name = cur.get('name') - cur_ops = [] - if cur_name == new_name: - # nothing to do - continue - - if not cur_name: - errors.append("[nic not present] Cannot rename mac=%s to %s" - ", not available." % (mac, new_name)) - continue - - if cur['up']: - msg = "[busy] Error renaming mac=%s from %s to %s" - if not cur['downable']: - errors.append(msg % (mac, cur_name, new_name)) - continue - cur['up'] = False - cur_ops.append(("down", mac, new_name, (cur_name,))) - ups.append(("up", mac, new_name, (new_name,))) - - if new_name in cur_byname: - target = cur_byname[new_name] - if target['up']: - msg = "[busy-target] Error renaming mac=%s from %s to %s." - if not target['downable']: - errors.append(msg % (mac, cur_name, new_name)) - continue - else: - cur_ops.append(("down", mac, new_name, (new_name,))) - - tmp_name = None - while tmp_name is None or tmp_name in cur_byname: - tmpi += 1 - tmp_name = tmpname_fmt % tmpi - - cur_ops.append(("rename", mac, new_name, (new_name, tmp_name))) - target['name'] = tmp_name - cur_byname = update_byname(cur_bymac) - if target['up']: - ups.append(("up", mac, new_name, (tmp_name,))) - - cur_ops.append(("rename", mac, new_name, (cur['name'], new_name))) - cur['name'] = new_name - cur_byname = update_byname(cur_bymac) - ops += cur_ops - - opmap = {'rename': rename, 'down': down, 'up': up} - if len(ops) + len(ups) == 0: - if len(errors): - LOG.debug("unable to do any work for renaming of %s", renames) - else: - LOG.debug("no work necessary for renaming of %s", renames) - else: - LOG.debug("achieving renaming of %s with ops %s", - renames, ops + ups) - - for op, mac, new_name, params in ops + ups: - try: - opmap.get(op)(*params) - except Exception as e: - errors.append( - "[unknown] Error performing %s%s for %s, %s: %s" % - (op, params, mac, new_name, e)) - if len(errors): - raise Exception('\n'.join(errors)) - - def apply_network_config_names(self, netcfg): - renames = [] - for ent in netcfg.get('config', {}): - if ent.get('type') != 'physical': - continue - mac = ent.get('mac_address') - name = ent.get('name') - if not mac: - continue - renames.append([mac, name]) - return self._rename_interfaces(renames) - - @classmethod - def generate_fallback_config(self): - nics = Distro.get_ifconfig_ether() - if nics is None: - LOG.debug("Fail to get network interfaces") - return None - potential_interfaces = nics.split() - connected = [] - for nic in potential_interfaces: - pat = "^" + nic - if_result = Distro.get_ifconfig_ifname_out(nic) - for item in if_result.split("\n"): - if re.match(pat, 
item): - flags = item.split('<')[1].split('>')[0] - if flags.find("RUNNING") != -1: - connected.append(nic) - if connected: - potential_interfaces = connected - names = list(sorted(potential_interfaces)) - default_pri_nic = Distro.default_primary_nic - if default_pri_nic in names: - names.remove(default_pri_nic) - names.insert(0, default_pri_nic) - target_name = None - target_mac = None - for name in names: - mac = Distro.get_interface_mac(name) - if mac: - target_name = name - target_mac = mac - break - if target_mac and target_name: - nconf = {'config': [], 'version': 1} - nconf['config'].append( - {'type': 'physical', 'name': target_name, - 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}) - return nconf - else: - return None - - def _write_network(self, settings): - entries = net_util.translate_network(settings) - nameservers = [] - searchdomains = [] - dev_names = entries.keys() - for (device, info) in entries.items(): - # Skip the loopback interface. - if device.startswith('lo'): - continue - - dev = self.getnetifname(device) - - LOG.info('Configuring interface %s', dev) - - if info.get('bootproto') == 'static': - LOG.debug('Configuring dev %s with %s / %s', dev, - info.get('address'), info.get('netmask')) - # Configure an ipv4 address. - ifconfig = (info.get('address') + ' netmask ' + - info.get('netmask')) - - # Configure the gateway. - self.updatercconf('defaultrouter', info.get('gateway')) - - if 'dns-nameservers' in info: - nameservers.extend(info['dns-nameservers']) - if 'dns-search' in info: - searchdomains.extend(info['dns-search']) - else: - ifconfig = 'DHCP' - - self.updatercconf('ifconfig_' + dev, ifconfig) - - # Try to read the /etc/resolv.conf or just start from scratch if that - # fails. - try: - resolvconf = ResolvConf(util.load_file(self.resolv_conf_fn)) - resolvconf.parse() - except IOError: - util.logexc(LOG, "Failed to parse %s, use new empty file", - self.resolv_conf_fn) - resolvconf = ResolvConf('') - resolvconf.parse() - - # Add some nameservers - for server in nameservers: - try: - resolvconf.add_nameserver(server) - except ValueError: - util.logexc(LOG, "Failed to add nameserver %s", server) - - # And add any searchdomains. - for domain in searchdomains: - try: - resolvconf.add_search_domain(domain) - except ValueError: - util.logexc(LOG, "Failed to add search domain %s", domain) - util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644) - - return dev_names + raise def apply_locale(self, locale, out_fn=None): - # Adjust the locals value to the new value + # Adjust the locales value to the new value newconf = StringIO() for line in util.load_file(self.login_conf_fn).splitlines(): newconf.write(re.sub(r'^default:', @@ -602,8 +132,8 @@ class Distro(distros.Distro): try: LOG.debug("Running cap_mkdb for %s", locale) - util.subp(['cap_mkdb', self.login_conf_fn]) - except util.ProcessExecutionError: + subp.subp(['cap_mkdb', self.login_conf_fn]) + except subp.ProcessExecutionError: # cap_mkdb failed, so restore the backup. util.logexc(LOG, "Failed to apply locale %s", locale) try: @@ -612,50 +142,22 @@ class Distro(distros.Distro): util.logexc(LOG, "Failed to restore %s backup", self.login_conf_fn) - def _bring_up_interface(self, device_name): - if device_name.startswith('lo'): - return - dev = self.getnetifname(device_name) - cmd = ['/etc/rc.d/netif', 'start', dev] - LOG.debug("Attempting to bring up interface %s using command %s", - dev, cmd) - # This could return 1 when the interface has already been put UP by the - # OS. This is just fine. 
-        (_out, err) = util.subp(cmd, rcs=[0, 1])
-        if len(err):
-            LOG.warning("Error running %s: %s", cmd, err)
-
-    def install_packages(self, pkglist):
-        self.update_package_sources()
-        self.package_command('install', pkgs=pkglist)
-
-    def package_command(self, command, args=None, pkgs=None):
-        if pkgs is None:
-            pkgs = []
+    def apply_network_config_names(self, netconfig):
+        # This is handled by the FreeBSD network renderer. It writes a line
+        # in /etc/rc.conf with the following format:
+        #     ifconfig_OLDNAME_name=NEWNAME
+        # The FreeBSD network script will then rename the interface
+        # automatically.
+        pass
+
+    def _get_pkg_cmd_environ(self):
+        """Return environment vars used in *BSD package_command operations"""
         e = os.environ.copy()
         e['ASSUME_ALWAYS_YES'] = 'YES'
-
-        cmd = ['pkg']
-        if args and isinstance(args, str):
-            cmd.append(args)
-        elif args and isinstance(args, list):
-            cmd.extend(args)
-
-        if command:
-            cmd.append(command)
-
-        pkglist = util.expand_package_list('%s-%s', pkgs)
-        cmd.extend(pkglist)
-
-        # Allow the output of this to flow outwards (ie not be captured)
-        util.subp(cmd, env=e, capture=False)
-
-    def set_timezone(self, tz):
-        distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+        return e
 
     def update_package_sources(self):
-        self._runner.run("update-sources", self.package_command,
-                         ["update"], freq=PER_INSTANCE)
+        self._runner.run(
+            "update-sources", self.package_command,
+            ["update"], freq=PER_INSTANCE)
 
 # vi: ts=4 expandtab
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index dc57717..2bee1c8 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -9,6 +9,7 @@
 from cloudinit import distros
 from cloudinit import helpers
 from cloudinit import log as logging
+from cloudinit import subp
 from cloudinit import util
 
 from cloudinit.distros import net_util
@@ -39,7 +40,7 @@ class Distro(distros.Distro):
     def apply_locale(self, locale, out_fn=None):
         if not out_fn:
             out_fn = self.locale_conf_fn
-        util.subp(['locale-gen', '-G', locale], capture=False)
+        subp.subp(['locale-gen', '-G', locale], capture=False)
         # "" provides trailing newline during join
         lines = [
             util.make_header(),
@@ -94,11 +95,11 @@ class Distro(distros.Distro):
             cmd = ['rc-update', 'add', 'net.{name}'.format(name=dev),
                    'default']
             try:
-                (_out, err) = util.subp(cmd)
+                (_out, err) = subp.subp(cmd)
                 if len(err):
                     LOG.warning("Running %s resulted in stderr output: %s",
                                 cmd, err)
-            except util.ProcessExecutionError:
+            except subp.ProcessExecutionError:
                 util.logexc(LOG, "Running interface command %s failed", cmd)
 
@@ -119,12 +120,12 @@ class Distro(distros.Distro):
         LOG.debug("Attempting to run bring up interface %s using command %s",
                   device_name, cmd)
         try:
-            (_out, err) = util.subp(cmd)
+            (_out, err) = subp.subp(cmd)
             if len(err):
                 LOG.warning("Running %s resulted in stderr output: %s",
                             cmd, err)
             return True
-        except util.ProcessExecutionError:
+        except subp.ProcessExecutionError:
             util.logexc(LOG, "Running interface command %s failed", cmd)
             return False
 
@@ -137,11 +138,11 @@ class Distro(distros.Distro):
         # Grab device names from init scripts
         cmd = ['ls', '/etc/init.d/net.*']
         try:
-            (_out, err) = util.subp(cmd)
+            (_out, err) = subp.subp(cmd)
             if len(err):
                 LOG.warning("Running %s resulted in stderr output: %s",
                             cmd, err)
-        except util.ProcessExecutionError:
+        except subp.ProcessExecutionError:
             util.logexc(LOG, "Running interface command %s failed", cmd)
             return False
         devices = [x.split('.')[2] for x in _out.split(' ')]
@@ -208,7 +209,7 @@ class Distro(distros.Distro):
         cmd.extend(pkglist)
 
         # Allow the output of this to flow outwards (ie not be captured)
-        util.subp(cmd, capture=False)
+        subp.subp(cmd, capture=False)
 
     def update_package_sources(self):
         self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
new file mode 100644
index 0000000..f1a9b18
--- /dev/null
+++ b/cloudinit/distros/netbsd.py
@@ -0,0 +1,159 @@
+# Copyright (C) 2019-2020 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import crypt
+import os
+import platform
+
+import cloudinit.distros.bsd
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class NetBSD(cloudinit.distros.bsd.BSD):
+    """
+    Distro subclass for NetBSD.
+
+    (N.B. OpenBSD inherits from this class.)
+    """
+
+    ci_sudoers_fn = '/usr/pkg/etc/sudoers.d/90-cloud-init-users'
+    group_add_cmd_prefix = ["groupadd"]
+
+    def __init__(self, name, cfg, paths):
+        super().__init__(name, cfg, paths)
+        if os.path.exists("/usr/pkg/bin/pkgin"):
+            self.pkg_cmd_install_prefix = ['pkgin', '-y', 'install']
+            self.pkg_cmd_remove_prefix = ['pkgin', '-y', 'remove']
+            self.pkg_cmd_update_prefix = ['pkgin', '-y', 'update']
+            self.pkg_cmd_upgrade_prefix = ['pkgin', '-y', 'full-upgrade']
+        else:
+            self.pkg_cmd_install_prefix = ['pkg_add', '-U']
+            self.pkg_cmd_remove_prefix = ['pkg_delete']
+
+    def _get_add_member_to_group_cmd(self, member_name, group_name):
+        return ['usermod', '-G', group_name, member_name]
+
+    def add_user(self, name, **kwargs):
+        if util.is_user(name):
+            LOG.info("User %s already exists, skipping.", name)
+            return False
+
+        adduser_cmd = ['useradd']
+        log_adduser_cmd = ['useradd']
+
+        adduser_opts = {
+            "homedir": '-d',
+            "gecos": '-c',
+            "primary_group": '-g',
+            "groups": '-G',
+            "shell": '-s',
+        }
+        adduser_flags = {
+            "no_user_group": '--no-user-group',
+            "system": '--system',
+            "no_log_init": '--no-log-init',
+        }
+
+        for key, val in kwargs.items():
+            if key in adduser_opts and val and isinstance(val, str):
+                adduser_cmd.extend([adduser_opts[key], val])
+
+            elif key in adduser_flags and val:
+                adduser_cmd.append(adduser_flags[key])
+                log_adduser_cmd.append(adduser_flags[key])
+
+        if 'no_create_home' not in kwargs and 'system' not in kwargs:
+            adduser_cmd += ['-m']
+            log_adduser_cmd += ['-m']
+
+        adduser_cmd += [name]
+        log_adduser_cmd += [name]
+
+        # Run the command
+        LOG.info("Adding user %s", name)
+        try:
+            subp.subp(adduser_cmd, logstring=log_adduser_cmd)
+        except Exception:
+            util.logexc(LOG, "Failed to create user %s", name)
+            raise
+        # Set the password if it is provided.
+        # For security reasons, only a hashed password is assumed.
+        passwd_val = kwargs.get('passwd', None)
+        if passwd_val is not None:
+            self.set_passwd(name, passwd_val, hashed=True)
+
+    def set_passwd(self, user, passwd, hashed=False):
+        if hashed:
+            hashed_pw = passwd
+        elif not hasattr(crypt, 'METHOD_BLOWFISH'):
+            # crypt.METHOD_BLOWFISH comes with Python 3.7, which is
+            # available on NetBSD 7 and 8.
+            LOG.error((
+                'Cannot set non-encrypted password for user %s. '
+                'Python >= 3.7 is required.'), user)
+            return
+        else:
+            method = crypt.METHOD_BLOWFISH  # pylint: disable=E1101
+            hashed_pw = crypt.crypt(
+                passwd,
+                crypt.mksalt(method)
+            )
+
+        try:
+            subp.subp(['usermod', '-p', hashed_pw, user])
+        except Exception:
+            util.logexc(LOG, "Failed to set password for %s", user)
+            raise
+        self.unlock_passwd(user)
+
+    def force_passwd_change(self, user):
+        try:
+            subp.subp(['usermod', '-F', user])
+        except Exception:
+            util.logexc(LOG, "Failed to set pw expiration for %s", user)
+            raise
+
+    def lock_passwd(self, name):
+        try:
+            subp.subp(['usermod', '-C', 'yes', name])
+        except Exception:
+            util.logexc(LOG, "Failed to lock user %s", name)
+            raise
+
+    def unlock_passwd(self, name):
+        try:
+            subp.subp(['usermod', '-C', 'no', name])
+        except Exception:
+            util.logexc(LOG, "Failed to unlock user %s", name)
+            raise
+
+    def apply_locale(self, locale, out_fn=None):
+        LOG.debug('Cannot set the locale.')
+
+    def apply_network_config_names(self, netconfig):
+        LOG.debug('NetBSD cannot rename network interfaces.')
+
+    def _get_pkg_cmd_environ(self):
+        """Return env vars used in NetBSD package_command operations"""
+        os_release = platform.release()
+        os_arch = platform.machine()
+        e = os.environ.copy()
+        e['PKG_PATH'] = (
+            'http://cdn.netbsd.org/pub/pkgsrc/'
+            'packages/NetBSD/%s/%s/All'
+        ) % (os_arch, os_release)
+        return e
+
+    def update_package_sources(self):
+        pass
+
+
+class Distro(NetBSD):
+    pass
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
new file mode 100644
index 0000000..10ed249
--- /dev/null
+++ b/cloudinit/distros/networking.py
@@ -0,0 +1,212 @@
+import abc
+import logging
+import os
+
+from cloudinit import net, util
+
+
+LOG = logging.getLogger(__name__)
+
+
+# Type aliases (https://docs.python.org/3/library/typing.html#type-aliases),
+# used to make the signatures of methods a little clearer
+DeviceName = str
+NetworkConfig = dict
+
+
+class Networking(metaclass=abc.ABCMeta):
+    """The root of the Networking hierarchy in cloud-init.
+
+    This is part of an ongoing refactor in the cloud-init codebase; see
+    "``cloudinit.net`` -> ``cloudinit.distros.networking`` Hierarchy"
+    in HACKING.rst for full details.
+ """ + + def _get_current_rename_info(self) -> dict: + return net._get_current_rename_info() + + def _rename_interfaces(self, renames: list, *, current_info=None) -> None: + return net._rename_interfaces(renames, current_info=current_info) + + def apply_network_config_names(self, netcfg: NetworkConfig) -> None: + return net.apply_network_config_names(netcfg) + + def device_devid(self, devname: DeviceName): + return net.device_devid(devname) + + def device_driver(self, devname: DeviceName): + return net.device_driver(devname) + + def extract_physdevs(self, netcfg: NetworkConfig) -> list: + return net.extract_physdevs(netcfg) + + def find_fallback_nic(self, *, blacklist_drivers=None): + return net.find_fallback_nic(blacklist_drivers=blacklist_drivers) + + def generate_fallback_config( + self, *, blacklist_drivers=None, config_driver: bool = False + ): + return net.generate_fallback_config( + blacklist_drivers=blacklist_drivers, config_driver=config_driver + ) + + def get_devicelist(self) -> list: + return net.get_devicelist() + + def get_ib_hwaddrs_by_interface(self) -> dict: + return net.get_ib_hwaddrs_by_interface() + + def get_ib_interface_hwaddr( + self, devname: DeviceName, ethernet_format: bool + ): + return net.get_ib_interface_hwaddr(devname, ethernet_format) + + def get_interface_mac(self, devname: DeviceName): + return net.get_interface_mac(devname) + + def get_interfaces(self) -> list: + return net.get_interfaces() + + def get_interfaces_by_mac(self) -> dict: + return net.get_interfaces_by_mac() + + def get_master(self, devname: DeviceName): + return net.get_master(devname) + + def interface_has_own_mac( + self, devname: DeviceName, *, strict: bool = False + ) -> bool: + return net.interface_has_own_mac(devname, strict=strict) + + def is_bond(self, devname: DeviceName) -> bool: + return net.is_bond(devname) + + def is_bridge(self, devname: DeviceName) -> bool: + return net.is_bridge(devname) + + @abc.abstractmethod + def is_physical(self, devname: DeviceName) -> bool: + """ + Is ``devname`` a physical network device? + + Examples of non-physical network devices: bonds, bridges, tunnels, + loopback devices. + """ + + def is_renamed(self, devname: DeviceName) -> bool: + return net.is_renamed(devname) + + def is_up(self, devname: DeviceName) -> bool: + return net.is_up(devname) + + def is_vlan(self, devname: DeviceName) -> bool: + return net.is_vlan(devname) + + def master_is_bridge_or_bond(self, devname: DeviceName) -> bool: + return net.master_is_bridge_or_bond(devname) + + @abc.abstractmethod + def settle(self, *, exists=None) -> None: + """Wait for device population in the system to complete. + + :param exists: + An optional optimisation. If given, only perform as much of the + settle process as is required for the given DeviceName to be + present in the system. (This may include skipping the settle + process entirely, if the device already exists.) + :type exists: Optional[DeviceName] + """ + + def wait_for_physdevs( + self, netcfg: NetworkConfig, *, strict: bool = True + ) -> None: + """Wait for all the physical devices in `netcfg` to exist on the system + + Specifically, this will call `self.settle` 5 times, and check after + each one if the physical devices are now present in the system. + + :param netcfg: + The NetworkConfig from which to extract physical devices to wait + for. + :param strict: + Raise a `RuntimeError` if any physical devices are not present + after waiting. 
+ """ + physdevs = self.extract_physdevs(netcfg) + + # set of expected iface names and mac addrs + expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs]) + expected_macs = set(expected_ifaces.keys()) + + # set of current macs + present_macs = self.get_interfaces_by_mac().keys() + + # compare the set of expected mac address values to + # the current macs present; we only check MAC as cloud-init + # has not yet renamed interfaces and the netcfg may include + # such renames. + for _ in range(0, 5): + if expected_macs.issubset(present_macs): + LOG.debug("net: all expected physical devices present") + return + + missing = expected_macs.difference(present_macs) + LOG.debug("net: waiting for expected net devices: %s", missing) + for mac in missing: + # trigger a settle, unless this interface exists + devname = expected_ifaces[mac] + msg = "Waiting for settle or {} exists".format(devname) + util.log_time( + LOG.debug, + msg, + func=self.settle, + kwargs={"exists": devname}, + ) + + # update present_macs after settles + present_macs = self.get_interfaces_by_mac().keys() + + msg = "Not all expected physical devices present: %s" % missing + LOG.warning(msg) + if strict: + raise RuntimeError(msg) + + +class BSDNetworking(Networking): + """Implementation of networking functionality shared across BSDs.""" + + def is_physical(self, devname: DeviceName) -> bool: + raise NotImplementedError() + + def settle(self, *, exists=None) -> None: + """BSD has no equivalent to `udevadm settle`; noop.""" + + +class LinuxNetworking(Networking): + """Implementation of networking functionality common to Linux distros.""" + + def get_dev_features(self, devname: DeviceName) -> str: + return net.get_dev_features(devname) + + def has_netfail_standby_feature(self, devname: DeviceName) -> bool: + return net.has_netfail_standby_feature(devname) + + def is_netfailover(self, devname: DeviceName) -> bool: + return net.is_netfailover(devname) + + def is_netfail_master(self, devname: DeviceName) -> bool: + return net.is_netfail_master(devname) + + def is_netfail_primary(self, devname: DeviceName) -> bool: + return net.is_netfail_primary(devname) + + def is_netfail_standby(self, devname: DeviceName) -> bool: + return net.is_netfail_standby(devname) + + def is_physical(self, devname: DeviceName) -> bool: + return os.path.exists(net.sys_dev_path(devname, "device")) + + def settle(self, *, exists=None) -> None: + if exists is not None: + exists = net.sys_dev_path(exists) + util.udevadm_settle(exists=exists) diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py new file mode 100644 index 0000000..720c9cf --- /dev/null +++ b/cloudinit/distros/openbsd.py @@ -0,0 +1,52 @@ +# Copyright (C) 2019-2020 Gonéri Le Bouder +# +# This file is part of cloud-init. See LICENSE file for license information. 
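The ``distros.networking`` hierarchy above is a thin instance-level wrapper over the module functions in ``cloudinit.net``: distros hold a ``Networking`` object and call it instead of reaching into the module directly. A minimal sketch of the intended call pattern, assuming a Linux host and a hypothetical v2 network config (the config below is illustrative only, not taken from this patch):

    from cloudinit.distros.networking import LinuxNetworking

    # Hypothetical v2 config: one physical NIC, matched by MAC address.
    netcfg = {
        'version': 2,
        'ethernets': {
            'eth0': {'match': {'macaddress': 'aa:bb:cc:dd:ee:ff'},
                     'set-name': 'eth0'},
        },
    }

    networking = LinuxNetworking()
    # wait_for_physdevs() polls get_interfaces_by_mac() up to 5 times,
    # calling settle() (udevadm settle on Linux, a no-op on the BSDs) for
    # each missing device, and raises RuntimeError if strict=True and
    # devices are still absent.
    networking.wait_for_physdevs(netcfg, strict=True)
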
+ +import os +import platform + +import cloudinit.distros.netbsd +from cloudinit import log as logging +from cloudinit import subp +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +class Distro(cloudinit.distros.netbsd.NetBSD): + hostname_conf_fn = '/etc/myname' + + def _read_hostname(self, filename, default=None): + return util.load_file(self.hostname_conf_fn) + + def _write_hostname(self, hostname, filename): + content = hostname + '\n' + util.write_file(self.hostname_conf_fn, content) + + def _get_add_member_to_group_cmd(self, member_name, group_name): + return ['usermod', '-G', group_name, member_name] + + def lock_passwd(self, name): + try: + subp.subp(['usermod', '-p', '*', name]) + except Exception: + util.logexc(LOG, "Failed to lock user %s", name) + raise + + def unlock_passwd(self, name): + pass + + def _get_pkg_cmd_environ(self): + """Return env vars used in OpenBSD package_command operations""" + os_release = platform.release() + os_arch = platform.machine() + e = os.environ.copy() + e['PKG_PATH'] = ( + 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/' + 'packages/{os_arch}/').format( + os_arch=os_arch, os_release=os_release + ) + return e + + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index e41e2f7..b8e557b 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -14,6 +14,7 @@ from cloudinit.distros.parsers.hostname import HostnameConf from cloudinit import helpers from cloudinit import log as logging +from cloudinit import subp from cloudinit import util from cloudinit.distros import rhel_util as rhutil @@ -37,6 +38,7 @@ class Distro(distros.Distro): renderer_configs = { 'sysconfig': { 'control': 'etc/sysconfig/network/config', + 'flavor': 'suse', 'iface_templates': '%(base)s/network/ifcfg-%(name)s', 'netrules_path': ( 'etc/udev/rules.d/85-persistent-net-cloud-init.rules'), @@ -96,7 +98,7 @@ class Distro(distros.Distro): cmd.extend(pkglist) # Allow the output of this to flow outwards (ie not be captured) - util.subp(cmd, capture=False) + subp.subp(cmd, capture=False) def set_timezone(self, tz): tz_file = self._find_tz_file(tz) @@ -128,7 +130,7 @@ class Distro(distros.Distro): if self.uses_systemd() and filename.endswith('/previous-hostname'): return util.load_file(filename).strip() elif self.uses_systemd(): - (out, _err) = util.subp(['hostname']) + (out, _err) = subp.subp(['hostname']) if len(out): return out else: @@ -143,6 +145,9 @@ class Distro(distros.Distro): return default return hostname + def _get_localhost_ip(self): + return "127.0.1.1" + def _read_hostname_conf(self, filename): conf = HostnameConf(util.load_file(filename)) conf.parse() @@ -159,7 +164,7 @@ class Distro(distros.Distro): if self.uses_systemd() and out_fn.endswith('/previous-hostname'): util.write_file(out_fn, hostname) elif self.uses_systemd(): - util.subp(['hostnamectl', 'set-hostname', str(hostname)]) + subp.subp(['hostnamectl', 'set-hostname', str(hostname)]) else: conf = None try: @@ -180,7 +185,7 @@ class Distro(distros.Distro): def preferred_ntp_clients(self): """The preferred ntp client is dependent on the version.""" - """Allow distro to determine the preferred ntp client list""" + # Allow distro to determine the preferred ntp client list if not self._preferred_ntp_clients: distro_info = util.system_info()['dist'] name = distro_info[0] diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py index dd434ac..e74c083 100644 --- 
a/cloudinit/distros/parsers/hostname.py +++ b/cloudinit/distros/parsers/hostname.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from six import StringIO +from io import StringIO from cloudinit.distros.parsers import chop_comment diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py index 6444458..54e4e93 100644 --- a/cloudinit/distros/parsers/hosts.py +++ b/cloudinit/distros/parsers/hosts.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from six import StringIO +from io import StringIO from cloudinit.distros.parsers import chop_comment diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index a62055a..62929d0 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from six import StringIO +from io import StringIO from cloudinit.distros.parsers import chop_comment from cloudinit import log as logging @@ -150,9 +150,10 @@ class ResolvConf(object): tail = '' try: (cfg_opt, cfg_values) = head.split(None, 1) - except (IndexError, ValueError): - raise IOError("Incorrectly formatted resolv.conf line %s" - % (i + 1)) + except (IndexError, ValueError) as e: + raise IOError( + "Incorrectly formatted resolv.conf line %s" % (i + 1) + ) from e if cfg_opt not in ['nameserver', 'domain', 'search', 'sortlist', 'options']: raise IOError("Unexpected resolv.conf option %s" % (cfg_opt)) diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index 44df17d..dee4c55 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -4,11 +4,9 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
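The ``raise ... from e`` form introduced in the resolv.conf parser above (and repeated in several hunks below) chains the replacement exception to the original one, so the triggering ``ValueError`` stays visible as ``__cause__`` in tracebacks instead of being lost. A self-contained illustration of the pattern (not cloud-init code):

    def parse_option(line, lineno):
        try:
            cfg_opt, cfg_values = line.split(None, 1)
        except ValueError as e:
            # Chained: the traceback reports the ValueError as the
            # direct cause of the IOError raised here.
            raise IOError(
                "Incorrectly formatted resolv.conf line %s" % lineno
            ) from e
        return cfg_opt, cfg_values
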
-import six -from six import StringIO - import pipes import re +from io import StringIO # This library is used to parse/write # out the various sysconfig files edited (best attempt effort) @@ -65,7 +63,7 @@ class SysConf(configobj.ConfigObj): return out_contents.getvalue() def _quote(self, value, multiline=False): - if not isinstance(value, six.string_types): + if not isinstance(value, str): raise ValueError('Value "%s" is not a string' % (value)) if len(value) == 0: return '' diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index f55d96f..c72f7c1 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -11,6 +11,7 @@ from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging +from cloudinit import subp from cloudinit import util from cloudinit.distros import rhel_util @@ -83,7 +84,7 @@ class Distro(distros.Distro): if self.uses_systemd() and out_fn.endswith('/previous-hostname'): util.write_file(out_fn, hostname) elif self.uses_systemd(): - util.subp(['hostnamectl', 'set-hostname', str(hostname)]) + subp.subp(['hostnamectl', 'set-hostname', str(hostname)]) else: host_cfg = { 'HOSTNAME': hostname, @@ -108,7 +109,7 @@ class Distro(distros.Distro): if self.uses_systemd() and filename.endswith('/previous-hostname'): return util.load_file(filename).strip() elif self.uses_systemd(): - (out, _err) = util.subp(['hostname']) + (out, _err) = subp.subp(['hostname']) if len(out): return out else: @@ -146,7 +147,7 @@ class Distro(distros.Distro): if pkgs is None: pkgs = [] - if util.which('dnf'): + if subp.which('dnf'): LOG.debug('Using DNF for package management') cmd = ['dnf'] else: @@ -173,7 +174,7 @@ class Distro(distros.Distro): cmd.extend(pkglist) # Allow the output of this to flow outwards (ie not be captured) - util.subp(cmd, capture=False) + subp.subp(cmd, capture=False) def update_package_sources(self): self._runner.run("update-sources", self.package_command, diff --git a/cloudinit/distros/tests/__init__.py b/cloudinit/distros/tests/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/cloudinit/distros/tests/__init__.py diff --git a/cloudinit/distros/tests/test_init.py b/cloudinit/distros/tests/test_init.py new file mode 100644 index 0000000..db53465 --- /dev/null +++ b/cloudinit/distros/tests/test_init.py @@ -0,0 +1,156 @@ +# Copyright (C) 2020 Canonical Ltd. +# +# Author: Daniel Watkins +# +# This file is part of cloud-init. See LICENSE file for license information. +"""Tests for cloudinit/distros/__init__.py""" + +from unittest import mock + +import pytest + +from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS + + +# Define a set of characters we would expect to be replaced +INVALID_URL_CHARS = [ + chr(x) for x in range(127) if chr(x) not in LDH_ASCII_CHARS +] +for separator in [":", ".", "/", "#", "?", "@", "[", "]"]: + # Remove from the set characters that either separate hostname parts (":", + # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be + # unable to parse URLs ("[", "]"). + INVALID_URL_CHARS.remove(separator) + + +class TestGetPackageMirrorInfo: + """ + Tests for cloudinit.distros._get_package_mirror_info. + + These supplement the tests in tests/unittests/test_distros/test_generic.py + which are more focused on testing a single production-like configuration. + These tests are more focused on specific aspects of the unit under test. 
+ """ + + @pytest.mark.parametrize('mirror_info,expected', [ + # Empty info gives empty return + ({}, {}), + # failsafe values used if present + ({'failsafe': {'primary': 'http://value', 'security': 'http://other'}}, + {'primary': 'http://value', 'security': 'http://other'}), + # search values used if present + ({'search': {'primary': ['http://value'], + 'security': ['http://other']}}, + {'primary': ['http://value'], 'security': ['http://other']}), + # failsafe values used if search value not present + ({'search': {'primary': ['http://value']}, + 'failsafe': {'security': 'http://other'}}, + {'primary': ['http://value'], 'security': 'http://other'}) + ]) + def test_get_package_mirror_info_failsafe(self, mirror_info, expected): + """ + Test the interaction between search and failsafe inputs + + (This doesn't test the case where the mirror_filter removes all search + options; test_failsafe_used_if_all_search_results_filtered_out covers + that.) + """ + assert expected == _get_package_mirror_info(mirror_info, + mirror_filter=lambda x: x) + + def test_failsafe_used_if_all_search_results_filtered_out(self): + """Test the failsafe option used if all search options eliminated.""" + mirror_info = { + 'search': {'primary': ['http://value']}, + 'failsafe': {'primary': 'http://other'} + } + assert {'primary': 'http://other'} == _get_package_mirror_info( + mirror_info, mirror_filter=lambda x: False) + + @pytest.mark.parametrize('allow_ec2_mirror, platform_type', [ + (True, 'ec2') + ]) + @pytest.mark.parametrize('availability_zone,region,patterns,expected', ( + # Test ec2_region alone + ('fk-fake-1f', None, ['http://EC2-%(ec2_region)s/ubuntu'], + ['http://ec2-fk-fake-1/ubuntu']), + # Test availability_zone alone + ('fk-fake-1f', None, ['http://AZ-%(availability_zone)s/ubuntu'], + ['http://az-fk-fake-1f/ubuntu']), + # Test region alone + (None, 'fk-fake-1', ['http://RG-%(region)s/ubuntu'], + ['http://rg-fk-fake-1/ubuntu']), + # Test that ec2_region is not available for non-matching AZs + ('fake-fake-1f', None, + ['http://EC2-%(ec2_region)s/ubuntu', + 'http://AZ-%(availability_zone)s/ubuntu'], + ['http://az-fake-fake-1f/ubuntu']), + # Test that template order maintained + (None, 'fake-region', + ['http://RG-%(region)s-2/ubuntu', 'http://RG-%(region)s-1/ubuntu'], + ['http://rg-fake-region-2/ubuntu', 'http://rg-fake-region-1/ubuntu']), + # Test that non-ASCII hostnames are IDNA encoded; + # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" + (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com/ubuntu'], + ['http://www.xn--idna--4kd53hh6aba3q.com/ubuntu']), + # Test that non-ASCII hostnames with a port are IDNA encoded; + # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" + (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com:8080/ubuntu'], + ['http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu']), + # Test that non-ASCII non-hostname parts of URLs are unchanged + (None, 'ТεЅТ̣', ['http://www.example.com/%(region)s/ubuntu'], + ['http://www.example.com/ТεЅТ̣/ubuntu']), + # Test that IPv4 addresses are unchanged + (None, 'fk-fake-1', ['http://192.168.1.1:8080/%(region)s/ubuntu'], + ['http://192.168.1.1:8080/fk-fake-1/ubuntu']), + # Test that IPv6 addresses are unchanged + (None, 'fk-fake-1', + ['http://[2001:67c:1360:8001::23]/%(region)s/ubuntu'], + ['http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu']), + # Test that unparseable URLs are filtered out of the mirror list + (None, 'inv[lid', + ['http://%(region)s.in.hostname/should/be/filtered', + 'http://but.not.in.the.path/%(region)s'], + 
['http://but.not.in.the.path/inv[lid']), + (None, '-some-region-', + ['http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu'], + ['http://lead-ing.some-region.trail-ing.example.com/ubuntu']), + ) + tuple( + # Dynamically generate a test case for each non-LDH + # (Letters/Digits/Hyphen) ASCII character, testing that it is + # substituted with a hyphen + (None, 'fk{0}fake{0}1'.format(invalid_char), + ['http://%(region)s/ubuntu'], ['http://fk-fake-1/ubuntu']) + for invalid_char in INVALID_URL_CHARS + )) + def test_valid_substitution(self, + allow_ec2_mirror, + platform_type, + availability_zone, + region, + patterns, + expected): + """Test substitution works as expected.""" + flag_path = "cloudinit.distros." \ + "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" + + m_data_source = mock.Mock( + availability_zone=availability_zone, + region=region, + platform_type=platform_type + ) + mirror_info = {'search': {'primary': patterns}} + + with mock.patch(flag_path, allow_ec2_mirror): + ret = _get_package_mirror_info( + mirror_info, + data_source=m_data_source, + mirror_filter=lambda x: x + ) + print(allow_ec2_mirror) + print(platform_type) + print(availability_zone) + print(region) + print(patterns) + print(expected) + assert {'primary': expected} == ret diff --git a/cloudinit/distros/tests/test_networking.py b/cloudinit/distros/tests/test_networking.py new file mode 100644 index 0000000..b9a6384 --- /dev/null +++ b/cloudinit/distros/tests/test_networking.py @@ -0,0 +1,192 @@ +from unittest import mock + +import pytest + +from cloudinit import net +from cloudinit.distros.networking import ( + BSDNetworking, + LinuxNetworking, + Networking, +) + +# See https://docs.pytest.org/en/stable/example +# /parametrize.html#parametrizing-conditional-raising +from contextlib import ExitStack as does_not_raise + + +@pytest.yield_fixture +def generic_networking_cls(): + """Returns a direct Networking subclass which errors on /sys usage. + + This enables the direct testing of functionality only present on the + ``Networking`` super-class, and provides a check on accidentally using /sys + in that context. 
+ """ + + class TestNetworking(Networking): + def is_physical(self, *args, **kwargs): + raise NotImplementedError + + def settle(self, *args, **kwargs): + raise NotImplementedError + + error = AssertionError("Unexpectedly used /sys in generic networking code") + with mock.patch( + "cloudinit.net.get_sys_class_path", side_effect=error, + ): + yield TestNetworking + + +@pytest.yield_fixture +def sys_class_net(tmpdir): + sys_class_net_path = tmpdir.join("sys/class/net") + sys_class_net_path.ensure_dir() + with mock.patch( + "cloudinit.net.get_sys_class_path", + return_value=sys_class_net_path.strpath + "/", + ): + yield sys_class_net_path + + +class TestBSDNetworkingIsPhysical: + def test_raises_notimplementederror(self): + with pytest.raises(NotImplementedError): + BSDNetworking().is_physical("eth0") + + +class TestLinuxNetworkingIsPhysical: + def test_returns_false_by_default(self, sys_class_net): + assert not LinuxNetworking().is_physical("eth0") + + def test_returns_false_if_devname_exists_but_not_physical( + self, sys_class_net + ): + devname = "eth0" + sys_class_net.join(devname).mkdir() + assert not LinuxNetworking().is_physical(devname) + + def test_returns_true_if_device_is_physical(self, sys_class_net): + devname = "eth0" + device_dir = sys_class_net.join(devname) + device_dir.mkdir() + device_dir.join("device").write("") + + assert LinuxNetworking().is_physical(devname) + + +class TestBSDNetworkingSettle: + def test_settle_doesnt_error(self): + # This also implicitly tests that it doesn't use subp.subp + BSDNetworking().settle() + + +@pytest.mark.usefixtures("sys_class_net") +@mock.patch("cloudinit.distros.networking.util.udevadm_settle", autospec=True) +class TestLinuxNetworkingSettle: + def test_no_arguments(self, m_udevadm_settle): + LinuxNetworking().settle() + + assert [mock.call(exists=None)] == m_udevadm_settle.call_args_list + + def test_exists_argument(self, m_udevadm_settle): + LinuxNetworking().settle(exists="ens3") + + expected_path = net.sys_dev_path("ens3") + assert [ + mock.call(exists=expected_path) + ] == m_udevadm_settle.call_args_list + + +class TestNetworkingWaitForPhysDevs: + @pytest.fixture + def wait_for_physdevs_netcfg(self): + """This config is shared across all the tests in this class.""" + + def ethernet(mac, name, driver=None, device_id=None): + v2_cfg = {"set-name": name, "match": {"macaddress": mac}} + if driver: + v2_cfg["match"].update({"driver": driver}) + if device_id: + v2_cfg["match"].update({"device_id": device_id}) + + return v2_cfg + + physdevs = [ + ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"], + ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"], + ] + netcfg = { + "version": 2, + "ethernets": {args[1]: ethernet(*args) for args in physdevs}, + } + return netcfg + + def test_skips_settle_if_all_present( + self, generic_networking_cls, wait_for_physdevs_netcfg, + ): + networking = generic_networking_cls() + with mock.patch.object( + networking, "get_interfaces_by_mac" + ) as m_get_interfaces_by_mac: + m_get_interfaces_by_mac.side_effect = iter( + [{"aa:bb:cc:dd:ee:ff": "eth0", "00:11:22:33:44:55": "ens3"}] + ) + with mock.patch.object( + networking, "settle", autospec=True + ) as m_settle: + networking.wait_for_physdevs(wait_for_physdevs_netcfg) + assert 0 == m_settle.call_count + + def test_calls_udev_settle_on_missing( + self, generic_networking_cls, wait_for_physdevs_netcfg, + ): + networking = generic_networking_cls() + with mock.patch.object( + networking, "get_interfaces_by_mac" + ) as m_get_interfaces_by_mac: + 
m_get_interfaces_by_mac.side_effect = iter( + [ + { + "aa:bb:cc:dd:ee:ff": "eth0" + }, # first call ens3 is missing + { + "aa:bb:cc:dd:ee:ff": "eth0", + "00:11:22:33:44:55": "ens3", + }, # second call has both + ] + ) + with mock.patch.object( + networking, "settle", autospec=True + ) as m_settle: + networking.wait_for_physdevs(wait_for_physdevs_netcfg) + m_settle.assert_called_with(exists="ens3") + + @pytest.mark.parametrize( + "strict,expectation", + [(True, pytest.raises(RuntimeError)), (False, does_not_raise())], + ) + def test_retrying_and_strict_behaviour( + self, + strict, + expectation, + generic_networking_cls, + wait_for_physdevs_netcfg, + ): + networking = generic_networking_cls() + with mock.patch.object( + networking, "get_interfaces_by_mac" + ) as m_get_interfaces_by_mac: + m_get_interfaces_by_mac.return_value = {} + + with mock.patch.object( + networking, "settle", autospec=True + ) as m_settle: + with expectation: + networking.wait_for_physdevs( + wait_for_physdevs_netcfg, strict=strict + ) + + assert ( + 5 * len(wait_for_physdevs_netcfg["ethernets"]) + == m_settle.call_count + ) diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index 23be3bd..b4c4b0c 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -49,7 +49,5 @@ class Distro(debian.Distro): copy.deepcopy(PREFERRED_NTP_CLIENTS)) return self._preferred_ntp_clients - pass - # vi: ts=4 expandtab diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py index 9378dd7..08446a9 100755 --- a/cloudinit/distros/ug_util.py +++ b/cloudinit/distros/ug_util.py @@ -9,8 +9,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six - from cloudinit import log as logging from cloudinit import type_utils from cloudinit import util @@ -29,7 +27,7 @@ LOG = logging.getLogger(__name__) # is the standard form used in the rest # of cloud-init def _normalize_groups(grp_cfg): - if isinstance(grp_cfg, six.string_types): + if isinstance(grp_cfg, str): grp_cfg = grp_cfg.strip().split(",") if isinstance(grp_cfg, list): c_grp_cfg = {} @@ -39,7 +37,7 @@ def _normalize_groups(grp_cfg): if k not in c_grp_cfg: if isinstance(v, list): c_grp_cfg[k] = list(v) - elif isinstance(v, six.string_types): + elif isinstance(v, str): c_grp_cfg[k] = [v] else: raise TypeError("Bad group member type %s" % @@ -47,12 +45,12 @@ def _normalize_groups(grp_cfg): else: if isinstance(v, list): c_grp_cfg[k].extend(v) - elif isinstance(v, six.string_types): + elif isinstance(v, str): c_grp_cfg[k].append(v) else: raise TypeError("Bad group member type %s" % type_utils.obj_name(v)) - elif isinstance(i, six.string_types): + elif isinstance(i, str): if i not in c_grp_cfg: c_grp_cfg[i] = [] else: @@ -89,7 +87,7 @@ def _normalize_users(u_cfg, def_user_cfg=None): if isinstance(u_cfg, dict): ad_ucfg = [] for (k, v) in u_cfg.items(): - if isinstance(v, (bool, int, float) + six.string_types): + if isinstance(v, (bool, int, float, str)): if util.is_true(v): ad_ucfg.append(str(k)) elif isinstance(v, dict): @@ -99,12 +97,12 @@ def _normalize_users(u_cfg, def_user_cfg=None): raise TypeError(("Unmappable user value type %s" " for key %s") % (type_utils.obj_name(v), k)) u_cfg = ad_ucfg - elif isinstance(u_cfg, six.string_types): + elif isinstance(u_cfg, str): u_cfg = util.uniq_merge_sorted(u_cfg) users = {} for user_config in u_cfg: - if isinstance(user_config, (list,) + six.string_types): + if isinstance(user_config, (list, str)): for u in util.uniq_merge(user_config): if u and u not in users: 
users[u] = {} @@ -209,7 +207,7 @@ def normalize_users_groups(cfg, distro): old_user = cfg['user'] # Translate it into the format that is more useful # going forward - if isinstance(old_user, six.string_types): + if isinstance(old_user, str): old_user = { 'name': old_user, } @@ -238,7 +236,7 @@ def normalize_users_groups(cfg, distro): default_user_config = util.mergemanydict([old_user, distro_user_config]) base_users = cfg.get('users', []) - if not isinstance(base_users, (list, dict) + six.string_types): + if not isinstance(base_users, (list, dict, str)): LOG.warning(("Format for 'users' key must be a comma separated string" " or a dictionary or a list and not %s"), type_utils.obj_name(base_users)) @@ -252,7 +250,7 @@ def normalize_users_groups(cfg, distro): base_users.append({'name': 'default'}) elif isinstance(base_users, dict): base_users['default'] = dict(base_users).get('default', True) - elif isinstance(base_users, six.string_types): + elif isinstance(base_users, str): # Just append it on to be re-parsed later base_users += ",default" diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 57708c1..34acfe8 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -142,7 +142,8 @@ def skip_retry_on_codes(status_codes, _request_args, cause): def get_instance_userdata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - headers_cb=None, exception_cb=None): + headers_cb=None, headers_redact=None, + exception_cb=None): ud_url = url_helper.combine_url(metadata_address, api_version) ud_url = url_helper.combine_url(ud_url, 'user-data') user_data = '' @@ -155,7 +156,8 @@ def get_instance_userdata(api_version='latest', SKIP_USERDATA_CODES) response = url_helper.read_file_or_url( ud_url, ssl_details=ssl_details, timeout=timeout, - retries=retries, exception_cb=exception_cb, headers_cb=headers_cb) + retries=retries, exception_cb=exception_cb, headers_cb=headers_cb, + headers_redact=headers_redact) user_data = response.contents except url_helper.UrlError as e: if e.code not in SKIP_USERDATA_CODES: @@ -169,11 +171,13 @@ def _get_instance_metadata(tree, api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, leaf_decoder=None, headers_cb=None, + headers_redact=None, exception_cb=None): md_url = url_helper.combine_url(metadata_address, api_version, tree) caller = functools.partial( url_helper.read_file_or_url, ssl_details=ssl_details, timeout=timeout, retries=retries, headers_cb=headers_cb, + headers_redact=headers_redact, exception_cb=exception_cb) def mcaller(url): @@ -197,6 +201,7 @@ def get_instance_metadata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, leaf_decoder=None, headers_cb=None, + headers_redact=None, exception_cb=None): # Note, 'meta-data' explicitly has trailing /. 
# this is required for CloudStack (LP: #1356855) @@ -204,6 +209,7 @@ def get_instance_metadata(api_version='latest', metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, retries=retries, leaf_decoder=leaf_decoder, + headers_redact=headers_redact, headers_cb=headers_cb, exception_cb=exception_cb) @@ -212,12 +218,14 @@ def get_instance_identity(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, leaf_decoder=None, headers_cb=None, + headers_redact=None, exception_cb=None): return _get_instance_metadata(tree='dynamic/instance-identity', api_version=api_version, metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, retries=retries, leaf_decoder=leaf_decoder, + headers_redact=headers_redact, headers_cb=headers_cb, exception_cb=exception_cb) # vi: ts=4 expandtab diff --git a/cloudinit/features.py b/cloudinit/features.py new file mode 100644 index 0000000..c44fa29 --- /dev/null +++ b/cloudinit/features.py @@ -0,0 +1,44 @@ +# This file is part of cloud-init. See LICENSE file for license information. +""" +Feature flags are used as a way to easily toggle configuration +**at build time**. They are provided to accommodate feature deprecation and +downstream configuration changes. + +Currently used upstream values for feature flags are set in +``cloudinit/features.py``. Overrides to these values (typically via quilt +patch) can be placed +in a file called ``feature_overrides.py`` in the same directory. Any value +set in ``feature_overrides.py`` will override the original value set +in ``features.py``. + +Each flag should include a short comment regarding the reason for +the flag and intended lifetime. + +Tests are required for new feature flags, and tests must verify +all valid states of a flag, not just the default state. +""" + +ERROR_ON_USER_DATA_FAILURE = True +""" +If there is a failure in obtaining user data (i.e., #include or +decompress fails), old behavior is to log a warning and proceed. +After the 20.2 release, we instead raise an exception. +This flag can be removed after Focal is no longer supported +""" + + +ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES = False +""" +When configuring apt mirrors, old behavior is to allow +the use of ec2 mirrors if the datasource availability_zone format +matches one of the possible aws ec2 regions. After the 20.2 release, we +no longer publish ec2 region mirror urls on non-AWS cloud platforms. +Besides feature_overrides.py, users can override this by providing +#cloud-config apt directives. 
+""" + +try: + # pylint: disable=wildcard-import + from cloudinit.feature_overrides import * # noqa +except ImportError: + pass diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py index 7fe17a2..be0ca0e 100644 --- a/cloudinit/gpg.py +++ b/cloudinit/gpg.py @@ -8,7 +8,7 @@ """gpg.py - Collection of gpg key related functions""" from cloudinit import log as logging -from cloudinit import util +from cloudinit import subp import time @@ -18,9 +18,9 @@ LOG = logging.getLogger(__name__) def export_armour(key): """Export gpg key, armoured key gets returned""" try: - (armour, _) = util.subp(["gpg", "--export", "--armour", key], + (armour, _) = subp.subp(["gpg", "--export", "--armour", key], capture=True) - except util.ProcessExecutionError as error: + except subp.ProcessExecutionError as error: # debug, since it happens for any key not on the system initially LOG.debug('Failed to export armoured key "%s": %s', key, error) armour = None @@ -51,11 +51,11 @@ def recv_key(key, keyserver, retries=(1, 1)): while True: trynum += 1 try: - util.subp(cmd, capture=True) + subp.subp(cmd, capture=True) LOG.debug("Imported key '%s' from keyserver '%s' on try %d", key, keyserver, trynum) return - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: error = e try: naplen = next(sleeps) @@ -63,18 +63,19 @@ def recv_key(key, keyserver, retries=(1, 1)): "Import failed with exit code %d, will try again in %ss", error.exit_code, naplen) time.sleep(naplen) - except StopIteration: + except StopIteration as e: raise ValueError( ("Failed to import key '%s' from keyserver '%s' " - "after %d tries: %s") % (key, keyserver, trynum, error)) + "after %d tries: %s") % (key, keyserver, trynum, error) + ) from e def delete_key(key): """Delete the specified key from the local gpg ring""" try: - util.subp(["gpg", "--batch", "--yes", "--delete-keys", key], + subp.subp(["gpg", "--batch", "--yes", "--delete-keys", key], capture=True) - except util.ProcessExecutionError as error: + except subp.ProcessExecutionError as error: LOG.warning('Failed delete key "%s": %s', key, error) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 0db75af..a409ff8 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -10,14 +10,12 @@ import abc import os -import six - -from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) from cloudinit import importer from cloudinit import log as logging from cloudinit import type_utils from cloudinit import util +from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) LOG = logging.getLogger(__name__) @@ -60,8 +58,7 @@ INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()), key=(lambda e: 0 - len(e))) -@six.add_metaclass(abc.ABCMeta) -class Handler(object): +class Handler(metaclass=abc.ABCMeta): def __init__(self, frequency, version=2): self.handler_version = version @@ -159,7 +156,7 @@ def _extract_first_or_bytes(blob, size): # Extract the first line or upto X symbols for text objects # Extract first X bytes for binary objects try: - if isinstance(blob, six.string_types): + if isinstance(blob, str): start = blob.split("\n", 1)[0] else: # We want to avoid decoding the whole blob (it might be huge) diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py index dca50a4..c620509 100644 --- a/cloudinit/handlers/boot_hook.py +++ b/cloudinit/handlers/boot_hook.py @@ -12,6 +12,7 @@ import os from cloudinit import handlers from cloudinit import log as logging +from cloudinit import subp 
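The ``try``/``except ImportError`` block that closes ``features.py`` above is the entire override mechanism: a downstream build drops a ``feature_overrides.py`` module alongside it (typically via a quilt patch) and rebinds only the flags it wants to change. A hypothetical override file (no such file ships upstream):

    # cloudinit/feature_overrides.py -- hypothetical downstream override.
    # Restore the pre-20.2 behaviour of warning, rather than raising,
    # when user-data retrieval or decompression fails.
    ERROR_ON_USER_DATA_FAILURE = False
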
from cloudinit import util from cloudinit.settings import (PER_ALWAYS) @@ -48,8 +49,8 @@ class BootHookPartHandler(handlers.Handler): env = os.environ.copy() if self.instance_id is not None: env['INSTANCE_ID'] = str(self.instance_id) - util.subp([filepath], env=env) - except util.ProcessExecutionError: + subp.subp([filepath], env=env) + except subp.ProcessExecutionError: util.logexc(LOG, "Boothooks script %s execution error", filepath) except Exception: util.logexc(LOG, "Boothooks unknown error when running %s", diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py index ce3accf..aadfbf8 100644 --- a/cloudinit/handlers/jinja_template.py +++ b/cloudinit/handlers/jinja_template.py @@ -83,7 +83,8 @@ def render_jinja_payload_from_file( if e.errno == EACCES: raise RuntimeError( 'Cannot render jinja template vars. No read permission on' - " '%s'. Try sudo" % instance_data_file) + " '%s'. Try sudo" % instance_data_file + ) from e rendered_payload = render_jinja_payload( payload, payload_fn, instance_data, debug) diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 003cad6..a9d2953 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -13,6 +13,7 @@ import re from cloudinit import handlers from cloudinit import log as logging +from cloudinit import subp from cloudinit import util from cloudinit.settings import (PER_INSTANCE) @@ -52,7 +53,7 @@ class UpstartJobPartHandler(handlers.Handler): util.write_file(path, payload, 0o644) if SUITABLE_UPSTART: - util.subp(["initctl", "reload-configuration"], capture=False) + subp.subp(["initctl", "reload-configuration"], capture=False) def _has_suitable_upstart(): @@ -63,7 +64,7 @@ def _has_suitable_upstart(): if not os.path.exists("/sbin/initctl"): return False try: - (version_out, _err) = util.subp(["initctl", "version"]) + (version_out, _err) = subp.subp(["initctl", "version"]) except Exception: util.logexc(LOG, "initctl version failed") return False @@ -77,7 +78,7 @@ def _has_suitable_upstart(): if not os.path.exists("/usr/bin/dpkg-query"): return False try: - (dpkg_ver, _err) = util.subp(["dpkg-query", + (dpkg_ver, _err) = subp.subp(["dpkg-query", "--showformat=${Version}", "--show", "upstart"], rcs=[0, 1]) except Exception: @@ -86,9 +87,9 @@ def _has_suitable_upstart(): try: good = "1.8-0ubuntu1.2" - util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good]) + subp.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good]) return True - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: if e.exit_code == 1: pass else: diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index dcd2645..9752ad2 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -12,10 +12,8 @@ from time import time import contextlib import os - -from six import StringIO -from six.moves.configparser import ( - NoSectionError, NoOptionError, RawConfigParser) +from configparser import NoSectionError, NoOptionError, RawConfigParser +from io import StringIO from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, CFG_ENV_NAME) @@ -453,8 +451,4 @@ class DefaultingConfigParser(RawConfigParser): contents = '\n'.join([header, contents, '']) return contents - -def identity(object): - return object - # vi: ts=4 expandtab diff --git a/cloudinit/log.py b/cloudinit/log.py index 5ae312b..2e5df04 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -8,17 +8,13 @@ # # This file is part of cloud-init. 
See LICENSE file for license information. +import collections +import io import logging import logging.config import logging.handlers - -import collections import os import sys - -import six -from six import StringIO - import time # Logging levels for easy access @@ -74,13 +70,13 @@ def setupLogging(cfg=None): log_cfgs = [] log_cfg = cfg.get('logcfg') - if log_cfg and isinstance(log_cfg, six.string_types): + if log_cfg and isinstance(log_cfg, str): # If there is a 'logcfg' entry in the config, # respect it, it is the old keyname log_cfgs.append(str(log_cfg)) elif "log_cfgs" in cfg: for a_cfg in cfg['log_cfgs']: - if isinstance(a_cfg, six.string_types): + if isinstance(a_cfg, str): log_cfgs.append(a_cfg) elif isinstance(a_cfg, (collections.Iterable)): cfg_str = [str(c) for c in a_cfg] @@ -100,7 +96,7 @@ def setupLogging(cfg=None): # is acting as a file) pass else: - log_cfg = StringIO(log_cfg) + log_cfg = io.StringIO(log_cfg) # Attempt to load its config logging.config.fileConfig(log_cfg) # The first one to work wins! @@ -126,17 +122,12 @@ def getLogger(name='cloudinit'): return logging.getLogger(name) -# Fixes this annoyance... -# No handlers could be found for logger XXX annoying output... -try: - from logging import NullHandler -except ImportError: - class NullHandler(logging.Handler): - def emit(self, record): - pass - - def _resetLogger(log): + """Remove all current handlers, unset log level and add a NullHandler. + + (Adding the NullHandler avoids "No handlers could be found for logger XXX" + messages.) + """ if not log: return handlers = list(log.handlers) @@ -145,7 +136,7 @@ def _resetLogger(log): h.close() log.removeHandler(h) log.setLevel(NOTSET) - log.addHandler(NullHandler()) + log.addHandler(logging.NullHandler()) def resetLogging(): diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 7fbc25f..668e3cd 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -6,8 +6,6 @@ import re -import six - from cloudinit import importer from cloudinit import log as logging from cloudinit import type_utils @@ -85,7 +83,7 @@ def dict_extract_mergers(config): raw_mergers = config.pop('merge_type', None) if raw_mergers is None: return parsed_mergers - if isinstance(raw_mergers, six.string_types): + if isinstance(raw_mergers, str): return string_extract_mergers(raw_mergers) for m in raw_mergers: if isinstance(m, (dict)): diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py index 6c5fddc..93472f1 100644 --- a/cloudinit/mergers/m_dict.py +++ b/cloudinit/mergers/m_dict.py @@ -4,8 +4,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six - DEF_MERGE_TYPE = 'no_replace' MERGE_TYPES = ('replace', DEF_MERGE_TYPE,) @@ -47,7 +45,7 @@ class Merger(object): return new_v if isinstance(new_v, (list, tuple)) and self._recurse_array: return self._merger.merge(old_v, new_v) - if isinstance(new_v, six.string_types) and self._recurse_str: + if isinstance(new_v, str) and self._recurse_str: return self._merger.merge(old_v, new_v) if isinstance(new_v, (dict)) and self._recurse_dict: return self._merger.merge(old_v, new_v) diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index daa0469..19f3277 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -4,8 +4,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
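The hunks around this point (``helpers.py``, ``log.py``, and the mergers here and below) all retire the ``six`` compatibility layer in favour of the Python 3 standard library. The substitutions are mechanical; a condensed sketch of the recurring replacements (illustrative, not a hunk from this patch):

    # from six import StringIO           ->  from io import StringIO
    # six.moves.configparser             ->  configparser
    # six.string_types                   ->  str
    # @six.add_metaclass(abc.ABCMeta)    ->  class C(metaclass=abc.ABCMeta)
    import abc
    from io import StringIO

    class Handler(metaclass=abc.ABCMeta):
        pass

    def is_stringy(value):
        return isinstance(value, str)

    buf = StringIO("in-memory text, no compatibility shim required")
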
-import six - DEF_MERGE_TYPE = 'replace' MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace') @@ -63,7 +61,7 @@ class Merger(object): return old_v if isinstance(new_v, (list, tuple)) and self._recurse_array: return self._merger.merge(old_v, new_v) - if isinstance(new_v, six.string_types) and self._recurse_str: + if isinstance(new_v, str) and self._recurse_str: return self._merger.merge(old_v, new_v) if isinstance(new_v, (dict)) and self._recurse_dict: return self._merger.merge(old_v, new_v) diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py index 629df58..539e3e2 100644 --- a/cloudinit/mergers/m_str.py +++ b/cloudinit/mergers/m_str.py @@ -4,8 +4,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six - class Merger(object): def __init__(self, _merger, opts): @@ -23,13 +21,10 @@ class Merger(object): # perform the following action, if appending we will # merge them together, otherwise we will just return value. def _on_str(self, value, merge_with): - if not isinstance(value, six.string_types): + if not isinstance(value, str): return merge_with if not self._append: return merge_with - if isinstance(value, six.text_type): - return value + six.text_type(merge_with) - else: - return value + six.binary_type(merge_with) + return value + merge_with # vi: ts=4 expandtab diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index bd80637..e233149 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -6,13 +6,14 @@ # This file is part of cloud-init. See LICENSE file for license information. import errno +import ipaddress import logging import os import re -from functools import partial -from cloudinit.net.network_state import mask_to_net_prefix +from cloudinit import subp from cloudinit import util +from cloudinit.net.network_state import mask_to_net_prefix from cloudinit.url_helper import UrlError, readurl LOG = logging.getLogger(__name__) @@ -97,10 +98,6 @@ def is_up(devname): return read_sys_net_safe(devname, "operstate", translate=translate) -def is_wireless(devname): - return os.path.exists(sys_dev_path(devname, "wireless")) - - def is_bridge(devname): return os.path.exists(sys_dev_path(devname, "bridge")) @@ -264,28 +261,6 @@ def is_vlan(devname): return 'DEVTYPE=vlan' in uevent.splitlines() -def is_connected(devname): - # is_connected isn't really as simple as that. 2 is - # 'physically connected'. 3 is 'not connected'. but a wlan interface will - # always show 3. 
- iflink = read_sys_net_safe(devname, "iflink") - if iflink == "2": - return True - if not is_wireless(devname): - return False - LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname) - return read_sys_net_safe(devname, "carrier", - translate={'0': False, '1': True}) - - -def is_physical(devname): - return os.path.exists(sys_dev_path(devname, "device")) - - -def is_present(devname): - return os.path.exists(sys_dev_path(devname)) - - def device_driver(devname): """Return the device driver for net device named 'devname'.""" driver = None @@ -307,6 +282,9 @@ def device_devid(devname): def get_devicelist(): + if util.is_FreeBSD(): + return list(get_interfaces_by_mac().values()) + try: devs = os.listdir(get_sys_class_path()) except OSError as e: @@ -329,6 +307,45 @@ def is_disabled_cfg(cfg): def find_fallback_nic(blacklist_drivers=None): """Return the name of the 'fallback' network device.""" + if util.is_FreeBSD(): + return find_fallback_nic_on_freebsd(blacklist_drivers) + elif util.is_NetBSD() or util.is_OpenBSD(): + return find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers) + else: + return find_fallback_nic_on_linux(blacklist_drivers) + + +def find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers=None): + values = list(sorted( + get_interfaces_by_mac().values(), + key=natural_sort_key)) + if values: + return values[0] + + +def find_fallback_nic_on_freebsd(blacklist_drivers=None): + """Return the name of the 'fallback' network device on FreeBSD. + + @param blacklist_drivers: currently ignored + @return default interface, or None + + + we'll use the first interface from ``ifconfig -l -u ether`` + """ + stdout, _stderr = subp.subp(['ifconfig', '-l', '-u', 'ether']) + values = stdout.split() + if values: + return values[0] + # On FreeBSD <= 10, 'ifconfig -l' ignores the interfaces with DOWN + # status + values = list(get_interfaces_by_mac().values()) + values.sort() + if values: + return values[0] + + +def find_fallback_nic_on_linux(blacklist_drivers=None): + """Return the name of the 'fallback' network device on Linux.""" if not blacklist_drivers: blacklist_drivers = [] @@ -476,43 +493,6 @@ def extract_physdevs(netcfg): raise RuntimeError('Unknown network config version: %s' % version) -def wait_for_physdevs(netcfg, strict=True): - physdevs = extract_physdevs(netcfg) - - # set of expected iface names and mac addrs - expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs]) - expected_macs = set(expected_ifaces.keys()) - - # set of current macs - present_macs = get_interfaces_by_mac().keys() - - # compare the set of expected mac address values to - # the current macs present; we only check MAC as cloud-init - # has not yet renamed interfaces and the netcfg may include - # such renames. 
- for _ in range(0, 5): - if expected_macs.issubset(present_macs): - LOG.debug('net: all expected physical devices present') - return - - missing = expected_macs.difference(present_macs) - LOG.debug('net: waiting for expected net devices: %s', missing) - for mac in missing: - # trigger a settle, unless this interface exists - syspath = sys_dev_path(expected_ifaces[mac]) - settle = partial(util.udevadm_settle, exists=syspath) - msg = 'Waiting for udev events to settle or %s exists' % syspath - util.log_time(LOG.debug, msg, func=settle) - - # update present_macs after settles - present_macs = get_interfaces_by_mac().keys() - - msg = 'Not all expected physical devices present: %s' % missing - LOG.warning(msg) - if strict: - raise RuntimeError(msg) - - def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): """read the network config and rename devices accordingly. if strict_present is false, then do not raise exception if no devices @@ -526,7 +506,9 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): try: _rename_interfaces(extract_physdevs(netcfg)) except RuntimeError as e: - raise RuntimeError('Failed to apply network config names: %s' % e) + raise RuntimeError( + 'Failed to apply network config names: %s' % e + ) from e def interface_has_own_mac(ifname, strict=False): @@ -577,9 +559,9 @@ def _get_current_rename_info(check_downable=True): if check_downable: nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]") - ipv6, _err = util.subp(['ip', '-6', 'addr', 'show', 'permanent', + ipv6, _err = subp.subp(['ip', '-6', 'addr', 'show', 'permanent', 'scope', 'global'], capture=True) - ipv4, _err = util.subp(['ip', '-4', 'addr', 'show'], capture=True) + ipv4, _err = subp.subp(['ip', '-4', 'addr', 'show'], capture=True) nics_with_addresses = set() for bytes_out in (ipv6, ipv4): @@ -615,13 +597,13 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True, for data in cur_info.values()) def rename(cur, new): - util.subp(["ip", "link", "set", cur, "name", new], capture=True) + subp.subp(["ip", "link", "set", cur, "name", new], capture=True) def down(name): - util.subp(["ip", "link", "set", name, "down"], capture=True) + subp.subp(["ip", "link", "set", name, "down"], capture=True) def up(name): - util.subp(["ip", "link", "set", name, "up"], capture=True) + subp.subp(["ip", "link", "set", name, "up"], capture=True) ops = [] errors = [] @@ -765,6 +747,75 @@ def get_ib_interface_hwaddr(ifname, ethernet_format): def get_interfaces_by_mac(): + if util.is_FreeBSD(): + return get_interfaces_by_mac_on_freebsd() + elif util.is_NetBSD(): + return get_interfaces_by_mac_on_netbsd() + elif util.is_OpenBSD(): + return get_interfaces_by_mac_on_openbsd() + else: + return get_interfaces_by_mac_on_linux() + + +def get_interfaces_by_mac_on_freebsd(): + (out, _) = subp.subp(['ifconfig', '-a', 'ether']) + + # flatten each interface block in a single line + def flatten(out): + curr_block = '' + for line in out.split('\n'): + if line.startswith('\t'): + curr_block += line + else: + if curr_block: + yield curr_block + curr_block = line + yield curr_block + + # looks for interface and mac in a list of flatten block + def find_mac(flat_list): + for block in flat_list: + m = re.search( + r"^(?P\S*): .*ether\s(?P[\da-f:]{17}).*", + block) + if m: + yield (m.group('mac'), m.group('ifname')) + results = {mac: ifname for mac, ifname in find_mac(flatten(out))} + return results + + +def get_interfaces_by_mac_on_netbsd(): + ret = {} + re_field_match = ( + r"(?P\w+).*address:\s" + 
r"(?P([\da-f]{2}[:-]){5}([\da-f]{2})).*" + ) + (out, _) = subp.subp(['ifconfig', '-a']) + if_lines = re.sub(r'\n\s+', ' ', out).splitlines() + for line in if_lines: + m = re.match(re_field_match, line) + if m: + fields = m.groupdict() + ret[fields['mac']] = fields['ifname'] + return ret + + +def get_interfaces_by_mac_on_openbsd(): + ret = {} + re_field_match = ( + r"(?P\w+).*lladdr\s" + r"(?P([\da-f]{2}[:-]){5}([\da-f]{2})).*") + (out, _) = subp.subp(['ifconfig', '-a']) + if_lines = re.sub(r'\n\s+', ' ', out).splitlines() + for line in if_lines: + m = re.match(re_field_match, line) + if m: + fields = m.groupdict() + ret[fields['mac']] = fields['ifname'] + return ret + + +def get_interfaces_by_mac_on_linux(): """Build a dictionary of tuples {mac: name}. Bridges and any devices that have a 'stolen' mac are excluded.""" @@ -851,6 +902,38 @@ def has_url_connectivity(url): return True +def is_ip_address(s: str) -> bool: + """Returns a bool indicating if ``s`` is an IP address. + + :param s: + The string to test. + + :return: + A bool indicating if the string contains an IP address or not. + """ + try: + ipaddress.ip_address(s) + except ValueError: + return False + return True + + +def is_ipv4_address(s: str) -> bool: + """Returns a bool indicating if ``s`` is an IPv4 address. + + :param s: + The string to test. + + :return: + A bool indicating if the string contains an IPv4 address or not. + """ + try: + ipaddress.IPv4Address(s) + except ValueError: + return False + return True + + class EphemeralIPv4Network(object): """Context manager which sets up temporary static network configuration. @@ -884,7 +967,8 @@ class EphemeralIPv4Network(object): self.prefix = mask_to_net_prefix(prefix_or_mask) except ValueError as e: raise ValueError( - 'Cannot setup network: {0}'.format(e)) + 'Cannot setup network: {0}'.format(e) + ) from e self.connectivity_url = connectivity_url self.interface = interface @@ -924,11 +1008,11 @@ class EphemeralIPv4Network(object): def __exit__(self, excp_type, excp_value, excp_traceback): """Teardown anything we set up.""" for cmd in self.cleanup_cmds: - util.subp(cmd, capture=True) + subp.subp(cmd, capture=True) def _delete_address(self, address, prefix): """Perform the ip command to remove the specified address.""" - util.subp( + subp.subp( ['ip', '-family', 'inet', 'addr', 'del', '%s/%s' % (address, prefix), 'dev', self.interface], capture=True) @@ -940,11 +1024,11 @@ class EphemeralIPv4Network(object): 'Attempting setup of ephemeral network on %s with %s brd %s', self.interface, cidr, self.broadcast) try: - util.subp( + subp.subp( ['ip', '-family', 'inet', 'addr', 'add', cidr, 'broadcast', self.broadcast, 'dev', self.interface], capture=True, update_env={'LANG': 'C'}) - except util.ProcessExecutionError as e: + except subp.ProcessExecutionError as e: if "File exists" not in e.stderr: raise LOG.debug( @@ -952,7 +1036,7 @@ class EphemeralIPv4Network(object): self.interface, self.ip) else: # Address creation success, bring up device and queue cleanup - util.subp( + subp.subp( ['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface, 'up'], capture=True) self.cleanup_cmds.append( @@ -969,7 +1053,7 @@ class EphemeralIPv4Network(object): via_arg = [] if gateway != "0.0.0.0/0": via_arg = ['via', gateway] - util.subp( + subp.subp( ['ip', '-4', 'route', 'add', net_address] + via_arg + ['dev', self.interface], capture=True) self.cleanup_cmds.insert( @@ -979,20 +1063,20 @@ class EphemeralIPv4Network(object): def _bringup_router(self): """Perform the ip commands to fully setup 
the router if needed.""" # Check if a default route exists and exit if it does - out, _ = util.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True) + out, _ = subp.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True) if 'default' in out: LOG.debug( 'Skip ephemeral route setup. %s already has default route: %s', self.interface, out.strip()) return - util.subp( + subp.subp( ['ip', '-4', 'route', 'add', self.router, 'dev', self.interface, 'src', self.ip], capture=True) self.cleanup_cmds.insert( 0, ['ip', '-4', 'route', 'del', self.router, 'dev', self.interface, 'src', self.ip]) - util.subp( + subp.subp( ['ip', '-4', 'route', 'add', 'default', 'via', self.router, 'dev', self.interface], capture=True) self.cleanup_cmds.insert( diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py new file mode 100644 index 0000000..e34e045 --- /dev/null +++ b/cloudinit/net/bsd.py @@ -0,0 +1,167 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import re + +from cloudinit import log as logging +from cloudinit import net +from cloudinit import util +from cloudinit import subp +from cloudinit.distros.parsers.resolv_conf import ResolvConf +from cloudinit.distros import bsd_utils + +from . import renderer + +LOG = logging.getLogger(__name__) + + +class BSDRenderer(renderer.Renderer): + resolv_conf_fn = 'etc/resolv.conf' + rc_conf_fn = 'etc/rc.conf' + + def get_rc_config_value(self, key): + fn = subp.target_path(self.target, self.rc_conf_fn) + bsd_utils.get_rc_config_value(key, fn=fn) + + def set_rc_config_value(self, key, value): + fn = subp.target_path(self.target, self.rc_conf_fn) + bsd_utils.set_rc_config_value(key, value, fn=fn) + + def __init__(self, config=None): + if not config: + config = {} + self.target = None + self.interface_configurations = {} + self._postcmds = config.get('postcmds', True) + + def _ifconfig_entries(self, settings, target=None): + ifname_by_mac = net.get_interfaces_by_mac() + for interface in settings.iter_interfaces(): + device_name = interface.get("name") + device_mac = interface.get("mac_address") + if device_name and re.match(r'^lo\d+$', device_name): + continue + if device_mac not in ifname_by_mac: + LOG.info('Cannot find any device with MAC %s', device_mac) + elif device_mac and device_name: + cur_name = ifname_by_mac[device_mac] + if cur_name != device_name: + LOG.info('netif service will rename interface %s to %s', + cur_name, device_name) + try: + self.rename_interface(cur_name, device_name) + except NotImplementedError: + LOG.error(( + 'Interface renaming is ' + 'not supported on this OS')) + device_name = cur_name + + else: + device_name = ifname_by_mac[device_mac] + + LOG.info('Configuring interface %s', device_name) + + self.interface_configurations[device_name] = 'DHCP' + + for subnet in interface.get("subnets", []): + if subnet.get('type') == 'static': + if not subnet.get('netmask'): + LOG.debug( + 'Skipping IP %s, because there is no netmask', + subnet.get('address') + ) + continue + LOG.debug('Configuring dev %s with %s / %s', device_name, + subnet.get('address'), subnet.get('netmask')) + + self.interface_configurations[device_name] = { + 'address': subnet.get('address'), + 'netmask': subnet.get('netmask'), + } + + def _route_entries(self, settings, target=None): + routes = list(settings.iter_routes()) + for interface in settings.iter_interfaces(): + subnets = interface.get("subnets", []) + for subnet in subnets: + if subnet.get('type') != 'static': + continue + gateway = subnet.get('gateway') + if gateway and 
len(gateway.split('.')) == 4: + routes.append({ + 'network': '0.0.0.0', + 'netmask': '0.0.0.0', + 'gateway': gateway}) + routes += subnet.get('routes', []) + for route in routes: + network = route.get('network') + if not network: + LOG.debug('Skipping a bad route entry') + continue + netmask = route.get('netmask') + gateway = route.get('gateway') + self.set_route(network, netmask, gateway) + + def _resolve_conf(self, settings, target=None): + nameservers = settings.dns_nameservers + searchdomains = settings.dns_searchdomains + for interface in settings.iter_interfaces(): + for subnet in interface.get("subnets", []): + if 'dns_nameservers' in subnet: + nameservers.extend(subnet['dns_nameservers']) + if 'dns_search' in subnet: + searchdomains.extend(subnet['dns_search']) + # Try to read the /etc/resolv.conf or just start from scratch if that + # fails. + try: + resolvconf = ResolvConf(util.load_file(subp.target_path( + target, self.resolv_conf_fn))) + resolvconf.parse() + except IOError: + util.logexc(LOG, "Failed to parse %s, use new empty file", + subp.target_path(target, self.resolv_conf_fn)) + resolvconf = ResolvConf('') + resolvconf.parse() + + # Add some nameservers + for server in nameservers: + try: + resolvconf.add_nameserver(server) + except ValueError: + util.logexc(LOG, "Failed to add nameserver %s", server) + + # And add any searchdomains. + for domain in searchdomains: + try: + resolvconf.add_search_domain(domain) + except ValueError: + util.logexc(LOG, "Failed to add search domain %s", domain) + util.write_file( + subp.target_path(target, self.resolv_conf_fn), + str(resolvconf), 0o644) + + def render_network_state(self, network_state, templates=None, target=None): + self._ifconfig_entries(settings=network_state) + self._route_entries(settings=network_state) + self._resolve_conf(settings=network_state) + + self.write_config() + self.start_services(run=self._postcmds) + + def dhcp_interfaces(self): + ic = self.interface_configurations.items + return [k for k, v in ic() if v == 'DHCP'] + + def start_services(self, run=False): + raise NotImplementedError() + + def write_config(self, target=None): + raise NotImplementedError() + + def set_gateway(self, gateway): + raise NotImplementedError() + + def rename_interface(self, cur_name, device_name): + raise NotImplementedError() + + def set_route(self, network, netmask, gateway): + raise NotImplementedError() diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 55166ea..cc8dc17 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -10,10 +10,9 @@ import base64 import glob import gzip import io +import logging import os -import six - from cloudinit import util from . import get_devicelist @@ -21,22 +20,19 @@ from . 
import read_sys_net_safe _OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface" +KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED = "disabled" + -@six.add_metaclass(abc.ABCMeta) -class InitramfsNetworkConfigSource(object): +class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta): """ABC for net config sources that read config written by initramfses""" @abc.abstractmethod - def is_applicable(self): - # type: () -> bool + def is_applicable(self) -> bool: """Is this initramfs config source applicable to the current system?""" - pass @abc.abstractmethod - def render_config(self): - # type: () -> dict + def render_config(self) -> dict: """Render a v1 network config from the initramfs configuration""" - pass class KlibcNetworkConfigSource(InitramfsNetworkConfigSource): @@ -65,8 +61,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource): if mac_addr: self._mac_addrs[k] = mac_addr - def is_applicable(self): - # type: () -> bool + def is_applicable(self) -> bool: """ Return whether this system has klibc initramfs network config or not @@ -84,8 +79,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource): return True return False - def render_config(self): - # type: () -> dict + def render_config(self) -> dict: return config_from_klibc_net_cfg( files=self._files, mac_addrs=self._mac_addrs, ) @@ -104,9 +98,12 @@ def _klibc_to_config_entry(content, mac_addrs=None): provided here. There is no good documentation on this unfortunately. DEVICE= is expected/required and PROTO should indicate if - this is 'static' or 'dhcp' or 'dhcp6' (LP: #1621507). + this is 'none' (static) or 'dhcp' or 'dhcp6' (LP: #1621507). note that IPV6PROTO is also written by newer code to address the possibility of both ipv4 and ipv6 getting addresses. + + Full syntax is documented at: + https://git.kernel.org/pub/scm/libs/klibc/klibc.git/plain/usr/kinit/ipconfig/README.ipconfig """ if mac_addrs is None: @@ -115,8 +112,8 @@ def _klibc_to_config_entry(content, mac_addrs=None): data = util.load_shell_content(content) try: name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6'] - except KeyError: - raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data") + except KeyError as e: + raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data") from e # ipconfig on precise does not write PROTO # IPv6 config gives us IPV6PROTO, not PROTO. @@ -125,9 +122,9 @@ def _klibc_to_config_entry(content, mac_addrs=None): if data.get('filename'): proto = 'dhcp' else: - proto = 'static' + proto = 'none' - if proto not in ('static', 'dhcp', 'dhcp6'): + if proto not in ('none', 'dhcp', 'dhcp6'): raise ValueError("Unexpected value for PROTO: %s" % proto) iface = { @@ -147,6 +144,9 @@ def _klibc_to_config_entry(content, mac_addrs=None): # PROTO for ipv4, IPV6PROTO for ipv6 cur_proto = data.get(pre + 'PROTO', proto) + # ipconfig's 'none' is called 'static' + if cur_proto == 'none': + cur_proto = 'static' subnet = {'type': cur_proto, 'control': 'manual'} # only populate address for static types. While the rendered config @@ -230,34 +230,35 @@ def read_initramfs_config(): return None -def _decomp_gzip(blob, strict=True): - # decompress blob. raise exception if not compressed unless strict=False. +def _decomp_gzip(blob): + # decompress blob or return original blob with io.BytesIO(blob) as iobuf: gzfp = None try: gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf) return gzfp.read() except IOError: - if strict: - raise return blob finally: if gzfp: gzfp.close() -def _b64dgz(b64str, gzipped="try"): - # decode a base64 string. 
If gzipped is true, transparently uncompresss - # if gzipped is 'try', then try gunzip, returning the original on fail. - try: - blob = base64.b64decode(b64str) - except TypeError: - raise ValueError("Invalid base64 text: %s" % b64str) +def _b64dgz(data): + """Decode a string base64 encoding, if gzipped, uncompress as well - if not gzipped: - return blob + :return: decompressed unencoded string of the data or empty string on + unencoded data. + """ + try: + blob = base64.b64decode(data) + except (TypeError, ValueError): + logging.error( + "Expected base64 encoded kernel commandline parameter" + " network-config. Ignoring network-config=%s.", data) + return '' - return _decomp_gzip(blob, strict=gzipped != "try") + return _decomp_gzip(blob) def read_kernel_cmdline_config(cmdline=None): @@ -270,6 +271,8 @@ def read_kernel_cmdline_config(cmdline=None): if tok.startswith("network-config="): data64 = tok.split("=", 1)[1] if data64: + if data64 == KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED: + return {"config": "disabled"} return util.load_yaml(_b64dgz(data64)) return None diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index c033cc8..4394c68 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -10,14 +10,15 @@ import os import re import signal import time +from io import StringIO from cloudinit.net import ( EphemeralIPv4Network, find_fallback_nic, get_devicelist, has_url_connectivity) from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip from cloudinit import temp_utils +from cloudinit import subp from cloudinit import util -from six import StringIO LOG = logging.getLogger(__name__) @@ -30,19 +31,18 @@ class InvalidDHCPLeaseFileError(Exception): Current uses are DataSourceAzure and DataSourceEc2 during ephemeral boot to scrape metadata. """ - pass class NoDHCPLeaseError(Exception): """Raised when unable to get a DHCP lease.""" - pass class EphemeralDHCPv4(object): - def __init__(self, iface=None, connectivity_url=None): + def __init__(self, iface=None, connectivity_url=None, dhcp_log_func=None): self.iface = iface self._ephipv4 = None self.lease = None + self.dhcp_log_func = dhcp_log_func self.connectivity_url = connectivity_url def __enter__(self): @@ -80,9 +80,10 @@ class EphemeralDHCPv4(object): if self.lease: return self.lease try: - leases = maybe_perform_dhcp_discovery(self.iface) - except InvalidDHCPLeaseFileError: - raise NoDHCPLeaseError() + leases = maybe_perform_dhcp_discovery( + self.iface, self.dhcp_log_func) + except InvalidDHCPLeaseFileError as e: + raise NoDHCPLeaseError() from e if not leases: raise NoDHCPLeaseError() self.lease = leases[-1] @@ -130,13 +131,15 @@ class EphemeralDHCPv4(object): result[internal_mapping] = self.lease.get(different_names) -def maybe_perform_dhcp_discovery(nic=None): +def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None): """Perform dhcp discovery if nic valid and dhclient command exists. If the nic is invalid or undiscoverable or dhclient command is not found, skip dhcp_discovery and return an empty dict. @param nic: Name of the network interface we want to run dhclient on. + @param dhcp_log_func: A callable accepting the dhclient output and error + streams. @return: A list of dicts representing dhcp options for each lease obtained from the dhclient discovery if run, otherwise an empty list is returned. 
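The dhcp_log_func parameter threaded through EphemeralDHCPv4, maybe_perform_dhcp_discovery and dhcp_discovery above lets a caller observe dhclient's captured output and error streams. A minimal sketch of how a consumer could wire it up; the interface name and the print-based reporting are illustrative assumptions, not part of this patch:

    # Hypothetical consumer of the new dhcp_log_func hook.
    from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError

    def report_dhclient(out, err):
        # Called once with dhclient's stdout and stderr after discovery.
        print('dhclient stdout: %s' % out)
        print('dhclient stderr: %s' % err)

    try:
        with EphemeralDHCPv4(iface='eth9',
                             dhcp_log_func=report_dhclient) as lease:
            print('leased address: %s' % lease['fixed-address'])
    except NoDHCPLeaseError:
        print('no DHCP lease could be obtained')
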
@@ -150,7 +153,7 @@ def maybe_perform_dhcp_discovery(nic=None): LOG.debug( 'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic) return [] - dhclient_path = util.which('dhclient') + dhclient_path = subp.which('dhclient') if not dhclient_path: LOG.debug('Skip dhclient configuration: No dhclient command found.') return [] @@ -158,7 +161,7 @@ def maybe_perform_dhcp_discovery(nic=None): prefix='cloud-init-dhcp-', needs_exe=True) as tdir: # Use /var/tmp because /run/cloud-init/tmp is mounted noexec - return dhcp_discovery(dhclient_path, nic, tdir) + return dhcp_discovery(dhclient_path, nic, tdir, dhcp_log_func) def parse_dhcp_lease_file(lease_file): @@ -192,13 +195,15 @@ def parse_dhcp_lease_file(lease_file): return dhcp_leases -def dhcp_discovery(dhclient_cmd_path, interface, cleandir): +def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None): """Run dhclient on the interface without scripts or filesystem artifacts. @param dhclient_cmd_path: Full path to the dhclient used. @param interface: Name of the network inteface on which to dhclient. @param cleandir: The directory from which to run dhclient as well as store dhcp leases. + @param dhcp_log_func: A callable accepting the dhclient output and error + streams. @return: A list of dicts of representing the dhcp leases parsed from the dhcp.leases file or empty list. @@ -215,14 +220,20 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir): pid_file = os.path.join(cleandir, 'dhclient.pid') lease_file = os.path.join(cleandir, 'dhcp.leases') + # In some cases files in /var/tmp may not be executable, launching dhclient + # from there will certainly raise 'Permission denied' error. Try launching + # the original dhclient instead. + if not os.access(sandbox_dhclient_cmd, os.X_OK): + sandbox_dhclient_cmd = dhclient_cmd_path + # ISC dhclient needs the interface up to send initial discovery packets. # Generally dhclient relies on dhclient-script PREINIT action to bring the # link up before attempting discovery. Since we are using -sf /bin/true, # we need to do that "link up" ourselves first. - util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True) + subp.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True) cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file, '-pf', pid_file, interface, '-sf', '/bin/true'] - util.subp(cmd, capture=True) + out, err = subp.subp(cmd, capture=True) # Wait for pid file and lease file to appear, and for the process # named by the pid file to daemonize (have pid 1 as its parent). 
If we @@ -239,6 +250,7 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir): return [] ppid = 'unknown' + daemonized = False for _ in range(0, 1000): pid_content = util.load_file(pid_file).strip() try: @@ -250,13 +262,17 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir): if ppid == 1: LOG.debug('killing dhclient with pid=%s', pid) os.kill(pid, signal.SIGKILL) - return parse_dhcp_lease_file(lease_file) + daemonized = True + break time.sleep(0.01) - LOG.error( - 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds', - pid_content, ppid, 0.01 * 1000 - ) + if not daemonized: + LOG.error( + 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s ' + 'seconds', pid_content, ppid, 0.01 * 1000 + ) + if dhcp_log_func is not None: + dhcp_log_func(out, err) return parse_dhcp_lease_file(lease_file) diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 7077106..13c041f 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -11,6 +11,7 @@ from . import renderer from .network_state import subnet_is_ipv6 from cloudinit import log as logging +from cloudinit import subp from cloudinit import util @@ -429,7 +430,9 @@ class Renderer(renderer.Renderer): iface['mode'] = 'auto' # Use stateless DHCPv6 (0=off, 1=on) iface['dhcp'] = '0' - elif subnet_is_ipv6(subnet) and subnet['type'] == 'static': + elif subnet_is_ipv6(subnet): + # mode might be static6, eni uses 'static' + iface['mode'] = 'static' if accept_ra is not None: # Accept router advertisements (0=off, 1=on) iface['accept_ra'] = '1' if accept_ra else '0' @@ -480,10 +483,8 @@ class Renderer(renderer.Renderer): if searchdomains: lo['subnets'][0]["dns_search"] = (" ".join(searchdomains)) - ''' Apply a sort order to ensure that we write out - the physical interfaces first; this is critical for - bonding - ''' + # Apply a sort order to ensure that we write out the physical + # interfaces first; this is critical for bonding order = { 'loopback': 0, 'physical': 1, @@ -509,13 +510,13 @@ class Renderer(renderer.Renderer): return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n" def render_network_state(self, network_state, templates=None, target=None): - fpeni = util.target_path(target, self.eni_path) + fpeni = subp.target_path(target, self.eni_path) util.ensure_dir(os.path.dirname(fpeni)) header = self.eni_header if self.eni_header else "" util.write_file(fpeni, header + self._render_interfaces(network_state)) if self.netrules_path: - netrules = util.target_path(target, self.netrules_path) + netrules = subp.target_path(target, self.netrules_path) util.ensure_dir(os.path.dirname(netrules)) util.write_file(netrules, self._render_persistent_net(network_state)) @@ -542,9 +543,9 @@ def available(target=None): expected = ['ifquery', 'ifup', 'ifdown'] search = ['/sbin', '/usr/sbin'] for p in expected: - if not util.which(p, search=search, target=target): + if not subp.which(p, search=search, target=target): return False - eni = util.target_path(target, 'etc/network/interfaces') + eni = subp.target_path(target, 'etc/network/interfaces') if not os.path.isfile(eni): return False diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py new file mode 100644 index 0000000..0285dfe --- /dev/null +++ b/cloudinit/net/freebsd.py @@ -0,0 +1,59 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import log as logging +import cloudinit.net.bsd +from cloudinit import subp +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +class Renderer(cloudinit.net.bsd.BSDRenderer): + + def __init__(self, config=None): + self._route_cpt = 0 + super(Renderer, self).__init__() + + def rename_interface(self, cur_name, device_name): + self.set_rc_config_value('ifconfig_%s_name' % cur_name, device_name) + + def write_config(self): + for device_name, v in self.interface_configurations.items(): + if isinstance(v, dict): + self.set_rc_config_value( + 'ifconfig_' + device_name, + v.get('address') + ' netmask ' + v.get('netmask')) + else: + self.set_rc_config_value('ifconfig_' + device_name, 'DHCP') + + def start_services(self, run=False): + if not run: + LOG.debug("freebsd generate postcmd disabled") + return + + subp.subp(['service', 'netif', 'restart'], capture=True) + # On FreeBSD 10, the restart of routing and dhclient is likely to fail + # because + # - routing: it cannot remove the loopback route, but it will still set + # up the default route as expected. + # - dhclient: it cannot stop the dhclient started by the netif service. + # In both case, the situation is ok, and we can proceed. + subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1]) + + for dhcp_interface in self.dhcp_interfaces(): + subp.subp(['service', 'dhclient', 'restart', dhcp_interface], + rcs=[0, 1], + capture=True) + + def set_route(self, network, netmask, gateway): + if network == '0.0.0.0': + self.set_rc_config_value('defaultrouter', gateway) + else: + route_name = 'route_net%d' % self._route_cpt + route_cmd = "-route %s/%s %s" % (network, netmask, gateway) + self.set_rc_config_value(route_name, route_cmd) + self._route_cpt += 1 + + +def available(target=None): + return util.is_FreeBSD() diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py new file mode 100644 index 0000000..71b38ee --- /dev/null +++ b/cloudinit/net/netbsd.py @@ -0,0 +1,44 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import log as logging +from cloudinit import subp +from cloudinit import util +import cloudinit.net.bsd + +LOG = logging.getLogger(__name__) + + +class Renderer(cloudinit.net.bsd.BSDRenderer): + + def __init__(self, config=None): + super(Renderer, self).__init__() + + def write_config(self): + if self.dhcp_interfaces(): + self.set_rc_config_value('dhcpcd', 'YES') + self.set_rc_config_value( + 'dhcpcd_flags', + ' '.join(self.dhcp_interfaces()) + ) + for device_name, v in self.interface_configurations.items(): + if isinstance(v, dict): + self.set_rc_config_value( + 'ifconfig_' + device_name, + v.get('address') + ' netmask ' + v.get('netmask')) + + def start_services(self, run=False): + if not run: + LOG.debug("netbsd generate postcmd disabled") + return + + subp.subp(['service', 'network', 'restart'], capture=True) + if self.dhcp_interfaces(): + subp.subp(['service', 'dhcpcd', 'restart'], capture=True) + + def set_route(self, network, netmask, gateway): + if network == '0.0.0.0': + self.set_rc_config_value('defaultroute', gateway) + + +def available(target=None): + return util.is_NetBSD() diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 14d3999..53347c8 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -8,6 +8,7 @@ from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES from cloudinit import log as logging from cloudinit import util +from cloudinit import subp from cloudinit import safeyaml from cloudinit.net import SYS_CLASS_NET, get_devicelist @@ -98,7 +99,7 @@ def _extract_addresses(config, entry, ifname, features=None): entry.update({sn_type: True}) elif sn_type in IPV6_DYNAMIC_TYPES: entry.update({'dhcp6': True}) - elif sn_type in ['static']: + elif sn_type in ['static', 'static6']: addr = "%s" % subnet.get('address') if 'prefix' in subnet: addr += "/%d" % subnet.get('prefix') @@ -164,14 +165,14 @@ def _extract_bond_slaves_by_name(interfaces, entry, bond_master): def _clean_default(target=None): # clean out any known default files and derived files in target # LP: #1675576 - tpath = util.target_path(target, "etc/netplan/00-snapd-config.yaml") + tpath = subp.target_path(target, "etc/netplan/00-snapd-config.yaml") if not os.path.isfile(tpath): return content = util.load_file(tpath, decode=False) if content != KNOWN_SNAPD_CONFIG: return - derived = [util.target_path(target, f) for f in ( + derived = [subp.target_path(target, f) for f in ( 'run/systemd/network/10-netplan-all-en.network', 'run/systemd/network/10-netplan-all-eth.network', 'run/systemd/generator/netplan.stamp')] @@ -203,10 +204,10 @@ class Renderer(renderer.Renderer): def features(self): if self._features is None: try: - info_blob, _err = util.subp(self.NETPLAN_INFO, capture=True) + info_blob, _err = subp.subp(self.NETPLAN_INFO, capture=True) info = util.load_yaml(info_blob) self._features = info['netplan.io']['features'] - except util.ProcessExecutionError: + except subp.ProcessExecutionError: # if the info subcommand is not present then we don't have any # new features pass @@ -218,7 +219,7 @@ class Renderer(renderer.Renderer): # check network state for version # if v2, then extract network_state.config # else render_v2_from_state - fpnplan = os.path.join(util.target_path(target), self.netplan_path) + fpnplan = os.path.join(subp.target_path(target), self.netplan_path) util.ensure_dir(os.path.dirname(fpnplan)) header = self.netplan_header if self.netplan_header else "" @@ -239,7 +240,7 @@ class Renderer(renderer.Renderer): if not run: 
LOG.debug("netplan generate postcmd disabled") return - util.subp(self.NETPLAN_GENERATE, capture=True) + subp.subp(self.NETPLAN_GENERATE, capture=True) def _net_setup_link(self, run=False): """To ensure device link properties are applied, we poke @@ -253,7 +254,7 @@ class Renderer(renderer.Renderer): for cmd in [setup_lnk + [SYS_CLASS_NET + iface] for iface in get_devicelist() if os.path.islink(SYS_CLASS_NET + iface)]: - util.subp(cmd, capture=True) + subp.subp(cmd, capture=True) def _render_content(self, network_state): @@ -406,7 +407,7 @@ def available(target=None): expected = ['netplan'] search = ['/usr/sbin', '/sbin'] for p in expected: - if not util.which(p, search=search, target=target): + if not subp.which(p, search=search, target=target): return False return True diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index f3e8e25..b2f7d31 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -10,8 +10,6 @@ import logging import socket import struct -import six - from cloudinit import safeyaml from cloudinit import util @@ -186,7 +184,7 @@ class NetworkState(object): def iter_interfaces(self, filter_func=None): ifaces = self._network_state.get('interfaces', {}) - for iface in six.itervalues(ifaces): + for iface in ifaces.values(): if filter_func is None: yield iface else: @@ -217,11 +215,10 @@ class NetworkState(object): return ( route.get('prefix') == 0 and route.get('network') in default_nets - ) + ) -@six.add_metaclass(CommandHandlerMeta) -class NetworkStateInterpreter(object): +class NetworkStateInterpreter(metaclass=CommandHandlerMeta): initial_network_state = { 'interfaces': {}, @@ -300,9 +297,10 @@ class NetworkStateInterpreter(object): command_type = command['type'] try: handler = self.command_handlers[command_type] - except KeyError: - raise RuntimeError("No handler found for" - " command '%s'" % command_type) + except KeyError as e: + raise RuntimeError( + "No handler found for command '%s'" % command_type + ) from e try: handler(self, command) except InvalidCommand: @@ -315,13 +313,14 @@ class NetworkStateInterpreter(object): def parse_config_v2(self, skip_broken=True): for command_type, command in self._config.items(): - if command_type == 'version': + if command_type in ['version', 'renderer']: continue try: handler = self.command_handlers[command_type] - except KeyError: - raise RuntimeError("No handler found for" - " command '%s'" % command_type) + except KeyError as e: + raise RuntimeError( + "No handler found for command '%s'" % command_type + ) from e try: handler(self, command) self._v2_common(command) @@ -699,7 +698,7 @@ class NetworkStateInterpreter(object): def handle_wifis(self, command): LOG.warning('Wifi configuration is only available to distros with' - 'netplan rendering support.') + ' netplan rendering support.') def _v2_common(self, cfg): LOG.debug('v2_common: handling config:\n%s', cfg) @@ -725,10 +724,10 @@ class NetworkStateInterpreter(object): item_params = dict((key, value) for (key, value) in item_cfg.items() if key not in NETWORK_V2_KEY_FILTER) - # we accept the fixed spelling, but write the old for compatability + # we accept the fixed spelling, but write the old for compatibility # Xenial does not have an updated netplan which supports the # correct spelling. 
LP: #1756701 - params = item_params['parameters'] + params = item_params.get('parameters', {}) grat_value = params.pop('gratuitous-arp', None) if grat_value: params['gratuitious-arp'] = grat_value @@ -737,8 +736,7 @@ class NetworkStateInterpreter(object): 'type': cmd_type, 'name': item_name, cmd_type + '_interfaces': item_cfg.get('interfaces'), - 'params': dict((v2key_to_v1[k], v) for k, v in - item_params.get('parameters', {}).items()) + 'params': dict((v2key_to_v1[k], v) for k, v in params.items()) } if 'mtu' in item_cfg: v1_cmd['mtu'] = item_cfg['mtu'] @@ -918,9 +916,10 @@ def _normalize_route(route): if metric: try: normal_route['metric'] = int(metric) - except ValueError: + except ValueError as e: raise TypeError( - 'Route config metric {} is not an integer'.format(metric)) + 'Route config metric {} is not an integer'.format(metric) + ) from e return normal_route @@ -941,7 +940,7 @@ def subnet_is_ipv6(subnet): # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or # 'ipv6_slaac' if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES: - # This is a request for DHCPv6. + # This is a request either static6 type or DHCPv6. return True elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')): return True @@ -970,7 +969,7 @@ def ipv4_mask_to_net_prefix(mask): """ if isinstance(mask, int): return mask - if isinstance(mask, six.string_types): + if isinstance(mask, str): try: return int(mask) except ValueError: @@ -997,7 +996,7 @@ def ipv6_mask_to_net_prefix(mask): if isinstance(mask, int): return mask - if isinstance(mask, six.string_types): + if isinstance(mask, str): try: return int(mask) except ValueError: diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py new file mode 100644 index 0000000..166d77e --- /dev/null +++ b/cloudinit/net/openbsd.py @@ -0,0 +1,46 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import log as logging +from cloudinit import subp +from cloudinit import util +import cloudinit.net.bsd + +LOG = logging.getLogger(__name__) + + +class Renderer(cloudinit.net.bsd.BSDRenderer): + + def write_config(self): + for device_name, v in self.interface_configurations.items(): + if_file = 'etc/hostname.{}'.format(device_name) + fn = subp.target_path(self.target, if_file) + if device_name in self.dhcp_interfaces(): + content = 'dhcp\n' + elif isinstance(v, dict): + try: + content = "inet {address} {netmask}\n".format( + address=v['address'], + netmask=v['netmask'] + ) + except KeyError: + LOG.error( + "Invalid static configuration for %s", + device_name) + util.write_file(fn, content) + + def start_services(self, run=False): + if not self._postcmds: + LOG.debug("openbsd generate postcmd disabled") + return + subp.subp(['sh', '/etc/netstart'], capture=True) + + def set_route(self, network, netmask, gateway): + if network == '0.0.0.0': + if_file = 'etc/mygate' + fn = subp.target_path(self.target, if_file) + content = gateway + '\n' + util.write_file(fn, content) + + +def available(target=None): + return util.is_OpenBSD() diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py index 5f32e90..2a61a7a 100644 --- a/cloudinit/net/renderer.py +++ b/cloudinit/net/renderer.py @@ -6,7 +6,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import abc -import six +import io from .network_state import parse_net_config_data from .udev import generate_udev_rule @@ -34,7 +34,7 @@ class Renderer(object): """Given state, emit udev rules to map mac to ifname.""" # TODO(harlowja): this seems shared between eni renderer and # this, so move it to a shared location. - content = six.StringIO() + content = io.StringIO() for iface in network_state.iter_interfaces(filter_by_physical): # for physical interfaces write out a persist net udev rule if 'name' in iface and iface.get('mac_address'): diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py index 5117b4a..e2de4d5 100644 --- a/cloudinit/net/renderers.py +++ b/cloudinit/net/renderers.py @@ -1,17 +1,24 @@ # This file is part of cloud-init. See LICENSE file for license information. from . import eni +from . import freebsd +from . import netbsd from . import netplan from . import RendererNotFoundError +from . import openbsd from . import sysconfig NAME_TO_RENDERER = { "eni": eni, + "freebsd": freebsd, + "netbsd": netbsd, "netplan": netplan, + "openbsd": openbsd, "sysconfig": sysconfig, } -DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan"] +DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd", + "netbsd", "openbsd"] def search(priority=None, target=None, first=False): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 310cdf0..0a5d481 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -1,16 +1,17 @@ # This file is part of cloud-init. See LICENSE file for license information. +import copy +import io import os import re -import six +from configobj import ConfigObj -from cloudinit.distros.parsers import networkmanager_conf -from cloudinit.distros.parsers import resolv_conf from cloudinit import log as logging from cloudinit import util - -from configobj import ConfigObj +from cloudinit import subp +from cloudinit.distros.parsers import networkmanager_conf +from cloudinit.distros.parsers import resolv_conf from . 
import renderer from .network_state import ( @@ -86,6 +87,9 @@ class ConfigMap(object): def __getitem__(self, key): return self._conf[key] + def get(self, key): + return self._conf.get(key) + def __contains__(self, key): return key in self._conf @@ -96,7 +100,7 @@ class ConfigMap(object): return len(self._conf) def to_string(self): - buf = six.StringIO() + buf = io.StringIO() buf.write(_make_header()) if self._conf: buf.write("\n") @@ -104,11 +108,14 @@ class ConfigMap(object): value = self._conf[key] if isinstance(value, bool): value = self._bool_map[value] - if not isinstance(value, six.string_types): + if not isinstance(value, str): value = str(value) buf.write("%s=%s\n" % (key, _quote_value(value))) return buf.getvalue() + def update(self, updates): + self._conf.update(updates) + class Route(ConfigMap): """Represents a route configuration.""" @@ -150,7 +157,7 @@ class Route(ConfigMap): # only accept ipv4 and ipv6 if proto not in ['ipv4', 'ipv6']: raise ValueError("Unknown protocol '%s'" % (str(proto))) - buf = six.StringIO() + buf = io.StringIO() buf.write(_make_header()) if self._conf: buf.write("\n") @@ -269,13 +276,29 @@ class Renderer(renderer.Renderer): # s1-networkscripts-interfaces.html (or other docs for # details about this) - iface_defaults = tuple([ - ('ONBOOT', True), - ('USERCTL', False), - ('NM_CONTROLLED', False), - ('BOOTPROTO', 'none'), - ('STARTMODE', 'auto'), - ]) + iface_defaults = { + 'rhel': {'ONBOOT': True, 'USERCTL': False, 'NM_CONTROLLED': False, + 'BOOTPROTO': 'none'}, + 'suse': {'BOOTPROTO': 'static', 'STARTMODE': 'auto'}, + } + + cfg_key_maps = { + 'rhel': { + 'accept-ra': 'IPV6_FORCE_ACCEPT_RA', + 'bridge_stp': 'STP', + 'bridge_ageing': 'AGEING', + 'bridge_bridgeprio': 'PRIO', + 'mac_address': 'HWADDR', + 'mtu': 'MTU', + }, + 'suse': { + 'bridge_stp': 'BRIDGE_STP', + 'bridge_ageing': 'BRIDGE_AGEINGTIME', + 'bridge_bridgeprio': 'BRIDGE_PRIORITY', + 'mac_address': 'LLADDR', + 'mtu': 'MTU', + }, + } # If these keys exist, then their values will be used to form # a BONDING_OPTS grouping; otherwise no grouping will be set. 
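The per-flavor iface_defaults and cfg_key_maps dictionaries above replace the old shared tuples, letting one renderer emit RHEL-style keys (HWADDR, STP) or SUSE-style keys (LLADDR, BRIDGE_STP) from the same network state. A self-contained sketch of the translation these maps drive; the trimmed-down maps and the sample interface dict are made up for illustration:

    # Hypothetical distillation of the flavor-keyed renaming.
    cfg_key_maps = {
        'rhel': {'mac_address': 'HWADDR', 'mtu': 'MTU'},
        'suse': {'mac_address': 'LLADDR', 'mtu': 'MTU'},
    }

    def translate(iface, flavor):
        # Keep only the keys this flavor knows how to express.
        key_map = cfg_key_maps[flavor]
        return {key_map[k]: v for k, v in iface.items() if k in key_map}

    iface = {'mac_address': '52:54:00:12:34:56', 'mtu': 1500}
    print(translate(iface, 'rhel'))  # {'HWADDR': '52:54:00:12:34:56', 'MTU': 1500}
    print(translate(iface, 'suse'))  # {'LLADDR': '52:54:00:12:34:56', 'MTU': 1500}
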
@@ -297,12 +320,6 @@ class Renderer(renderer.Renderer): ('bond_primary_reselect', "primary_reselect=%s"), ]) - bridge_opts_keys = tuple([ - ('bridge_stp', 'STP'), - ('bridge_ageing', 'AGEING'), - ('bridge_bridgeprio', 'PRIO'), - ]) - templates = {} def __init__(self, config=None): @@ -320,65 +337,101 @@ class Renderer(renderer.Renderer): 'iface_templates': config.get('iface_templates'), 'route_templates': config.get('route_templates'), } + self.flavor = config.get('flavor', 'rhel') @classmethod - def _render_iface_shared(cls, iface, iface_cfg): - for k, v in cls.iface_defaults: - iface_cfg[k] = v + def _render_iface_shared(cls, iface, iface_cfg, flavor): + flavor_defaults = copy.deepcopy(cls.iface_defaults.get(flavor, {})) + iface_cfg.update(flavor_defaults) - for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]: + for old_key in ('mac_address', 'mtu', 'accept-ra'): old_value = iface.get(old_key) if old_value is not None: # only set HWADDR on physical interfaces if (old_key == 'mac_address' and iface['type'] not in ['physical', 'infiniband']): continue - iface_cfg[new_key] = old_value - - if iface['accept-ra'] is not None: - iface_cfg['IPV6_FORCE_ACCEPT_RA'] = iface['accept-ra'] + new_key = cls.cfg_key_maps[flavor].get(old_key) + if new_key: + iface_cfg[new_key] = old_value @classmethod - def _render_subnets(cls, iface_cfg, subnets, has_default_route): + def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor): # setting base values - iface_cfg['BOOTPROTO'] = 'none' + if flavor == 'suse': + iface_cfg['BOOTPROTO'] = 'static' + if 'BRIDGE' in iface_cfg: + iface_cfg['BOOTPROTO'] = 'dhcp' + iface_cfg.drop('BRIDGE') + else: + iface_cfg['BOOTPROTO'] = 'none' # modifying base values according to subnets for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): mtu_key = 'MTU' subnet_type = subnet.get('type') if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful': - # TODO need to set BOOTPROTO to dhcp6 on SUSE - iface_cfg['IPV6INIT'] = True - # Configure network settings using DHCPv6 - iface_cfg['DHCPV6C'] = True + if flavor == 'suse': + # User wants dhcp for both protocols + if iface_cfg['BOOTPROTO'] == 'dhcp4': + iface_cfg['BOOTPROTO'] = 'dhcp' + else: + # Only IPv6 is DHCP, IPv4 may be static + iface_cfg['BOOTPROTO'] = 'dhcp6' + iface_cfg['DHCLIENT6_MODE'] = 'managed' + else: + iface_cfg['IPV6INIT'] = True + # Configure network settings using DHCPv6 + iface_cfg['DHCPV6C'] = True elif subnet_type == 'ipv6_dhcpv6-stateless': - iface_cfg['IPV6INIT'] = True - # Configure network settings using SLAAC from RAs and optional - # info from dhcp server using DHCPv6 - iface_cfg['IPV6_AUTOCONF'] = True - iface_cfg['DHCPV6C'] = True - # Use Information-request to get only stateless configuration - # parameters (i.e., without address). - iface_cfg['DHCPV6C_OPTIONS'] = '-S' + if flavor == 'suse': + # User wants dhcp for both protocols + if iface_cfg['BOOTPROTO'] == 'dhcp4': + iface_cfg['BOOTPROTO'] = 'dhcp' + else: + # Only IPv6 is DHCP, IPv4 may be static + iface_cfg['BOOTPROTO'] = 'dhcp6' + iface_cfg['DHCLIENT6_MODE'] = 'info' + else: + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs and + # optional info from dhcp server using DHCPv6 + iface_cfg['IPV6_AUTOCONF'] = True + iface_cfg['DHCPV6C'] = True + # Use Information-request to get only stateless + # configuration parameters (i.e., without address). 
+ iface_cfg['DHCPV6C_OPTIONS'] = '-S' elif subnet_type == 'ipv6_slaac': - iface_cfg['IPV6INIT'] = True - # Configure network settings using SLAAC from RAs - iface_cfg['IPV6_AUTOCONF'] = True + if flavor == 'suse': + # User wants dhcp for both protocols + if iface_cfg['BOOTPROTO'] == 'dhcp4': + iface_cfg['BOOTPROTO'] = 'dhcp' + else: + # Only IPv6 is DHCP, IPv4 may be static + iface_cfg['BOOTPROTO'] = 'dhcp6' + iface_cfg['DHCLIENT6_MODE'] = 'info' + else: + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs + iface_cfg['IPV6_AUTOCONF'] = True elif subnet_type in ['dhcp4', 'dhcp']: + bootproto_in = iface_cfg['BOOTPROTO'] iface_cfg['BOOTPROTO'] = 'dhcp' - elif subnet_type == 'static': + if flavor == 'suse' and subnet_type == 'dhcp4': + # If dhcp6 is already specified the user wants dhcp + # for both protocols + if bootproto_in != 'dhcp6': + # Only IPv4 is DHCP, IPv6 may be static + iface_cfg['BOOTPROTO'] = 'dhcp4' + elif subnet_type in ['static', 'static6']: + # RH info # grep BOOTPROTO sysconfig.txt -A2 | head -3 # BOOTPROTO=none|bootp|dhcp # 'bootp' or 'dhcp' cause a DHCP client # to run on the device. Any other # value causes any static configuration # in the file to be applied. - # ==> the following should not be set to 'static' - # but should remain 'none' - # if iface_cfg['BOOTPROTO'] == 'none': - # iface_cfg['BOOTPROTO'] = 'static' - if subnet_is_ipv6(subnet): + if subnet_is_ipv6(subnet) and flavor != 'suse': mtu_key = 'IPV6_MTU' iface_cfg['IPV6INIT'] = True if 'mtu' in subnet: @@ -389,18 +442,31 @@ class Renderer(renderer.Renderer): 'Network config: ignoring %s device-level mtu:%s' ' because ipv4 subnet-level mtu:%s provided.', iface_cfg.name, iface_cfg[mtu_key], subnet['mtu']) - iface_cfg[mtu_key] = subnet['mtu'] + if subnet_is_ipv6(subnet): + if flavor == 'suse': + # TODO(rjschwei) write mtu setting to + # /etc/sysctl.d/ + pass + else: + iface_cfg[mtu_key] = subnet['mtu'] + else: + iface_cfg[mtu_key] = subnet['mtu'] elif subnet_type == 'manual': - # If the subnet has an MTU setting, then ONBOOT=True - # to apply the setting - iface_cfg['ONBOOT'] = mtu_key in iface_cfg + if flavor == 'suse': + LOG.debug('Unknown subnet type setting "%s"', subnet_type) + else: + # If the subnet has an MTU setting, then ONBOOT=True + # to apply the setting + iface_cfg['ONBOOT'] = mtu_key in iface_cfg else: raise ValueError("Unknown subnet type '%s' found" " for interface '%s'" % (subnet_type, iface_cfg.name)) if subnet.get('control') == 'manual': - iface_cfg['ONBOOT'] = False - iface_cfg['STARTMODE'] = 'manual' + if flavor == 'suse': + iface_cfg['STARTMODE'] = 'manual' + else: + iface_cfg['ONBOOT'] = False # set IPv4 and IPv6 static addresses ipv4_index = -1 @@ -409,29 +475,37 @@ class Renderer(renderer.Renderer): subnet_type = subnet.get('type') # metric may apply to both dhcp and static config if 'metric' in subnet: - iface_cfg['METRIC'] = subnet['metric'] - # TODO(hjensas): Including dhcp6 here is likely incorrect. DHCPv6 - # does not ever provide a default gateway, the default gateway - # come from RA's. 
(https://github.com/openSUSE/wicked/issues/570) - if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: - if has_default_route and iface_cfg['BOOTPROTO'] != 'none': - iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False + if flavor != 'suse': + iface_cfg['METRIC'] = subnet['metric'] + if subnet_type in ['dhcp', 'dhcp4']: + # On SUSE distros 'DHCLIENT_SET_DEFAULT_ROUTE' is a global + # setting in /etc/sysconfig/network/dhcp + if flavor != 'suse': + if has_default_route and iface_cfg['BOOTPROTO'] != 'none': + iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False continue elif subnet_type in IPV6_DYNAMIC_TYPES: continue - elif subnet_type == 'static': + elif subnet_type in ['static', 'static6']: if subnet_is_ipv6(subnet): ipv6_index = ipv6_index + 1 ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix']) if ipv6_index == 0: - iface_cfg['IPV6ADDR'] = ipv6_cidr - iface_cfg['IPADDR6'] = ipv6_cidr + if flavor == 'suse': + iface_cfg['IPADDR6'] = ipv6_cidr + else: + iface_cfg['IPV6ADDR'] = ipv6_cidr elif ipv6_index == 1: - iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr - iface_cfg['IPADDR6_0'] = ipv6_cidr + if flavor == 'suse': + iface_cfg['IPADDR6_1'] = ipv6_cidr + else: + iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr else: - iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr - iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr + if flavor == 'suse': + iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr + else: + iface_cfg['IPV6ADDR_SECONDARIES'] += \ + " " + ipv6_cidr else: ipv4_index = ipv4_index + 1 suff = "" if ipv4_index == 0 else str(ipv4_index) @@ -439,17 +513,17 @@ class Renderer(renderer.Renderer): iface_cfg['NETMASK' + suff] = \ net_prefix_to_ipv4_mask(subnet['prefix']) - if 'gateway' in subnet: + if 'gateway' in subnet and flavor != 'suse': iface_cfg['DEFROUTE'] = True if is_ipv6_addr(subnet['gateway']): iface_cfg['IPV6_DEFAULTGW'] = subnet['gateway'] else: iface_cfg['GATEWAY'] = subnet['gateway'] - if 'dns_search' in subnet: + if 'dns_search' in subnet and flavor != 'suse': iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search']) - if 'dns_nameservers' in subnet: + if 'dns_nameservers' in subnet and flavor != 'suse': if len(subnet['dns_nameservers']) > 3: # per resolv.conf(5) MAXNS sets this to 3. LOG.debug("%s has %d entries in dns_nameservers. " @@ -459,7 +533,12 @@ class Renderer(renderer.Renderer): iface_cfg['DNS' + str(i)] = k @classmethod - def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): + def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets, flavor): + # TODO(rjschwei): route configuration on SUSE distro happens via + # ifroute-* files, see lp#1812117. SUSE currently carries a local + # patch in their package. + if flavor == 'suse': + return for _, subnet in enumerate(subnets, start=len(iface_cfg.children)): subnet_type = subnet.get('type') for route in subnet.get('routes', []): @@ -487,14 +566,7 @@ class Renderer(renderer.Renderer): # TODO(harlowja): add validation that no other iface has # also provided the default route? iface_cfg['DEFROUTE'] = True - # TODO(hjensas): Including dhcp6 here is likely incorrect. - # DHCPv6 does not ever provide a default gateway, the - # default gateway come from RA's. - # (https://github.com/openSUSE/wicked/issues/570) - if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'): - # NOTE(hjensas): DHCLIENT_SET_DEFAULT_ROUTE is SuSE - # only. RHEL, CentOS, Fedora does not implement this - # option. 
+ if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4'): iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True if 'gateway' in route: if is_ipv6: @@ -538,7 +610,9 @@ class Renderer(renderer.Renderer): iface_cfg['BONDING_OPTS'] = " ".join(bond_opts) @classmethod - def _render_physical_interfaces(cls, network_state, iface_contents): + def _render_physical_interfaces( + cls, network_state, iface_contents, flavor + ): physical_filter = renderer.filter_by_physical for iface in network_state.iter_interfaces(physical_filter): iface_name = iface['name'] @@ -547,12 +621,15 @@ class Renderer(renderer.Renderer): route_cfg = iface_cfg.routes cls._render_subnets( - iface_cfg, iface_subnets, network_state.has_default_route + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor ) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod - def _render_bond_interfaces(cls, network_state, iface_contents): + def _render_bond_interfaces(cls, network_state, iface_contents, flavor): bond_filter = renderer.filter_by_type('bond') slave_filter = renderer.filter_by_attr('bond-master') for iface in network_state.iter_interfaces(bond_filter): @@ -566,17 +643,24 @@ class Renderer(renderer.Renderer): master_cfgs.extend(iface_cfg.children) for master_cfg in master_cfgs: master_cfg['BONDING_MASTER'] = True - master_cfg.kind = 'bond' + if flavor != 'suse': + master_cfg.kind = 'bond' if iface.get('mac_address'): - iface_cfg['MACADDR'] = iface.get('mac_address') + if flavor == 'suse': + iface_cfg['LLADDR'] = iface.get('mac_address') + else: + iface_cfg['MACADDR'] = iface.get('mac_address') iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes cls._render_subnets( - iface_cfg, iface_subnets, network_state.has_default_route + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor ) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) # iter_interfaces on network-state is not sorted to produce # consistent numbers we need to sort. 
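The static/static6 handling in the hunk above splits IPv6 address rendering by flavor: SUSE uses IPADDR6 plus numbered IPADDR6_N variables, while RHEL uses IPV6ADDR plus a space-separated IPV6ADDR_SECONDARIES list. A hypothetical distillation of just that branch, with made-up addresses:

    # Sketch of the flavor-dependent static IPv6 rendering.
    def render_static_ipv6(cidrs, flavor):
        cfg = {}
        for i, cidr in enumerate(cidrs):
            if flavor == 'suse':
                cfg['IPADDR6' if i == 0 else 'IPADDR6_%d' % i] = cidr
            elif i == 0:
                cfg['IPV6ADDR'] = cidr
            elif i == 1:
                cfg['IPV6ADDR_SECONDARIES'] = cidr
            else:
                cfg['IPV6ADDR_SECONDARIES'] += ' ' + cidr
        return cfg

    print(render_static_ipv6(['2001:db8::10/64', '2001:db8::11/64'], 'suse'))
    # {'IPADDR6': '2001:db8::10/64', 'IPADDR6_1': '2001:db8::11/64'}
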
@@ -586,28 +670,44 @@ class Renderer(renderer.Renderer): if slave_iface['bond-master'] == iface_name]) for index, bond_slave in enumerate(bond_slaves): - slavestr = 'BONDING_SLAVE%s' % index + if flavor == 'suse': + slavestr = 'BONDING_SLAVE_%s' % index + else: + slavestr = 'BONDING_SLAVE%s' % index iface_cfg[slavestr] = bond_slave slave_cfg = iface_contents[bond_slave] - slave_cfg['MASTER'] = iface_name - slave_cfg['SLAVE'] = True + if flavor == 'suse': + slave_cfg['BOOTPROTO'] = 'none' + slave_cfg['STARTMODE'] = 'hotplug' + else: + slave_cfg['MASTER'] = iface_name + slave_cfg['SLAVE'] = True @classmethod - def _render_vlan_interfaces(cls, network_state, iface_contents): + def _render_vlan_interfaces(cls, network_state, iface_contents, flavor): vlan_filter = renderer.filter_by_type('vlan') for iface in network_state.iter_interfaces(vlan_filter): iface_name = iface['name'] iface_cfg = iface_contents[iface_name] - iface_cfg['VLAN'] = True - iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')] + if flavor == 'suse': + vlan_id = iface.get('vlan_id') + if vlan_id: + iface_cfg['VLAN_ID'] = vlan_id + iface_cfg['ETHERDEVICE'] = iface_name[:iface_name.rfind('.')] + else: + iface_cfg['VLAN'] = True + iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')] iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes cls._render_subnets( - iface_cfg, iface_subnets, network_state.has_default_route + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor ) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @staticmethod def _render_dns(network_state, existing_dns_path=None): @@ -644,19 +744,39 @@ class Renderer(renderer.Renderer): return out @classmethod - def _render_bridge_interfaces(cls, network_state, iface_contents): + def _render_bridge_interfaces(cls, network_state, iface_contents, flavor): + bridge_key_map = { + old_k: new_k for old_k, new_k in cls.cfg_key_maps[flavor].items() + if old_k.startswith('bridge')} bridge_filter = renderer.filter_by_type('bridge') + for iface in network_state.iter_interfaces(bridge_filter): iface_name = iface['name'] iface_cfg = iface_contents[iface_name] - iface_cfg.kind = 'bridge' - for old_key, new_key in cls.bridge_opts_keys: + if flavor != 'suse': + iface_cfg.kind = 'bridge' + for old_key, new_key in bridge_key_map.items(): if old_key in iface: iface_cfg[new_key] = iface[old_key] - if iface.get('mac_address'): - iface_cfg['MACADDR'] = iface.get('mac_address') + if flavor == 'suse': + if 'BRIDGE_STP' in iface_cfg: + if iface_cfg.get('BRIDGE_STP'): + iface_cfg['BRIDGE_STP'] = 'on' + else: + iface_cfg['BRIDGE_STP'] = 'off' + if iface.get('mac_address'): + key = 'MACADDR' + if flavor == 'suse': + key = 'LLADDRESS' + iface_cfg[key] = iface.get('mac_address') + + if flavor == 'suse': + if iface.get('bridge_ports', []): + iface_cfg['BRIDGE_PORTS'] = '%s' % " ".join( + iface.get('bridge_ports') + ) # Is this the right key to get all the connected interfaces? 
for bridged_iface_name in iface.get('bridge_ports', []): # Ensure all bridged interfaces are correctly tagged @@ -665,17 +785,23 @@ class Renderer(renderer.Renderer): bridged_cfgs = [bridged_cfg] bridged_cfgs.extend(bridged_cfg.children) for bridge_cfg in bridged_cfgs: - bridge_cfg['BRIDGE'] = iface_name + bridge_value = iface_name + if flavor == 'suse': + bridge_value = 'yes' + bridge_cfg['BRIDGE'] = bridge_value iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes cls._render_subnets( - iface_cfg, iface_subnets, network_state.has_default_route + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor ) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod - def _render_ib_interfaces(cls, network_state, iface_contents): + def _render_ib_interfaces(cls, network_state, iface_contents, flavor): ib_filter = renderer.filter_by_type('infiniband') for iface in network_state.iter_interfaces(ib_filter): iface_name = iface['name'] @@ -684,12 +810,15 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes cls._render_subnets( - iface_cfg, iface_subnets, network_state.has_default_route + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor ) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod - def _render_sysconfig(cls, base_sysconf_dir, network_state, + def _render_sysconfig(cls, base_sysconf_dir, network_state, flavor, templates=None): '''Given state, return /etc/sysconfig files + contents''' if not templates: @@ -700,13 +829,17 @@ class Renderer(renderer.Renderer): continue iface_name = iface['name'] iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates) - cls._render_iface_shared(iface, iface_cfg) + if flavor == 'suse': + iface_cfg.drop('DEVICE') + # If type detection fails it is considered a bug in SUSE + iface_cfg.drop('TYPE') + cls._render_iface_shared(iface, iface_cfg, flavor) iface_contents[iface_name] = iface_cfg - cls._render_physical_interfaces(network_state, iface_contents) - cls._render_bond_interfaces(network_state, iface_contents) - cls._render_vlan_interfaces(network_state, iface_contents) - cls._render_bridge_interfaces(network_state, iface_contents) - cls._render_ib_interfaces(network_state, iface_contents) + cls._render_physical_interfaces(network_state, iface_contents, flavor) + cls._render_bond_interfaces(network_state, iface_contents, flavor) + cls._render_vlan_interfaces(network_state, iface_contents, flavor) + cls._render_bridge_interfaces(network_state, iface_contents, flavor) + cls._render_ib_interfaces(network_state, iface_contents, flavor) contents = {} for iface_name, iface_cfg in iface_contents.items(): if iface_cfg or iface_cfg.children: @@ -726,19 +859,19 @@ class Renderer(renderer.Renderer): if not templates: templates = self.templates file_mode = 0o644 - base_sysconf_dir = util.target_path(target, self.sysconf_dir) + base_sysconf_dir = subp.target_path(target, self.sysconf_dir) for path, data in self._render_sysconfig(base_sysconf_dir, - network_state, + network_state, self.flavor, templates=templates).items(): util.write_file(path, data, file_mode) if self.dns_path: - dns_path = util.target_path(target, self.dns_path) + dns_path = subp.target_path(target, self.dns_path) resolv_content = self._render_dns(network_state, existing_dns_path=dns_path) if 
resolv_content: util.write_file(dns_path, resolv_content, file_mode) if self.networkmanager_conf_path: - nm_conf_path = util.target_path(target, + nm_conf_path = subp.target_path(target, self.networkmanager_conf_path) nm_conf_content = self._render_networkmanager_conf(network_state, templates) @@ -746,12 +879,12 @@ class Renderer(renderer.Renderer): util.write_file(nm_conf_path, nm_conf_content, file_mode) if self.netrules_path: netrules_content = self._render_persistent_net(network_state) - netrules_path = util.target_path(target, self.netrules_path) + netrules_path = subp.target_path(target, self.netrules_path) util.write_file(netrules_path, netrules_content, file_mode) if available_nm(target=target): - enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE)) + enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE)) - sysconfig_path = util.target_path(target, templates.get('control')) + sysconfig_path = subp.target_path(target, templates.get('control')) # Distros configuring /etc/sysconfig/network as a file e.g. Centos if sysconfig_path.endswith('network'): util.ensure_dir(os.path.dirname(sysconfig_path)) @@ -774,20 +907,20 @@ def available_sysconfig(target=None): expected = ['ifup', 'ifdown'] search = ['/sbin', '/usr/sbin'] for p in expected: - if not util.which(p, search=search, target=target): + if not subp.which(p, search=search, target=target): return False expected_paths = [ 'etc/sysconfig/network-scripts/network-functions', 'etc/sysconfig/config'] for p in expected_paths: - if os.path.isfile(util.target_path(target, p)): + if os.path.isfile(subp.target_path(target, p)): return True return False def available_nm(target=None): - if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)): + if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)): return False return True diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index c3fa1e0..74cf4b9 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -62,7 +62,7 @@ class TestParseDHCPLeasesFile(CiTestCase): {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}] write_file(lease_file, content) - self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) + self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) class TestDHCPRFC3442(CiTestCase): @@ -88,7 +88,7 @@ class TestDHCPRFC3442(CiTestCase): 'renew': '4 2017/07/27 18:02:30', 'expire': '5 2017/07/28 07:08:15'}] write_file(lease_file, content) - self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) + self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) def test_parse_lease_finds_classless_static_routes(self): """ @@ -114,7 +114,7 @@ class TestDHCPRFC3442(CiTestCase): 'renew': '4 2017/07/27 18:02:30', 'expire': '5 2017/07/28 07:08:15'}] write_file(lease_file, content) - self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) + self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') @@ -211,7 +211,7 @@ class TestDHCPParseStaticRoutes(CiTestCase): "class_b": "16,172,16,10", "class_a": "8,10,10", "gateway": "0,0", - "netlen": "33,0", + "netlen": "33,0", } for rfc3442 in bad_rfc3442.values(): self.assertEqual([], parse_static_routes(rfc3442)) @@ -266,7 +266,7 @@ class TestDHCPDiscoveryClean(CiTestCase): 'Skip dhcp_discovery: nic idontexist not found in 
get_devicelist.', self.logs.getvalue()) - @mock.patch('cloudinit.net.dhcp.util.which') + @mock.patch('cloudinit.net.dhcp.subp.which') @mock.patch('cloudinit.net.dhcp.find_fallback_nic') def test_absent_dhclient_command(self, m_fallback, m_which): """When dhclient doesn't exist in the OS, log the issue and no-op.""" @@ -279,7 +279,7 @@ class TestDHCPDiscoveryClean(CiTestCase): @mock.patch('cloudinit.temp_utils.os.getuid') @mock.patch('cloudinit.net.dhcp.dhcp_discovery') - @mock.patch('cloudinit.net.dhcp.util.which') + @mock.patch('cloudinit.net.dhcp.subp.which') @mock.patch('cloudinit.net.dhcp.find_fallback_nic') def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid): """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery.""" @@ -302,13 +302,14 @@ class TestDHCPDiscoveryClean(CiTestCase): @mock.patch('time.sleep', mock.MagicMock()) @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.util.subp') + @mock.patch('cloudinit.net.dhcp.subp.subp') def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp, m_kill): """dhcp_discovery logs a warning when pidfile contains invalid content. Lease processing still occurs and no proc kill is attempted. """ + m_subp.return_value = ('', '') tmpdir = self.tmp_dir() dhclient_script = os.path.join(tmpdir, 'dhclient.orig') script_content = '#!/bin/bash\necho fake-dhclient' @@ -324,7 +325,7 @@ class TestDHCPDiscoveryClean(CiTestCase): """) write_file(self.tmp_path('dhcp.leases', tmpdir), lease_content) - self.assertItemsEqual( + self.assertCountEqual( [{'interface': 'eth9', 'fixed-address': '192.168.2.74', 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) @@ -337,13 +338,14 @@ class TestDHCPDiscoveryClean(CiTestCase): @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.wait_for_files') - @mock.patch('cloudinit.net.dhcp.util.subp') + @mock.patch('cloudinit.net.dhcp.subp.subp') def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, m_subp, m_wait, m_kill, m_getppid): """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" + m_subp.return_value = ('', '') tmpdir = self.tmp_dir() dhclient_script = os.path.join(tmpdir, 'dhclient.orig') script_content = '#!/bin/bash\necho fake-dhclient' @@ -364,12 +366,13 @@ class TestDHCPDiscoveryClean(CiTestCase): @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.util.subp') + @mock.patch('cloudinit.net.dhcp.subp.subp') def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): """dhcp_discovery brings up the interface and runs dhclient. It also returns the parsed dhcp.leases file generated in the sandbox. 
""" + m_subp.return_value = ('', '') tmpdir = self.tmp_dir() dhclient_script = os.path.join(tmpdir, 'dhclient.orig') script_content = '#!/bin/bash\necho fake-dhclient' @@ -389,7 +392,7 @@ class TestDHCPDiscoveryClean(CiTestCase): write_file(pid_file, "%d\n" % my_pid) m_getppid.return_value = 1 # Indicate that dhclient has daemonized - self.assertItemsEqual( + self.assertCountEqual( [{'interface': 'eth9', 'fixed-address': '192.168.2.74', 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) @@ -406,6 +409,87 @@ class TestDHCPDiscoveryClean(CiTestCase): 'eth9', '-sf', '/bin/true'], capture=True)]) m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)]) + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') + @mock.patch('cloudinit.net.dhcp.os.kill') + @mock.patch('cloudinit.net.dhcp.subp.subp') + def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid): + """dhcp_discovery brings up the interface and runs dhclient. + + It also returns the parsed dhcp.leases file generated in the sandbox. + """ + m_subp.return_value = ('', '') + tmpdir = self.tmp_dir() + dhclient_script = os.path.join(tmpdir, 'dhclient.orig') + script_content = '#!/bin/bash\necho fake-dhclient' + write_file(dhclient_script, script_content, mode=0o755) + lease_content = dedent(""" + lease { + interface "eth9"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + } + """) + lease_file = os.path.join(tmpdir, 'dhcp.leases') + write_file(lease_file, lease_content) + pid_file = os.path.join(tmpdir, 'dhclient.pid') + my_pid = 1 + write_file(pid_file, "%d\n" % my_pid) + m_getppid.return_value = 1 # Indicate that dhclient has daemonized + + with mock.patch('os.access', return_value=False): + self.assertCountEqual( + [{'interface': 'eth9', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], + dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + # dhclient script got copied + with open(os.path.join(tmpdir, 'dhclient.orig')) as stream: + self.assertEqual(script_content, stream.read()) + # Interface was brought up before dhclient called from sandbox + m_subp.assert_has_calls([ + mock.call( + ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True), + mock.call( + [os.path.join(tmpdir, 'dhclient.orig'), '-1', '-v', '-lf', + lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'), + 'eth9', '-sf', '/bin/true'], capture=True)]) + m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)]) + + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') + @mock.patch('cloudinit.net.dhcp.os.kill') + @mock.patch('cloudinit.net.dhcp.subp.subp') + def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid): + """"dhcp_log_func is called with the output and error streams of + dhclinet when the callable is passed.""" + dhclient_err = 'FAKE DHCLIENT ERROR' + dhclient_out = 'FAKE DHCLIENT OUT' + m_subp.return_value = (dhclient_out, dhclient_err) + tmpdir = self.tmp_dir() + dhclient_script = os.path.join(tmpdir, 'dhclient.orig') + script_content = '#!/bin/bash\necho fake-dhclient' + write_file(dhclient_script, script_content, mode=0o755) + lease_content = dedent(""" + lease { + interface "eth9"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + } + """) + lease_file = os.path.join(tmpdir, 'dhcp.leases') + write_file(lease_file, lease_content) + pid_file = os.path.join(tmpdir, 'dhclient.pid') + my_pid = 1 + write_file(pid_file, "%d\n" % my_pid) 
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized + + def dhcp_log_func(out, err): + self.assertEqual(out, dhclient_out) + self.assertEqual(err, dhclient_err) + + dhcp_discovery( + dhclient_script, 'eth9', tmpdir, dhcp_log_func=dhcp_log_func) + class TestSystemdParseLeases(CiTestCase): @@ -529,7 +613,7 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): # Ensure that no teardown happens: m_dhcp.assert_not_called() - @mock.patch('cloudinit.net.dhcp.util.subp') + @mock.patch('cloudinit.net.dhcp.subp.subp') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_ephemeral_dhcp_setup_network_if_url_connectivity( self, m_dhcp, m_subp): diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 6db93e2..311ab6f 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -2,16 +2,20 @@ import copy import errno -import httpretty -import mock +import ipaddress import os -import requests import textwrap +from unittest import mock + +import httpretty +import pytest +import requests import cloudinit.net as net -from cloudinit.util import ensure_file, write_file, ProcessExecutionError -from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase from cloudinit import safeyaml as yaml +from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase +from cloudinit.subp import ProcessExecutionError +from cloudinit.util import ensure_file, write_file class TestSysDevPath(CiTestCase): @@ -139,12 +143,6 @@ class TestReadSysNet(CiTestCase): write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) self.assertFalse(net.is_up('eth0')) - def test_is_wireless(self): - """is_wireless is True when /sys/net/devname/wireless exists.""" - self.assertFalse(net.is_wireless('eth0')) - ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless')) - self.assertTrue(net.is_wireless('eth0')) - def test_is_bridge(self): """is_bridge is True when /sys/net/devname/bridge exists.""" self.assertFalse(net.is_bridge('eth0')) @@ -200,32 +198,6 @@ class TestReadSysNet(CiTestCase): write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content) self.assertTrue(net.is_vlan('eth0')) - def test_is_connected_when_physically_connected(self): - """is_connected is True when /sys/net/devname/iflink reports 2.""" - self.assertFalse(net.is_connected('eth0')) - write_file(os.path.join(self.sysdir, 'eth0', 'iflink'), "2") - self.assertTrue(net.is_connected('eth0')) - - def test_is_connected_when_wireless_and_carrier_active(self): - """is_connected is True if wireless /sys/net/devname/carrier is 1.""" - self.assertFalse(net.is_connected('eth0')) - ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless')) - self.assertFalse(net.is_connected('eth0')) - write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), "1") - self.assertTrue(net.is_connected('eth0')) - - def test_is_physical(self): - """is_physical is True when /sys/net/devname/device exists.""" - self.assertFalse(net.is_physical('eth0')) - ensure_file(os.path.join(self.sysdir, 'eth0', 'device')) - self.assertTrue(net.is_physical('eth0')) - - def test_is_present(self): - """is_present is True when /sys/net/devname exists.""" - self.assertFalse(net.is_present('eth0')) - ensure_file(os.path.join(self.sysdir, 'eth0', 'device')) - self.assertTrue(net.is_present('eth0')) - class TestGenerateFallbackConfig(CiTestCase): @@ -341,8 +313,6 @@ class TestGenerateFallbackConfig(CiTestCase): class TestNetFindFallBackNic(CiTestCase): - with_logs = True - def setUp(self): 
super(TestNetFindFallBackNic, self).setUp() sys_mock = mock.patch('cloudinit.net.get_sys_class_path') @@ -396,7 +366,7 @@ class TestGetDeviceList(CiTestCase): """get_devicelist returns a directory listing for SYS_CLASS_NET.""" write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up') write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up') - self.assertItemsEqual(['eth0', 'eth1'], net.get_devicelist()) + self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist()) class TestGetInterfaceMAC(CiTestCase): @@ -540,7 +510,7 @@ class TestInterfaceHasOwnMAC(CiTestCase): net.interface_has_own_mac('eth1', strict=True) -@mock.patch('cloudinit.net.util.subp') +@mock.patch('cloudinit.net.subp.subp') class TestEphemeralIPV4Network(CiTestCase): with_logs = True @@ -993,86 +963,8 @@ class TestExtractPhysdevs(CiTestCase): net.extract_physdevs({'version': 3, 'awesome_config': []}) -class TestWaitForPhysdevs(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestWaitForPhysdevs, self).setUp() - self.add_patch('cloudinit.net.get_interfaces_by_mac', - 'm_get_iface_mac') - self.add_patch('cloudinit.util.udevadm_settle', 'm_udev_settle') - - def test_wait_for_physdevs_skips_settle_if_all_present(self): - physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], - ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], - ] - netcfg = { - 'version': 2, - 'ethernets': {args[1]: _mk_v2_phys(*args) - for args in physdevs}, - } - self.m_get_iface_mac.side_effect = iter([ - {'aa:bb:cc:dd:ee:ff': 'eth0', - '00:11:22:33:44:55': 'ens3'}, - ]) - net.wait_for_physdevs(netcfg) - self.assertEqual(0, self.m_udev_settle.call_count) - - def test_wait_for_physdevs_calls_udev_settle_on_missing(self): - physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], - ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], - ] - netcfg = { - 'version': 2, - 'ethernets': {args[1]: _mk_v2_phys(*args) - for args in physdevs}, - } - self.m_get_iface_mac.side_effect = iter([ - {'aa:bb:cc:dd:ee:ff': 'eth0'}, # first call ens3 is missing - {'aa:bb:cc:dd:ee:ff': 'eth0', - '00:11:22:33:44:55': 'ens3'}, # second call has both - ]) - net.wait_for_physdevs(netcfg) - self.m_udev_settle.assert_called_with(exists=net.sys_dev_path('ens3')) - - def test_wait_for_physdevs_raise_runtime_error_if_missing_and_strict(self): - physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], - ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], - ] - netcfg = { - 'version': 2, - 'ethernets': {args[1]: _mk_v2_phys(*args) - for args in physdevs}, - } - self.m_get_iface_mac.return_value = {} - with self.assertRaises(RuntimeError): - net.wait_for_physdevs(netcfg) - - self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count) - - def test_wait_for_physdevs_no_raise_if_not_strict(self): - physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], - ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], - ] - netcfg = { - 'version': 2, - 'ethernets': {args[1]: _mk_v2_phys(*args) - for args in physdevs}, - } - self.m_get_iface_mac.return_value = {} - net.wait_for_physdevs(netcfg, strict=False) - self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count) - - class TestNetFailOver(CiTestCase): - with_logs = True - def setUp(self): super(TestNetFailOver, self).setUp() self.add_patch('cloudinit.net.util', 'm_util') @@ -1297,4 +1189,48 @@ class TestNetFailOver(CiTestCase): m_standby.return_value = False self.assertFalse(net.is_netfailover(devname, driver)) + +class TestIsIpAddress: + """Tests for net.is_ip_address. 
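+ (A ValueError raised by ipaddress.ip_address maps to False; any
+ address object it returns maps to True, per the parametrized cases
+ below.)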
+ + Instead of testing with values we rely on the ipaddress stdlib module to + handle all values correctly, so simply test that is_ip_address defers to + the ipaddress module correctly. + """ + + @pytest.mark.parametrize('ip_address_side_effect,expected_return', ( + (ValueError, False), + (lambda _: ipaddress.IPv4Address('192.168.0.1'), True), + (lambda _: ipaddress.IPv6Address('2001:db8::'), True), + )) + def test_is_ip_address(self, ip_address_side_effect, expected_return): + with mock.patch('cloudinit.net.ipaddress.ip_address', + side_effect=ip_address_side_effect) as m_ip_address: + ret = net.is_ip_address(mock.sentinel.ip_address_in) + assert expected_return == ret + expected_call = mock.call(mock.sentinel.ip_address_in) + assert [expected_call] == m_ip_address.call_args_list + + +class TestIsIpv4Address: + """Tests for net.is_ipv4_address. + + Instead of testing with values we rely on the ipaddress stdlib module to + handle all values correctly, so simply test that is_ipv4_address defers to + the ipaddress module correctly. + """ + + @pytest.mark.parametrize('ipv4address_mock,expected_return', ( + (mock.Mock(side_effect=ValueError), False), + (mock.Mock(return_value=ipaddress.IPv4Address('192.168.0.1')), True), + )) + def test_is_ip_address(self, ipv4address_mock, expected_return): + with mock.patch('cloudinit.net.ipaddress.IPv4Address', + ipv4address_mock) as m_ipv4address: + ret = net.is_ipv4_address(mock.sentinel.ip_address_in) + assert expected_return == ret + expected_call = mock.call(mock.sentinel.ip_address_in) + assert [expected_call] == m_ipv4address.call_args_list + + # vi: ts=4 expandtab diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py index fcb4a99..07d726e 100644 --- a/cloudinit/net/tests/test_network_state.py +++ b/cloudinit/net/tests/test_network_state.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import mock +from unittest import mock + from cloudinit.net import network_state from cloudinit.tests.helpers import CiTestCase @@ -44,4 +45,14 @@ class TestNetworkStateParseConfig(CiTestCase): self.assertNotEqual(None, result) +class TestNetworkStateParseConfigV2(CiTestCase): + + def test_version_2_ignores_renderer_key(self): + ncfg = {'version': 2, 'renderer': 'networkd', 'ethernets': {}} + nsi = network_state.NetworkStateInterpreter(version=ncfg['version'], + config=ncfg) + nsi.parse_config(skip_broken=False) + self.assertEqual(ncfg, nsi.as_dict()['config']) + + # vi: ts=4 expandtab diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 6ba21f4..628e290 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -13,6 +13,7 @@ import re from cloudinit import log as logging from cloudinit.net.network_state import net_prefix_to_ipv4_mask +from cloudinit import subp from cloudinit import util from cloudinit.simpletable import SimpleTable @@ -91,6 +92,53 @@ def _netdev_info_iproute(ipaddr_out): return devs +def _netdev_info_ifconfig_netbsd(ifconfig_data): + # fields that need to be returned in devs for each dev + devs = {} + for line in ifconfig_data.splitlines(): + if len(line) == 0: + continue + if line[0] not in ("\t", " "): + curdev = line.split()[0] + # current ifconfig pops a ':' on the end of the device + if curdev.endswith(':'): + curdev = curdev[:-1] + if curdev not in devs: + devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO) + toks = line.lower().strip().split() + if len(toks) > 1: + if re.search(r"flags=[x\d]+", toks[1]): + devs[curdev]['up'] = True + + for i in range(len(toks)): + if toks[i] == "inet": # Create new ipv4 addr entry + network, net_bits = toks[i + 1].split('/') + devs[curdev]['ipv4'].append( + {'ip': network, 'mask': net_prefix_to_ipv4_mask(net_bits)}) + elif toks[i] == "broadcast": + devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1] + elif toks[i] == "address:": + devs[curdev]['hwaddr'] = toks[i + 1] + elif toks[i] == "inet6": + if toks[i + 1] == "addr:": + devs[curdev]['ipv6'].append({'ip': toks[i + 2]}) + else: + devs[curdev]['ipv6'].append({'ip': toks[i + 1]}) + elif toks[i] == "prefixlen": # Add prefix to current ipv6 value + addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1] + devs[curdev]['ipv6'][-1]['ip'] = addr6 + elif toks[i].startswith("scope:"): + devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:") + elif toks[i] == "scopeid": + res = re.match(r'.*<(\S+)>', toks[i + 1]) + if res: + devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) + else: + devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1] + + return devs + + def _netdev_info_ifconfig(ifconfig_data): # fields that need to be returned in devs for each dev devs = {} @@ -149,13 +197,16 @@ def _netdev_info_ifconfig(ifconfig_data): def netdev_info(empty=""): devs = {} - if util.which('ip'): + if util.is_NetBSD(): + (ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1]) + devs = _netdev_info_ifconfig_netbsd(ifcfg_out) + elif subp.which('ip'): # Try iproute first of all - (ipaddr_out, _err) = util.subp(["ip", "addr", "show"]) + (ipaddr_out, _err) = subp.subp(["ip", "addr", "show"]) devs = _netdev_info_iproute(ipaddr_out) - elif util.which('ifconfig'): + elif subp.which('ifconfig'): # Fall back to net-tools if iproute2 is not present - (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1]) + (ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1]) devs = _netdev_info_ifconfig(ifcfg_out) else: LOG.warning( @@ -235,10 +286,10 @@ def 
_netdev_route_info_iproute(iproute_data): entry['flags'] = ''.join(flags) routes['ipv4'].append(entry) try: - (iproute_data6, _err6) = util.subp( + (iproute_data6, _err6) = subp.subp( ["ip", "--oneline", "-6", "route", "list", "table", "all"], rcs=[0, 1]) - except util.ProcessExecutionError: + except subp.ProcessExecutionError: pass else: entries6 = iproute_data6.splitlines() @@ -307,9 +358,9 @@ def _netdev_route_info_netstat(route_data): routes['ipv4'].append(entry) try: - (route_data6, _err6) = util.subp( + (route_data6, _err6) = subp.subp( ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1]) - except util.ProcessExecutionError: + except subp.ProcessExecutionError: pass else: entries6 = route_data6.splitlines() @@ -343,13 +394,13 @@ def _netdev_route_info_netstat(route_data): def route_info(): routes = {} - if util.which('ip'): + if subp.which('ip'): # Try iproute first of all - (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"]) + (iproute_out, _err) = subp.subp(["ip", "-o", "route", "list"]) routes = _netdev_route_info_iproute(iproute_out) - elif util.which('netstat'): + elif subp.which('netstat'): # Fall back to net-tools if iproute2 is not present - (route_out, _err) = util.subp( + (route_out, _err) = subp.subp( ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1]) routes = _netdev_route_info_netstat(route_out) else: diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py index e5dfab3..b8677c8 100644 --- a/cloudinit/reporting/events.py +++ b/cloudinit/reporting/events.py @@ -12,7 +12,7 @@ import base64 import os.path import time -from . import instantiated_handler_registry +from . import instantiated_handler_registry, available_handlers FINISH_EVENT_TYPE = 'finish' START_EVENT_TYPE = 'start' @@ -81,17 +81,32 @@ class FinishReportingEvent(ReportingEvent): return data -def report_event(event): - """Report an event to all registered event handlers. +def report_event(event, excluded_handler_types=None): + """Report an event to all registered event handlers + except those whose type is in excluded_handler_types. This should generally be called via one of the other functions in the reporting module. + :param excluded_handler_types: + List of handlers types to exclude from reporting the event to. :param event_type: The type of the event; this should be a constant from the reporting module. """ - for _, handler in instantiated_handler_registry.registered_items.items(): + + if not excluded_handler_types: + excluded_handler_types = {} + excluded_handler_classes = { + hndl_cls + for hndl_type, hndl_cls in available_handlers.registered_items.items() + if hndl_type in excluded_handler_types + } + + handlers = instantiated_handler_registry.registered_items.items() + for _, handler in handlers: + if type(handler) in excluded_handler_classes: + continue # skip this excluded handler handler.publish_event(event) diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 6605e79..0a8c7af 100755 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -1,25 +1,19 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import abc -import uuid import fcntl import json -import six import os +import queue import struct import threading import time +import uuid +from datetime import datetime from cloudinit import log as logging from cloudinit.registry import DictRegistry from cloudinit import (url_helper, util) -from datetime import datetime -from six.moves.queue import Empty as QueueEmptyError - -if six.PY2: - from multiprocessing.queues import JoinableQueue as JQueue -else: - from queue import Queue as JQueue LOG = logging.getLogger(__name__) @@ -28,8 +22,7 @@ class ReportException(Exception): pass -@six.add_metaclass(abc.ABCMeta) -class ReportingHandler(object): +class ReportingHandler(metaclass=abc.ABCMeta): """Base class for report handlers. Implement :meth:`~publish_event` for controlling what @@ -42,7 +35,6 @@ class ReportingHandler(object): def flush(self): """Ensure ReportingHandler has published all events""" - pass class LogHandler(ReportingHandler): @@ -121,6 +113,8 @@ class HyperVKvpReportingHandler(ReportingHandler): https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests """ HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048 + # The maximum value size expected in Azure + HV_KVP_AZURE_MAX_VALUE_SIZE = 1024 HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512 HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE) @@ -141,12 +135,13 @@ class HyperVKvpReportingHandler(ReportingHandler): self._kvp_file_path) self._event_types = event_types - self.q = JQueue() + self.q = queue.Queue() self.incarnation_no = self._get_incarnation_no() self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, self.incarnation_no) self.publish_thread = threading.Thread( - target=self._publish_event_routine) + target=self._publish_event_routine + ) self.publish_thread.daemon = True self.publish_thread.start() @@ -202,17 +197,23 @@ class HyperVKvpReportingHandler(ReportingHandler): def _event_key(self, event): """ the event key format is: - CLOUD_INIT||||