diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..62caf9485a --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,13 @@ +# You can configure git to automatically use this file with the following config: +# git config --global blame.ignoreRevsFile .git-blame-ignore-revs + +c5b772db76c071e493a81105c7d8c0def08b2264 # trivial: Prepare for pyupgrade pre-commit hook +ed0314ac76ae58a6621077feb742efd5c14c3a62 # Blacken everything else +ac64fdb93c32972575a4523ccb23d0279ef584f5 # Blacken openstackclient.api +a3778109d0051a25901569e7bafe54915ab25f82 # Blacken openstack.common +53476e1f73d8af172207ac7089fb85fc0221859f # Blacken openstackclient.image +c51e4ef1bc350905a04690eb019d53e68fe3d633 # Blacken openstackclient.object +af6ea07703ed1c12b86995b97a9618bfa70721ee # Blacken openstackclient.identity +1face4f48b07fbc51824980e989687adaf453f0e # Blacken openstackclient.network +1ca77acc9118b433c14e03bf9fd28f2b1639c430 # Blacken openstackclient.volume +4bbf3bd3846cb3d85e243199910de82df25d35e0 # Blacken openstackclient.compute diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 28ed93d8e7..b277bb059b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,30 +1,42 @@ --- -default_language_version: - # force all unspecified python hooks to run python3 - python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 + rev: v6.0.0 hooks: - id: trailing-whitespace - id: mixed-line-ending args: ['--fix', 'lf'] exclude: '.*\.(svg)$' - - id: check-byte-order-marker + - id: fix-byte-order-marker - id: check-executables-have-shebangs - id: check-merge-conflict - id: debug-statements - id: check-yaml files: .*\.(yaml|yml)$ args: ['--unsafe'] - - repo: local + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.14.0 hooks: - - id: flake8 - name: flake8 + - id: ruff-check + args: ['--fix', '--unsafe-fixes'] + - id: ruff-format + - repo: https://opendev.org/openstack/hacking + rev: 7.0.0 + hooks: + - id: hacking + additional_dependencies: [] + exclude: '^(doc|releasenotes)/.*$' + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.18.2 + hooks: + - id: mypy additional_dependencies: - - hacking>=2.0.0 - - flake8-import-order>=0.13 - language: python - entry: flake8 - files: '^.*\.py$' - exclude: '^(doc|releasenotes|tools)/.*$' + - types-requests + # keep this in-sync with '[tool.mypy] exclude' in 'pyproject.toml' + exclude: | + (?x)( + doc/.* + | examples/.* + | hacking/.* + | releasenotes/.* + ) diff --git a/.zuul.yaml b/.zuul.yaml index 2c66c74af2..d63ee7c5f4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -6,6 +6,11 @@ Run unit tests for OpenStackClient with master branch of important libs. Takes advantage of the base tox job's install-siblings feature. + irrelevant-files: &common-irrelevant-files + - ^.*\.rst$ + - ^doc/.*$ + - ^releasenotes/.*$ + - ^\.pre-commit-config\.yaml$ required-projects: - openstack/cliff - openstack/keystoneauth @@ -18,14 +23,34 @@ zuul_work_dir: src/opendev.org/openstack/python-openstackclient - job: - name: osc-tox-py38-tips - parent: openstack-tox-py38 + name: osc-tox-py310-tips + parent: openstack-tox-py310 description: | Run unit tests for OpenStackClient with master branch of important libs. Takes advantage of the base tox job's install-siblings feature. 
# The job only tests the latest and shouldn't be run on the stable branches - branches: ^(?!stable) + branches: ^master$ + required-projects: + - openstack/cliff + - openstack/keystoneauth + - openstack/openstacksdk + - openstack/osc-lib + - openstack/python-openstackclient + vars: + # Set work dir to openstackclient so that if it's triggered by one of the + # other repos the tests will run in the same place + zuul_work_dir: src/opendev.org/openstack/python-openstackclient + +- job: + name: osc-tox-py313-tips + parent: openstack-tox-py313 + description: | + Run unit tests for OpenStackClient with master branch of important libs. + + Takes advantage of the base tox job's install-siblings feature. + # The job only tests the latest and shouldn't be run on the stable branches + branches: ^master$ required-projects: - openstack/cliff - openstack/keystoneauth @@ -61,13 +86,6 @@ # NOTE(amotoki): Some neutron features are enabled by devstack plugin neutron: https://opendev.org/openstack/neutron devstack_services: - ceilometer-acentral: false - ceilometer-acompute: false - ceilometer-alarm-evaluator: false - ceilometer-alarm-notifier: false - ceilometer-anotification: false - ceilometer-api: false - ceilometer-collector: false s-account: true s-container: true s-object: true @@ -119,72 +137,30 @@ tox_envlist: functional tox_install_siblings: true -- secret: - name: osc-dockerhub - data: - username: osclientzuul - password: !encrypted/pkcs1-oaep - - LbIZjJiVstRVXMpoLQ3+/JcNB6lKVUWJXXo5+Outf+PKAaO7mNnv8XLiFMKnJ6ftopLyu - hWbX9rA+NddvplLQkf1xxkh7QBBU8PToLr58quI2SENUclt4tpjxbZfZu451kFSNJvNvR - E58cHHpfJZpyRnS2htXmN/Qy24gbV2w7CQxSZD2YhlcrerD8uQ8rWEnlY1wcJEaEGomtS - ZTGxsdK2TsZC2cd4b7TG7+xbl2i+hjADzwSQAgUzlLlwuG71667+IWk4SOZ7OycJTv9NN - ZTak8+CGfiMKdmsxZ1Z8uD7DC+RIklDjMWyly6zuhWzfhOmsmU0CesR50moodRUvbK79p - NZM8u0hBex5cl2EpUEwJL/FSPJXUhDMPoMoTZT/SAuXf25R9eZ9JGrKsIAlmVhpl8ifoE - 8TpPyvIHGS3YelTQjhqOX0wGb9T4ZauQCcI5Ajzy9NuCTyD9xxme9OX1zz7gMACRnVHvz - q7U7Ue90MnmGH6E2SgKjIZhyzy9Efwb7JUvH1Zb3hlrjCjEhwi9MV5FnABTEeXyYwE10s - 3o/KZg2zvdWkVG6x0dEkjpoQaNuaB7T2Na7Sm421n/z3LCzhiQGuTUjENnL6cMEtuA6Pp - BfI5+Qlg7HMwkBXNB73EPfWHzbCR3VNrzGYTy9FvhGud0/cXsuBXgps4WH63ic= - - job: name: osc-build-image parent: opendev-build-docker-image description: Build Docker images. allowed-projects: openstack/python-openstackclient requires: - - python-builder-3.9-container-image - - python-base-3.9-container-image + - python-builder-3.11-bookworm-container-image + - python-base-3.11-bookworm-container-image provides: osc-container-image - vars: &osc_image_vars + vars: docker_images: - context: . - repository: osclient/python-openstackclient - -- job: - name: osc-upload-image - parent: opendev-upload-docker-image - description: Build Docker images and upload to Docker Hub. - allowed-projects: openstack/python-openstackclient - requires: - - python-builder-3.9-container-image - - python-base-3.9-container-image - provides: osc-container-image - secrets: - - name: docker_credentials - secret: osc-dockerhub - pass-to-parent: true - vars: *osc_image_vars - -- job: - name: osc-promote-image - parent: opendev-promote-docker-image - allowed-projects: openstack/python-openstackclient - description: Promote previously uploaded Docker images. 
- secrets: - - name: docker_credentials - secret: osc-dockerhub - pass-to-parent: true - nodeset: - nodes: [] - vars: *osc_image_vars + tags: [] - project-template: name: osc-tox-unit-tips check: jobs: - - osc-tox-py38-tips + - osc-tox-py310-tips + - osc-tox-py313-tips gate: jobs: - - osc-tox-py38-tips + - osc-tox-py310-tips + - osc-tox-py313-tips - project: templates: @@ -197,16 +173,15 @@ - release-notes-jobs-python3 check: jobs: - - osc-build-image + - openstackclient-check-plugins: + voting: true + - osc-build-image: + voting: false - osc-functional-devstack - osc-functional-devstack-tips: # The functional-tips job only tests the latest and shouldn't be run # on the stable branches - branches: ^(?!stable) + branches: ^master$ gate: jobs: - - osc-upload-image - osc-functional-devstack - promote: - jobs: - - osc-promote-image diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index f8732b7211..74b442ef66 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -9,13 +9,13 @@ to set up and use Gerrit: https://docs.openstack.org/contributors/code-and-documentation/quick-start.html -Bugs should be filed on StoryBoard: +Bugs should be filed on Launchpad: - https://storyboard.openstack.org/#!/project/openstack/python-openstackclient + https://bugs.launchpad.net/python-openstackclient Developers should also join the discussion on the mailing list, at: - http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss + https://lists.openstack.org/mailman3/lists/openstack-discuss.lists.openstack.org/ or join the IRC channel on diff --git a/Dockerfile b/Dockerfile index 90f7fd3be6..6709be7514 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,12 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM docker.io/opendevorg/python-builder:3.9 as builder +FROM docker.io/opendevorg/python-builder:3.12-bookworm AS builder COPY . /tmp/src RUN assemble -FROM docker.io/opendevorg/python-base:3.9 +FROM docker.io/opendevorg/python-base:3.12-bookworm + +LABEL org.opencontainers.image.title="python-openstackclient" +LABEL org.opencontainers.image.description="Client for OpenStack services." +LABEL org.opencontainers.image.licenses="Apache License 2.0" +LABEL org.opencontainers.image.url="https://www.openstack.org/" +LABEL org.opencontainers.image.documentation="https://docs.openstack.org/python-openstackclient/latest/" +LABEL org.opencontainers.image.source="https://opendev.org/openstack/python-openstackclient" COPY --from=builder /output/ /output RUN /output/install-from-bindep diff --git a/README.rst b/README.rst index 7f31bcdbea..af3837a351 100644 --- a/README.rst +++ b/README.rst @@ -1,12 +1,3 @@ -======================== -Team and repository tags -======================== - -.. image:: https://governance.openstack.org/tc/badges/python-openstackclient.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html - -.. Change things from this point on - =============== OpenStackClient =============== @@ -15,96 +6,158 @@ OpenStackClient :target: https://pypi.org/project/python-openstackclient/ :alt: Latest Version -OpenStackClient (aka OSC) is a command-line client for OpenStack that brings +OpenStackClient (OSC) is a command-line client for OpenStack that brings the command set for Compute, Identity, Image, Network, Object Store and Block Storage APIs together in a single shell with a uniform command structure. +Support for additional service APIs is provided via plugins. 
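The "uniform command structure" mentioned above means every service follows the same ``openstack <object> <verb>`` pattern. A minimal sketch, assuming an already configured and authenticated cloud; the resource names used here are placeholders:

.. code-block:: shell

    # the same "openstack <object> <verb>" pattern applies across services
    openstack server list                 # Compute
    openstack image list                  # Image
    openstack network create demo-net     # Network
    openstack volume show demo-volume     # Block Storage
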
The primary goal is to provide a unified shell command structure and a common language to describe operations in OpenStack. -* `PyPi`_ - package installation -* `Online Documentation`_ -* `Storyboard project`_ - bugs and feature requests -* `Blueprints`_ - feature specifications (historical only) -* `Source`_ -* `Developer`_ - getting started as a developer -* `Contributing`_ - contributing code -* `Testing`_ - testing code -* IRC: #openstack-sdks on OFTC (irc.oftc.net) -* License: Apache 2.0 - -.. _PyPi: https://pypi.org/project/python-openstackclient -.. _Online Documentation: https://docs.openstack.org/python-openstackclient/latest/ -.. _Blueprints: https://blueprints.launchpad.net/python-openstackclient -.. _`Storyboard project`: https://storyboard.openstack.org/#!/project/openstack/python-openstackclient -.. _Source: https://opendev.org/openstack/python-openstackclient -.. _Developer: https://docs.openstack.org/project-team-guide/project-setup/python.html -.. _Contributing: https://docs.openstack.org/infra/manual/developers.html -.. _Testing: https://docs.openstack.org/python-openstackclient/latest/contributor/developing.html#testing -.. _Release Notes: https://docs.openstack.org/releasenotes/python-openstackclient - Getting Started =============== -OpenStack Client can be installed from PyPI using pip:: +OpenStack Client can be installed from PyPI using pip: - pip install python-openstackclient +.. code-block:: shell -There are a few variants on getting help. A list of global options and supported -commands is shown with ``--help``:: + python3 -m pip install python-openstackclient - openstack --help +You can use ``--help`` or the ``help`` command to get a list of global options +and supported commands: -There is also a ``help`` command that can be used to get help text for a specific -command:: +.. code-block:: shell + openstack --help openstack help + +You can also get help for a specific command: + +.. code-block:: shell + + openstack server create --help openstack help server create -If you want to make changes to the OpenStackClient for testing and contribution, -make any changes and then run:: +You can add support for additional services by installing their clients. For +example, to add support for the DNS service (designate): - python setup.py develop +.. code-block:: shell -or:: + python3 -m pip install python3-designateclient - pip install -e . +A ``Dockerfile`` is provided for your convenience in the repository. You can +use this to build your own container images: -Configuration -============= +.. code-block:: shell -The CLI is configured via environment variables and command-line -options as listed in https://docs.openstack.org/python-openstackclient/latest/cli/authentication.html. + git clone https://opendev.org/openstack/python-openstackclient + cd python-openstackclient + podman build . -t example.com/myuser/openstackclient -Authentication using username/password is most commonly used: +For more information the available options and commands, refer to the `Users +Guide`__. -- For a local user, your configuration will look like the one below:: +.. __: https://docs.openstack.org/python-openstackclient/latest/cli/index.html + +Configuration +============= + +OpenStack Client must be configured with authentication information in order to +communicate with a given OpenStack cloud. This configuration can be achieved +via a ``clouds.yaml`` file, a set of environment variables (often shared via an +``openrc`` file), a set of command-line options, or a combination of all three. 
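Once a cloud is defined in ``clouds.yaml`` (as in the ``my-cloud`` example that follows), it can be selected by name with the ``--os-cloud`` option or the ``OS_CLOUD`` environment variable. A minimal sketch, assuming a ``clouds.yaml`` entry named ``my-cloud``:

.. code-block:: shell

    # select the named cloud for a single command
    openstack --os-cloud my-cloud server list

    # or export it for the whole shell session
    export OS_CLOUD=my-cloud
    openstack network list

This keeps credentials in the configuration file rather than in the shell environment and makes switching between clouds straightforward.
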
+Your cloud provider or deployment tooling will typically provide either a +``clouds.yaml`` file or ``openrc`` file for you. If using a ``clouds.yaml`` +file, OpenStack Client expects to find it in one of the following locations: + +* If set, the path indicated by the ``OS_CLIENT_CONFIG_FILE`` environment + variable +* ``.`` (the current directory) +* ``$HOME/.config/openstack`` +* ``/etc/openstack`` + +The options you should set will depend on the configuration of your cloud and +the authentication mechanism(s) supported. For example, consider a cloud that +supports username/password authentication. Configuration for this cloud using a +``clouds.yaml`` file would look like so: + +.. code-block:: yaml + + clouds: + my-cloud: + auth: + auth_url: '' + project_name: '' + project_domain_name: '' + username: '' + user_domain_name: '' + password: '' # (optional) + region_name: '' + +The corresponding environment variables would look very similar: + +.. code-block:: shell export OS_AUTH_URL= - export OS_IDENTITY_API_VERSION=3 + export OS_REGION_NAME= export OS_PROJECT_NAME= export OS_PROJECT_DOMAIN_NAME= export OS_USERNAME= export OS_USER_DOMAIN_NAME= export OS_PASSWORD= # (optional) - The corresponding command-line options look very similar:: +Likewise, the corresponding command-line options would look very similar: - --os-auth-url - --os-identity-api-version 3 +:: + + openstack + --os-auth-url + --os-region --os-project-name --os-project-domain-name --os-username --os-user-domain-name [--os-password ] -- For a federated user, your configuration will look the so:: +.. note:: + + If a password is not provided above (in plaintext), you will be + interactively prompted to provide one securely. + +Some clouds use federated authentication. If this is the case, your +configuration will be slightly more involved. For example, to configure +username/password authentication for a federated user using a ``clouds.yaml`` +file: + +.. code-block:: yaml + + clouds: + my-cloud: + auth: + auth_url: '' + project_name: '' + project_domain_name: '' + username: '' + user_domain_name: '' + password: '' + identity_provider: '' + client_id: '' + client_secret: '' + openid_scope: '' + protocol: '' + access_token_type: '' + discovery_endpoint: '' + auth_type: 'v3oidcpassword' + region_name: '' + +The corresponding environment variables would look very similar: + +.. code-block:: shell export OS_PROJECT_NAME= export OS_PROJECT_DOMAIN_NAME= export OS_AUTH_URL= export OS_IDENTITY_API_VERSION=3 - export OS_AUTH_PLUGIN=openid export OS_AUTH_TYPE=v3oidcpassword export OS_USERNAME= export OS_PASSWORD= @@ -116,7 +169,9 @@ Authentication using username/password is most commonly used: export OS_ACCESS_TOKEN_TYPE= export OS_DISCOVERY_ENDPOINT= - The corresponding command-line options look very similar:: +Likewise, the corresponding command-line options would look very similar: + +.. code-block:: shell --os-project-name --os-project-domain-name @@ -134,5 +189,41 @@ Authentication using username/password is most commonly used: --os-access-token-type --os-discovery-endpoint -If a password is not provided above (in plaintext), you will be interactively -prompted to provide one securely. +For more information on configuring authentication, including an overview of +the many authentication mechanisms supported, refer to the `Authentication +guide`__. For more information on configuration in general, refer to the +`Configuration guide`__. + +.. __: https://docs.openstack.org/python-openstackclient/latest/cli/authentication.html. +.. 
__: https://docs.openstack.org/python-openstackclient/latest/configuration/index.html + +Contributing +============ + +You can clone the repository from opendev.org:: + + git clone https://opendev.org/openstack/python-openstackclient + cd python-openstackclient + +OpenStack Client uses the same contributor process as other OpenStack projects. +For information on this process, including help on setting up you Gerrit +account and an overview of the CI process, refer to the `OpenStack Contributors +Guide`__. + +For more information on contributing to OpenStack Client itself, including +guidance on how to design new commands and how to report bugs, refer to the +`Contributors Guide`__. + +.. __: https://docs.openstack.org/python-openstackclient/latest/contributor/index.html +.. __: https://docs.opendev.org/opendev/infra-manual/latest/developers.html + +Links +----- + +* `Issue Tracker `_ +* `Code Review `_ +* `Documentation `_ +* `PyPi `_ +* `Mailing list `_ +* `Release Notes `_ +* `IRC (#openstack-sdks on OFTC (irc.oftc.net)) `_ diff --git a/bindep.txt b/bindep.txt index 4c90a026fe..8402431aed 100644 --- a/bindep.txt +++ b/bindep.txt @@ -8,3 +8,4 @@ libffi-dev [compile test platform:dpkg] libssl-dev [compile test platform:dpkg] python3-dev [compile test platform:dpkg] python3-devel [compile test platform:rpm] +libpcre2-dev [test platform:dpkg] diff --git a/doc/requirements.txt b/doc/requirements.txt index 93e4f04665..05a9bfa87c 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -4,7 +4,7 @@ sphinx>=2.0.0,!=2.1.0 # BSD sphinxcontrib-apidoc>=0.2.0 # BSD # redirect tests in docs -whereto>=0.4.0 # Apache-2.0 +whereto>=0.5.0 # Apache-2.0 # Install these to generate sphinx autodocs aodhclient>=0.9.0 # Apache-2.0 @@ -16,14 +16,11 @@ python-designateclient>=2.7.0 # Apache-2.0 python-heatclient>=1.10.0 # Apache-2.0 python-ironicclient>=2.3.0 # Apache-2.0 python-ironic-inspector-client>=1.5.0 # Apache-2.0 +python-magnumclient>=2.3.0 # Apache-2.0 python-manilaclient>=2.0.0 # Apache-2.0 python-mistralclient!=3.2.0,>=3.1.0 # Apache-2.0 -python-muranoclient>=0.8.2 # Apache-2.0 python-neutronclient>=6.7.0 # Apache-2.0 python-octaviaclient>=1.11.0 # Apache-2.0 -python-rsdclient>=1.0.1 # Apache-2.0 -python-saharaclient>=1.4.0 # Apache-2.0 -python-senlinclient>=1.1.0 # Apache-2.0 python-troveclient>=3.1.0 # Apache-2.0 python-watcherclient>=2.5.0 # Apache-2.0 python-zaqarclient>=1.0.0 # Apache-2.0 diff --git a/doc/source/cli/_hidden/image.rst b/doc/source/cli/_hidden/image.rst index 85ffde6f39..06919e7ab9 100644 --- a/doc/source/cli/_hidden/image.rst +++ b/doc/source/cli/_hidden/image.rst @@ -3,7 +3,7 @@ image ===== .. NOTE(efried): This page is hidden from the main TOC; it's here so links in - the wild redirect somewhere sane, because previously identity v2 and v3 were + the wild redirect somewhere sane, because previously image v2 and v3 were combined in a single page. .. toctree:: diff --git a/doc/source/cli/authentication.rst b/doc/source/cli/authentication.rst index 2b3ad5dafc..8c09fc3648 100644 --- a/doc/source/cli/authentication.rst +++ b/doc/source/cli/authentication.rst @@ -145,5 +145,233 @@ credentials. This is useful in a Federated environment where one credential give access to many applications/services that the Federation supports. To check how to configure the OpenStackClient to allow Federated users to log in, please check -the -:ref:`Authentication using federation. ` +the :ref:`Authentication using federation. ` + +Examples +-------- + +.. 
todo: It would be nice to add more examples here, particularly for + complicated things like oauth2 + +``v3password`` +~~~~~~~~~~~~~~ + +Using ``clouds.yaml``: + +.. code-block:: yaml + + clouds: + demo: + auth: + auth_url: http://openstack.dev/identity + project_name: demo + project_domain_name: default + user_domain_name: default + username: demo + password: password + auth_type: v3password + +or, using command line options: + +.. code-block:: bash + + $ openstack \ + --os-auth-url "http://openstack.dev/identity" \ + --os-project-name demo \ + --os-project-domain-name default \ + --os-user-domain-name default \ + --os-auth-type=v3password \ + --os-username demo \ + --os-password password \ + server list + +or, using environment variables: + +.. code-block:: bash + + $ export OS_AUTH_URL="http://openstack.dev/identity" + $ export OS_PROJECT_NAME=demo + $ export OS_PROJECT_DOMAIN_NAME=default + $ export OS_AUTH_TYPE=v3password + $ export OS_USERNAME=demo + $ export OS_PASSWORD=password + $ openstack server list + +.. note:: + + If a password is not provided, you will be prompted for one. + +``v3applicationcredential`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Using ``clouds.yaml``: + +.. code-block:: yaml + + clouds: + demo: + auth: + auth_url: http://openstack.dev/identity + application_credential_id: ${APP_CRED_ID} + application_credential_secret: ${APP_CRED_SECRET} + auth_type: v3applicationcredential + +or, using command line options: + +.. code-block:: bash + + $ openstack \ + --os-auth-url "http://openstack.dev/identity" \ + --os-auth-type=v3applicationcredential \ + --os-application-credential-id=${APP_CRED_ID} \ + --os-application-credential-secret=${APP_CRED_SECRET} + server list + +or, using environment variables: + +.. code-block:: bash + + $ export OS_AUTH_URL="http://openstack.dev/identity" + $ export OS_AUTH_TYPE=v3applicationcredential + $ export OS_APPLICATION_CREDENTIAL_ID=${APP_CRED_ID} + $ export OS_APPLICATION_CREDENTIAL_SECRET=${APP_CRED_SECRET} + $ openstack server list + +.. note:: + + You can generate application credentials using the :program:`openstack + application credential create` command: + + .. code-block:: bash + + $ readarray -t lines <<< $(openstack application credential create test -f value -c id -c secret) + $ APP_CRED_ID=${lines[0]} + $ APP_CRED_SECRET=${lines[1]} + +``v3token`` +~~~~~~~~~~~ + +Using ``clouds.yaml``: + +.. code-block:: yaml + + clouds: + demo: + auth: + auth_url: http://openstack.dev/identity + project_name: demo + project_domain_name: default + token: ${TOKEN} + auth_type: v3token + +or, using command line options: + +.. code-block:: bash + + $ openstack \ + --os-auth-url "http://openstack.dev/identity" \ + --os-project-name demo \ + --os-project-domain-name default \ + --os-auth-type=v3token \ + --os-token ${TOKEN} \ + server list + +or, using environment variables: + +.. code-block:: bash + + $ export OS_AUTH_URL="http://openstack.dev/identity" + $ export OS_PROJECT_NAME=demo + $ export OS_PROJECT_DOMAIN_NAME=default + $ export OS_AUTH_TYPE=v3token + $ export OS_TOKEN=${TOKEN} + $ openstack server list + +.. note:: + + You can generate tokens using the :program:`openstack token issue` command: + + .. code-block:: bash + + $ TOKEN=$(openstack token issue -f value -c id) + +.. note:: + + The above examples assume you require a project-scoped token. You can omit + the project-related configuration if your user has a default project ID set. 
+ Conversely, if requesting domain-scoped or system-scoped, you should update + these examples accordingly. If the user does not have a default project + configured and no scoping information is provided, the resulting token will + be unscoped. + +``v3totp`` +~~~~~~~~~~ + +.. note:: + + The TOTP mechanism is poorly suited to command line-driven API + interactions. Where the TOTP mechanism is configured for a cloud, it is + expected that it is to be used for initial authentication and to create a + token or application credential, which can then be used for future + interactions. + +.. note:: + + The TOTP mechanism is often combined with other mechanisms to enable + Multi-Factor Authentication, or MFA. The authentication type + ``v3multifactor`` is used in this case, while the ``v3totp`` authentication + type is specified alongside the other mechanisms in ``auth_methods``. + +Using ``clouds.yaml``: + +.. code-block:: yaml + + clouds: + demo: + auth: + auth_url: http://openstack.dev/identity + project_name: demo + project_domain_name: default + user_domain_name: default + username: demo + passcode: ${PASSCODE} + auth_type: v3totp + +or, using command line options: + +.. code-block:: bash + + $ openstack \ + --os-auth-url "http://openstack.dev/identity" \ + --os-project-name demo \ + --os-project-domain-name default \ + --os-user-domain-name default \ + --os-auth-type=v3totp \ + --os-username demo \ + --os-passcode ${PASSCODE} \ + server list + +or, using environment variables: + +.. code-block:: bash + + $ export OS_AUTH_URL="http://openstack.dev/identity" + $ export OS_PROJECT_NAME=demo + $ export OS_PROJECT_DOMAIN_NAME=default + $ export OS_AUTH_TYPE=v3totp + $ export OS_USERNAME=demo + $ export OS_PASSCODE=${PASSCODE} + $ openstack server list + +.. note:: + + The passcode will be generated by an authenticator application such FreeOTP + or Google Authenticator. Refer to your cloud provider's documentation for + information on how to configure an authenticator application, or to the + `Keystone documentation`__ if you are configuring this for your own cloud. + + .. __: https://docs.openstack.org/keystone/latest/admin/auth-totp.html + +.. note:: + + If a passcode is not provided, you will be prompted for one. diff --git a/doc/source/cli/backwards-incompatible.rst b/doc/source/cli/backwards-incompatible.rst index 9d43754e01..3fbe65fae9 100644 --- a/doc/source/cli/backwards-incompatible.rst +++ b/doc/source/cli/backwards-incompatible.rst @@ -20,119 +20,129 @@ Release 4.0 ----------- 1. Remove ``ip fixed add|remove`` commands. + Use ``server add|remove fixed ip`` commands instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/612781 + * Removed in: 4.0 + * Commit: https://review.opendev.org/612781 2. Remove ``ip floating add|remove`` commands. + Use ``server add|remove floating ip`` commands instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/612781 + * Removed in: 4.0 + * Commit: https://review.opendev.org/612781 3. Remove ``service create`` option ``--type``. Service type is a positional argument. - * Removed in: 4.0 - * Commit: https://review.opendev.org/612798 + * Removed in: 4.0 + * Commit: https://review.opendev.org/612798 4. Remove ``role list`` options ``--project`` and ``--user``. + Use ``role assignment list`` options ``--project`` and ``--user`` instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/612798 + * Removed in: 4.0 + * Commit: https://review.opendev.org/612798 5. Remove ``user role list`` command. 
+ Use ``role assignment list`` options ``--project`` and ``--user`` instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/612798 + * Removed in: 4.0 + * Commit: https://review.opendev.org/612798 6. Remove ``image create|set`` option ``--owner``. + Use ``--project`` option instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/659431 + * Removed in: 4.0 + * Commit: https://review.opendev.org/659431 7. Remove ``port create|set`` options ``--device-id`` and ``--host-id``. + Use ``--device`` and ``--host`` instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/613644 + * Removed in: 4.0 + * Commit: https://review.opendev.org/613644 8. Remove ``router set`` option ``--clear-routes``. + Use ``no-route`` option instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/613644 + * Removed in: 4.0 + * Commit: https://review.opendev.org/613644 9. Remove ``security group rule create`` options ``--src-ip`` and ``--src-group``. + Use ``--remote-ip`` and ``--remote-group`` options instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/613644 + * Removed in: 4.0 + * Commit: https://review.opendev.org/613644 10. Remove ``backup`` commands. + Use ``volume backup`` commands instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/612751 + * Removed in: 4.0 + * Commit: https://review.opendev.org/612751 11. Remove ``snapshot`` commands. Use ``volume snapshot`` commands instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/612751 + * Removed in: 4.0 + * Commit: https://review.opendev.org/612751 12. Remove ``volume create`` options ``--project``, ``--user``, ``--multi-attach``. - * Removed in: 4.0 - * Commit: https://review.opendev.org/612751 + * Removed in: 4.0 + * Commit: https://review.opendev.org/612751 13. Change ``volume transfer request accept`` to use new option ``--auth-key`` rather than a second positional argument. - * Removed in: 4.0 - * Commit: https://review.opendev.org/612751 + * Removed in: 4.0 + * Commit: https://review.opendev.org/612751 14. Remove 'Token/Endpoint' auth plugin support (type ``token_endpoint``). + This remained as a compatibility for the ``admin_token`` auth type to support the ``--url`` global option. That option is also now removed, use ``--endpoint`` instead. - * Removed in: 4.0 - * Commit: https://review.opendev.org/ + * Removed in: 4.0 + * Commit: https://review.opendev.org/ Release 3.12 ------------ 1. Replace ``Display Name`` by ``Name`` in volume list. - Change column name ``Display Name`` to ``Name`` in ``volume list`` output. - Current ``volume list --name`` command uses ``display_name`` as search_opts - to send to cinder API, and show the result table with ``Display Name`` - as column title. Replace all ``Display Name`` by ``Name`` to be consistent - with other list commands. + Change column name ``Display Name`` to ``Name`` in ``volume list`` output. + Current ``volume list --name`` command uses ``display_name`` as search_opts + to send to cinder API, and show the result table with ``Display Name`` + as column title. Replace all ``Display Name`` by ``Name`` to be consistent + with other list commands. - Support a mapping for volume list -c ``Display Name`` (Volume v1 and v2) - and volume create/show -c ``display_name`` (Volume v1) to maintain backward - compatibility until the next major release. 
+ Support a mapping for volume list -c ``Display Name`` (Volume v1 and v2) + and volume create/show -c ``display_name`` (Volume v1) to maintain backward + compatibility until the next major release. - * In favor of: ``openstack volume list -c Name`` - * As of: 3.12.0 - * Removed in: n/a - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1657956 - * Commit: https://review.opendev.org/#/c/423081/ + * In favor of: ``openstack volume list -c Name`` + * As of: 3.12.0 + * Removed in: n/a + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1657956 + * Commit: https://review.opendev.org/#/c/423081/ Release 3.10 ------------ 1. The ``network create`` command now requires the ``--subnet`` option when used - with Nova-network clouds. + with nova-network clouds. - * As of: 3.10 - * Commit: https://review.opendev.org/460679 + * As of: 3.10 + * Commit: https://review.opendev.org/460679 2. The positional argument ```` of the ``volume snapshot create`` command is no longer optional. @@ -142,23 +152,23 @@ Release 3.10 ``--volume`` option is not present now it defaults to the value of ````. - * As of: 3.10 - * Bug: 1659894 - * Commit: https://review.opendev.org/440497 + * As of: 3.10 + * Bug: 1659894 + * Commit: https://review.opendev.org/440497 Release 3.0 ----------- 1. Remove the ``osc_password`` authentication plugin. - This was the 'last-resort' plugin default that worked around an old default - Keystone configuration for the ``admin_endpoint`` and ``public_endpoint``. + This was the 'last-resort' plugin default that worked around an old default + Keystone configuration for the ``admin_endpoint`` and ``public_endpoint``. - * In favor of: ``password`` - * As of: 3.0 - * Removed in: n/a - * Bug: n/a - * Commit: https://review.opendev.org/332938 + * In favor of: ``password`` + * As of: 3.0 + * Removed in: n/a + * Bug: n/a + * Commit: https://review.opendev.org/332938 Releases Before 3.0 @@ -166,209 +176,209 @@ Releases Before 3.0 1. Rename command `openstack project usage list` - The `project` part of the command was pointless. + The `project` part of the command was pointless. - * In favor of: `openstack usage list` instead. - * As of: 1.0.2 - * Removed in: TBD - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1406654 - * Commit: https://review.opendev.org/#/c/147379/ + * In favor of: `openstack usage list` instead. + * As of: 1.0.2 + * Removed in: TBD + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1406654 + * Commit: https://review.opendev.org/#/c/147379/ 2. should not be optional for command `openstack service create` - Previously, the command was `openstack service create --type `, - whereas now it is: `openstack service create --name `. - This bug also affected python-keystoneclient, and keystone. + Previously, the command was `openstack service create --type `, + whereas now it is: `openstack service create --name `. + This bug also affected python-keystoneclient, and keystone. - * In favor of: making a positional argument. - * As of: 1.0.2 - * Removed in: TBD - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1404073 - * Commit: https://review.opendev.org/#/c/143242/ + * In favor of: making a positional argument. + * As of: 1.0.2 + * Removed in: TBD + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1404073 + * Commit: https://review.opendev.org/#/c/143242/ 3. 
Command `openstack security group rule delete` now requires rule id - Previously, the command was `openstack security group rule delete --proto - [--src-ip --dst-port ] `, - whereas now it is: `openstack security group rule delete `. + Previously, the command was `openstack security group rule delete --proto + [--src-ip --dst-port ] `, + whereas now it is: `openstack security group rule delete `. - * In favor of: Using `openstack security group rule delete `. - * As of: 1.2.1 - * Removed in: NA - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1450872 - * Commit: https://review.opendev.org/#/c/179446/ + * In favor of: Using `openstack security group rule delete `. + * As of: 1.2.1 + * Removed in: NA + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1450872 + * Commit: https://review.opendev.org/#/c/179446/ 4. Command `openstack image create` does not update already existing image - Previously, the image create command updated already existing image if it had - same name. It disabled possibility to create multiple images with same name - and lead to potentially unwanted update of existing images by image create - command. - Now, update code was moved from create action to set action. + Previously, the image create command updated already existing image if it had + same name. It disabled possibility to create multiple images with same name + and lead to potentially unwanted update of existing images by image create + command. + Now, update code was moved from create action to set action. - * In favor of: Create multiple images with same name (as glance does). - * As of: 1.5.0 - * Removed in: NA - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1461817 - * Commit: https://review.opendev.org/#/c/194654/ + * In favor of: Create multiple images with same name (as glance does). + * As of: 1.5.0 + * Removed in: NA + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1461817 + * Commit: https://review.opendev.org/#/c/194654/ 5. Command `openstack network list --dhcp` has been removed - The --dhcp option to network list is not a logical use case of listing - networks, it lists agents. Another command should be added in the future - to provide this functionality. It is highly unlikely anyone uses this - feature as we don't support any other agent commands. Use neutron - dhcp-agent-list-hosting-net command instead. + The --dhcp option to network list is not a logical use case of listing + networks, it lists agents. Another command should be added in the future + to provide this functionality. It is highly unlikely anyone uses this + feature as we don't support any other agent commands. Use neutron + dhcp-agent-list-hosting-net command instead. - * In favor of: Create network agent list command in the future - * As of: 1.6.0 - * Removed in: NA - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/472613 - * Commit: https://review.opendev.org/#/c/194654/ + * In favor of: Create network agent list command in the future + * As of: 1.6.0 + * Removed in: NA + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/472613 + * Commit: https://review.opendev.org/#/c/194654/ 6. Plugin interface change for default API versions - Previously, the default version was set in the parsed arguments, - but this makes it impossible to tell what has been passed in at the - command line, set in an environment variable or is just the default. 
- Now, the module should have a DEFAULT_API_VERSION that contains the - value and it will be set after command line argument, environment - and OCC file processing. + Previously, the default version was set in the parsed arguments, + but this makes it impossible to tell what has been passed in at the + command line, set in an environment variable or is just the default. + Now, the module should have a DEFAULT_API_VERSION that contains the + value and it will be set after command line argument, environment + and OCC file processing. - * In favor of: DEFAULT_API_VERSION - * As of: 1.2.1 - * Removed in: NA - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1453229 - * Commit: https://review.opendev.org/#/c/181514/ + * In favor of: DEFAULT_API_VERSION + * As of: 1.2.1 + * Removed in: NA + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1453229 + * Commit: https://review.opendev.org/#/c/181514/ 7. `image set` commands will no longer return the modified resource - Previously, modifying an image would result in the new image being displayed - to the user. To keep things consistent with other `set` commands, we will - no longer be showing the modified resource. + Previously, modifying an image would result in the new image being displayed + to the user. To keep things consistent with other `set` commands, we will + no longer be showing the modified resource. - * In favor of: Use `set` then `show` - * As of: NA - * Removed in: NA - * Bug: NA - * Commit: NA + * In favor of: Use `set` then `show` + * As of: NA + * Removed in: NA + * Bug: NA + * Commit: NA 8. `region` commands no longer support `url` - The Keystone team removed support for the `url` attribute from the client - and server side. Changes to the `create`, `set` and `list` commands for - regions have been affected. + The Keystone team removed support for the `url` attribute from the client + and server side. Changes to the `create`, `set` and `list` commands for + regions have been affected. - * In favor of: NA - * As of 1.9.0 - * Removed in: NA - * Bug: https://launchpad.net/bugs/1506841 - * Commit: https://review.opendev.org/#/c/236736/ + * In favor of: NA + * As of 1.9.0 + * Removed in: NA + * Bug: https://launchpad.net/bugs/1506841 + * Commit: https://review.opendev.org/#/c/236736/ 9. `flavor set/unset` commands will no longer return the modified resource - Previously, modifying a flavor would result in the new flavor being displayed - to the user. To keep things consistent with other `set/unset` commands, we - will no longer be showing the modified resource. + Previously, modifying a flavor would result in the new flavor being displayed + to the user. To keep things consistent with other `set/unset` commands, we + will no longer be showing the modified resource. - * In favor of: Use `set/unset` then `show` - * As of: NA - * Removed in: NA - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1546065 - * Commit: https://review.opendev.org/#/c/280663/ + * In favor of: Use `set/unset` then `show` + * As of: NA + * Removed in: NA + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1546065 + * Commit: https://review.opendev.org/#/c/280663/ 10. `security group set` commands will no longer return the modified resource - Previously, modifying a security group would result in the new security group - being displayed to the user. To keep things consistent with other `set` - commands, we will no longer be showing the modified resource. 
+ Previously, modifying a security group would result in the new security group + being displayed to the user. To keep things consistent with other `set` + commands, we will no longer be showing the modified resource. - * In favor of: Use `set` then `show` - * As of: NA - * Removed in: NA - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1546065 - * Commit: https://review.opendev.org/#/c/281087/ + * In favor of: Use `set` then `show` + * As of: NA + * Removed in: NA + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1546065 + * Commit: https://review.opendev.org/#/c/281087/ 11. `compute agent set` commands will no longer return the modified resource - Previously, modifying an agent would result in the new agent being displayed - to the user. To keep things consistent with other `set` commands, we will - no longer be showing the modified resource. + Previously, modifying an agent would result in the new agent being displayed + to the user. To keep things consistent with other `set` commands, we will + no longer be showing the modified resource. - * In favor of: Use `set` then `show` - * As of: NA - * Removed in: NA - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1546065 - * Commit: https://review.opendev.org/#/c/281088/ + * In favor of: Use `set` then `show` + * As of: NA + * Removed in: NA + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1546065 + * Commit: https://review.opendev.org/#/c/281088/ 12. ` ` should be optional for command `openstack compute agent set` - Previously, the command was `openstack compute agent set - `, whereas now it is: `openstack compute agent set --version - --url --md5hash `. + Previously, the command was `openstack compute agent set + `, whereas now it is: `openstack compute agent set --version + --url --md5hash `. - * In favor of: making optional. - * As of: NA - * Removed in: NA - * Bug: NA - * Commit: https://review.opendev.org/#/c/328819/ + * In favor of: making optional. + * As of: NA + * Removed in: NA + * Bug: NA + * Commit: https://review.opendev.org/#/c/328819/ 13. `aggregate set` commands will no longer return the modified resource - Previously, modifying an aggregate would result in the new aggregate being - displayed to the user. To keep things consistent with other `set` commands, - we will no longer be showing the modified resource. + Previously, modifying an aggregate would result in the new aggregate being + displayed to the user. To keep things consistent with other `set` commands, + we will no longer be showing the modified resource. - * In favor of: Use `set` then `show` - * As of: NA - * Removed in: NA - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1546065 - * Commit: https://review.opendev.org/#/c/281089/ + * In favor of: Use `set` then `show` + * As of: NA + * Removed in: NA + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1546065 + * Commit: https://review.opendev.org/#/c/281089/ 14. Output of `ip floating list` command has changed. - When using Compute v2, the original output is: + When using Compute v2, the original output is: - .. code-block:: bash + .. 
code-block:: bash - # ip floating list + # ip floating list - +----+--------+------------+----------+-------------+ - | ID | Pool | IP | Fixed IP | Instance ID | - +----+--------+-----------------------+-------------+ - | 1 | public | 172.24.4.1 | None | None | - +----+--------+------------+----------+-------------+ + +----+--------+------------+----------+-------------+ + | ID | Pool | IP | Fixed IP | Instance ID | + +----+--------+-----------------------+-------------+ + | 1 | public | 172.24.4.1 | None | None | + +----+--------+------------+----------+-------------+ - Now it changes to: + Now it changes to: - .. code-block:: bash + .. code-block:: bash - # ip floating list + # ip floating list - +----+---------------------+------------------+-----------+--------+ - | ID | Floating IP Address | Fixed IP Address | Server ID | Pool | - +----+---------------------+------------------+-----------+--------+ - | 1 | 172.24.4.1 | None | None | public | - +----+---------------------+------------------+-----------+--------+ + +----+---------------------+------------------+-----------+--------+ + | ID | Floating IP Address | Fixed IP Address | Server ID | Pool | + +----+---------------------+------------------+-----------+--------+ + | 1 | 172.24.4.1 | None | None | public | + +----+---------------------+------------------+-----------+--------+ - When using Network v2, which is different from Compute v2. The output is: + When using Network v2, which is different from Compute v2. The output is: - .. code-block:: bash + .. code-block:: bash - # ip floating list + # ip floating list - +--------------------------------------+---------------------+------------------+------+ - | ID | Floating IP Address | Fixed IP Address | Port | - +--------------------------------------+---------------------+------------------+------+ - | 1976df86-e66a-4f96-81bd-c6ffee6407f1 | 172.24.4.3 | None | None | - +--------------------------------------+---------------------+------------------+------+ + +--------------------------------------+---------------------+------------------+------+ + | ID | Floating IP Address | Fixed IP Address | Port | + +--------------------------------------+---------------------+------------------+------+ + | 1976df86-e66a-4f96-81bd-c6ffee6407f1 | 172.24.4.3 | None | None | + +--------------------------------------+---------------------+------------------+------+ - * In favor of: Use `ip floating list` command - * As of: NA - * Removed in: NA - * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1519502 - * Commit: https://review.opendev.org/#/c/277720/ + * In favor of: Use `ip floating list` command + * As of: NA + * Removed in: NA + * Bug: https://bugs.launchpad.net/python-openstackclient/+bug/1519502 + * Commit: https://review.opendev.org/#/c/277720/ For Developers ============== diff --git a/doc/source/cli/command-objects/access-rules.rst b/doc/source/cli/command-objects/access-rules.rst index bc8458283f..6e811fc7df 100644 --- a/doc/source/cli/command-objects/access-rules.rst +++ b/doc/source/cli/command-objects/access-rules.rst @@ -9,53 +9,11 @@ rule comprises of a service type, a request path, and a request method. Access rules may only be created as attributes of application credentials, but they may be viewed and deleted independently. +.. autoprogram-cliff:: openstack.identity.v3 + :command: access rule delete -access rule delete ------------------- +.. autoprogram-cliff:: openstack.identity.v3 + :command: access rule list -Delete access rule(s) - -.. 
program:: access rule delete -.. code:: bash - - openstack access rule delete [ ...] - -.. describe:: - - Access rule(s) to delete (ID) - -access rule list ----------------- - -List access rules - -.. program:: access rule list -.. code:: bash - - openstack access rule list - [--user ] - [--user-domain ] - -.. option:: --user - - User whose access rules to list (name or ID). If not provided, looks up the - current user's access rules. - -.. option:: --user-domain - - Domain the user belongs to (name or ID). This can be - used in case collisions between user names exist. - -access rule show ---------------------------- - -Display access rule details - -.. program:: access rule show -.. code:: bash - - openstack access rule show - -.. describe:: - - Access rule to display (ID) +.. autoprogram-cliff:: openstack.identity.v3 + :command: access rule show diff --git a/doc/source/cli/command-objects/consistency-group-snapshot.rst b/doc/source/cli/command-objects/consistency-group-snapshot.rst index 29d5065663..51241685fd 100644 --- a/doc/source/cli/command-objects/consistency-group-snapshot.rst +++ b/doc/source/cli/command-objects/consistency-group-snapshot.rst @@ -2,95 +2,16 @@ consistency group snapshot ========================== -Block Storage v2 +Block Storage v2, v3 -consistency group snapshot create ---------------------------------- +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group snapshot create -Create new consistency group snapshot. +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group snapshot delete -.. program:: consistency group snapshot create -.. code:: bash +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group snapshot list - openstack consistency group snapshot create - [--consistency-group ] - [--description ] - [] - -.. option:: --consistency-group - - Consistency group to snapshot (name or ID) - (default to be the same as ) - -.. option:: --description - - Description of this consistency group snapshot - -.. _consistency_group_snapshot_create-snapshot-name: -.. describe:: - - Name of new consistency group snapshot (default to None) - -consistency group snapshot delete ---------------------------------- - -Delete consistency group snapshot(s) - -.. program:: consistency group snapshot delete -.. code:: bash - - openstack consistency group snapshot delete - [ ...] - -.. _consistency_group_snapshot_delete-consistency-group-snapshot: -.. describe:: - - Consistency group snapshot(s) to delete (name or ID) - -consistency group snapshot list -------------------------------- - -List consistency group snapshots. - -.. program:: consistency group snapshot list -.. code:: bash - - openstack consistency group snapshot list - [--all-projects] - [--long] - [--status ] - [--consistency-group ] - -.. option:: --all-projects - - Show detail for all projects. Admin only. - (defaults to False) - -.. option:: --long - - List additional fields in output - -.. option:: --status - - Filters results by a status - ("available", "error", "creating", "deleting" or "error_deleting") - -.. option:: --consistency-group - - Filters results by a consistency group (name or ID) - -consistency group snapshot show -------------------------------- - -Display consistency group snapshot details. - -.. program:: consistency group snapshot show -.. code:: bash - - openstack consistency group snapshot show - - -.. _consistency_group_snapshot_show-consistency-group-snapshot: -.. describe:: - - Consistency group snapshot to display (name or ID) +.. 
autoprogram-cliff:: openstack.volume.v3 + :command: consistency group snapshot show diff --git a/doc/source/cli/command-objects/consistency-group.rst b/doc/source/cli/command-objects/consistency-group.rst index 57082c6df8..9ff207ff79 100644 --- a/doc/source/cli/command-objects/consistency-group.rst +++ b/doc/source/cli/command-objects/consistency-group.rst @@ -2,172 +2,25 @@ consistency group ================= -Block Storage v2 +Block Storage v2, v3 -consistency group add volume ----------------------------- +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group add volume -Add volume(s) to consistency group. +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group create -.. program:: consistency group add volume -.. code:: bash +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group delete - openstack consistency group add volume - - [ ...] +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group list -.. _consistency_group_add_volume: -.. describe:: +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group remove volume - Consistency group to contain (name or ID) +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group set -.. describe:: - - Volume(s) to add to (name or ID) - (repeat option to add multiple volumes) - -consistency group create ------------------------- - -Create new consistency group. - -.. program:: consistency group create -.. code:: bash - - openstack consistency group create - --volume-type | --consistency-group-source | --consistency-group-snapshot - [--description ] - [--availability-zone ] - [] - -.. option:: --volume-type - - Volume type of this consistency group (name or ID) - -.. option:: --consistency-group-source - - Existing consistency group (name or ID) - -.. option:: --consistency-group-snapshot - - Existing consistency group snapshot (name or ID) - -.. option:: --description - - Description of this consistency group - -.. option:: --availability-zone - - Availability zone for this consistency group - (not available if creating consistency group from source) - -.. _consistency_group_create-name: -.. describe:: - - Name of new consistency group (default to None) - -consistency group delete ------------------------- - -Delete consistency group(s). - -.. program:: consistency group delete -.. code:: bash - - openstack consistency group delete - [--force] - [ ...] - -.. option:: --force - - Allow delete in state other than error or available - -.. _consistency_group_delete-consistency-group: -.. describe:: - - Consistency group(s) to delete (name or ID) - -consistency group list ----------------------- - -List consistency groups. - -.. program:: consistency group list -.. code:: bash - - openstack consistency group list - [--all-projects] - [--long] - -.. option:: --all-projects - - Show detail for all projects. Admin only. - (defaults to False) - -.. option:: --long - - List additional fields in output - -consistency group remove volume -------------------------------- - -Remove volume(s) from consistency group. - -.. program:: consistency group remove volume -.. code:: bash - - openstack consistency group remove volume - - [ ...] - -.. _consistency_group_remove_volume: -.. describe:: - - Consistency group containing (name or ID) - -.. describe:: - - Volume(s) to remove from (name or ID) - (repeat option to remove multiple volumes) - -consistency group set ---------------------- - -Set consistency group properties. - -.. program:: consistency group set -.. 
code:: bash - - openstack consistency group set - [--name ] - [--description ] - - -.. option:: --name - - New consistency group name - -.. option:: --description - - New consistency group description - -.. _consistency_group_set-consistency-group: -.. describe:: - - Consistency group to modify (name or ID) - -consistency group show ----------------------- - -Display consistency group details. - -.. program:: consistency group show -.. code:: bash - - openstack consistency group show - - -.. _consistency_group_show-consistency-group: -.. describe:: - - Consistency group to display (name or ID) +.. autoprogram-cliff:: openstack.volume.v3 + :command: consistency group show diff --git a/doc/source/cli/command-objects/console-connection.rst b/doc/source/cli/command-objects/console-connection.rst new file mode 100644 index 0000000000..c3358050fb --- /dev/null +++ b/doc/source/cli/command-objects/console-connection.rst @@ -0,0 +1,10 @@ +================== +console connection +================== + +Server console connection information + +Compute v2 + +.. autoprogram-cliff:: openstack.compute.v2 + :command: console connection show diff --git a/doc/source/cli/command-objects/default-security-group-rule.rst b/doc/source/cli/command-objects/default-security-group-rule.rst new file mode 100644 index 0000000000..1905614bfc --- /dev/null +++ b/doc/source/cli/command-objects/default-security-group-rule.rst @@ -0,0 +1,11 @@ +=========================== +default security group rule +=========================== + +A **default security group rule** specifies the template of the security group +rules which will be used by neutron to create rules in every new security group. + +Network v2 + +.. autoprogram-cliff:: openstack.network.v2 + :command: default security group rule * diff --git a/doc/source/cli/command-objects/image-member.rst b/doc/source/cli/command-objects/image-member.rst new file mode 100644 index 0000000000..4b48991cb3 --- /dev/null +++ b/doc/source/cli/command-objects/image-member.rst @@ -0,0 +1,14 @@ +============ +image member +============ + +Image v2 + +.. autoprogram-cliff:: openstack.image.v2 + :command: image add project + +.. autoprogram-cliff:: openstack.image.v2 + :command: image remove project + +.. autoprogram-cliff:: openstack.image.v2 + :command: image member list diff --git a/doc/source/cli/command-objects/image-metadef.rst b/doc/source/cli/command-objects/image-metadef.rst new file mode 100644 index 0000000000..ed36d95f75 --- /dev/null +++ b/doc/source/cli/command-objects/image-metadef.rst @@ -0,0 +1,50 @@ +============= +image metadef +============= + +Image v2 + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef namespace create + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef namespace delete + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef namespace list + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef namespace set + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef namespace show + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef resource type list + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef object create + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef object show + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef object list + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef object delete + +.. 
autoprogram-cliff:: openstack.image.v2 + :command: image metadef object update + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef object property show + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef property create + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef property list + +.. autoprogram-cliff:: openstack.image.v2 + :command: image metadef property show diff --git a/doc/source/cli/command-objects/image-task.rst b/doc/source/cli/command-objects/image-task.rst new file mode 100644 index 0000000000..0021192042 --- /dev/null +++ b/doc/source/cli/command-objects/image-task.rst @@ -0,0 +1,11 @@ +========== +image task +========== + +Image v2 + +.. autoprogram-cliff:: openstack.image.v2 + :command: image task list + +.. autoprogram-cliff:: openstack.image.v2 + :command: image task show diff --git a/doc/source/cli/command-objects/image-v2.rst b/doc/source/cli/command-objects/image-v2.rst index 473b26d07b..4a2a4d0f90 100644 --- a/doc/source/cli/command-objects/image-v2.rst +++ b/doc/source/cli/command-objects/image-v2.rst @@ -1,6 +1,32 @@ -======== -image v2 -======== +===== +image +===== + +Image v2 + +.. autoprogram-cliff:: openstack.image.v2 + :command: image create + +.. autoprogram-cliff:: openstack.image.v2 + :command: image delete + +.. autoprogram-cliff:: openstack.image.v2 + :command: image list + +.. autoprogram-cliff:: openstack.image.v2 + :command: image save + +.. autoprogram-cliff:: openstack.image.v2 + :command: image set + +.. autoprogram-cliff:: openstack.image.v2 + :command: image unset + +.. autoprogram-cliff:: openstack.image.v2 + :command: image show + +.. autoprogram-cliff:: openstack.image.v2 + :command: image stage .. autoprogram-cliff:: openstack.image.v2 - :command: image * + :command: image import diff --git a/doc/source/cli/command-objects/limits.rst b/doc/source/cli/command-objects/limits.rst index 3a0f99b376..11d53802c6 100644 --- a/doc/source/cli/command-objects/limits.rst +++ b/doc/source/cli/command-objects/limits.rst @@ -4,7 +4,7 @@ limits The Compute and Block Storage APIs have resource usage limits. -Compute v2, Block Storage v1 +Block Storage v2, v3; Compute v2 .. autoprogram-cliff:: openstack.common diff --git a/doc/source/cli/command-objects/project-purge.rst b/doc/source/cli/command-objects/project-purge.rst deleted file mode 100644 index 8f10a77452..0000000000 --- a/doc/source/cli/command-objects/project-purge.rst +++ /dev/null @@ -1,11 +0,0 @@ -============= -project purge -============= - -Clean resources associated with a specific project. - -Block Storage v1, v2; Compute v2; Image v1, v2 - - -.. autoprogram-cliff:: openstack.common - :command: project purge diff --git a/doc/source/cli/command-objects/quota.rst b/doc/source/cli/command-objects/quota.rst index cab1265240..59a8a9bb4e 100644 --- a/doc/source/cli/command-objects/quota.rst +++ b/doc/source/cli/command-objects/quota.rst @@ -5,7 +5,7 @@ quota Resource quotas appear in multiple APIs, OpenStackClient presents them as a single object with multiple properties. -Block Storage v1, v2, Compute v2, Network v2 +Block Storage v1, v3; Compute v2; Network v2 .. 
autoprogram-cliff:: openstack.common :command: quota * diff --git a/doc/source/cli/command-objects/role-assignment.rst b/doc/source/cli/command-objects/role-assignment.rst index b29f32c690..aa618d4dd3 100644 --- a/doc/source/cli/command-objects/role-assignment.rst +++ b/doc/source/cli/command-objects/role-assignment.rst @@ -4,103 +4,5 @@ role assignment Identity v2, v3 -role assignment list --------------------- - -List role assignments - -.. program:: role assignment list -.. code:: bash - - openstack role assignment list - [--role ] - [--role-domain ] - [--user ] - [--user-domain ] - [--group ] - [--group-domain ] - [--domain ] - [--project ] - [--project-domain ] - [--effective] - [--inherited] - [--names] - -.. option:: --role - - Role to filter (name or ID) - - .. versionadded:: 3 - -.. option:: --role-domain - - Domain the role belongs to (name or ID). - This can be used in case collisions between role names exist. - - .. versionadded:: 3 - -.. option:: --user - - User to filter (name or ID) - -.. option:: --user-domain - - Domain the user belongs to (name or ID). - This can be used in case collisions between user names exist. - - .. versionadded:: 3 - -.. option:: --group - - Group to filter (name or ID) - - .. versionadded:: 3 - -.. option:: --group-domain - - Domain the group belongs to (name or ID). - This can be used in case collisions between group names exist. - - .. versionadded:: 3 - -.. option:: --domain - - Domain to filter (name or ID) - - .. versionadded:: 3 - -.. option:: --project - - Project to filter (name or ID) - -.. option:: --project-domain - - Domain the project belongs to (name or ID). - This can be used in case collisions between project names exist. - - .. versionadded:: 3 - -.. option:: --effective - - Returns only effective role assignments (defaults to False) - - .. versionadded:: 3 - -.. option:: --inherited - - Specifies if the role grant is inheritable to the sub projects - - .. versionadded:: 3 - -.. option:: --names - - Returns role assignments with names instead of IDs - -.. option:: --auth-user - - Returns role assignments for the authenticated user. - -.. option:: --auth-project - - Returns role assignments for the project to which the authenticated user - is scoped. +.. autoprogram-cliff:: openstack.identity.v3 + :command: role assignment list diff --git a/doc/source/cli/command-objects/volume-backend.rst b/doc/source/cli/command-objects/volume-backend.rst index 4766ecabb2..f47efd1976 100644 --- a/doc/source/cli/command-objects/volume-backend.rst +++ b/doc/source/cli/command-objects/volume-backend.rst @@ -2,7 +2,7 @@ volume backend ============== -Block Storage v2 +Block Storage v2, v3 -.. autoprogram-cliff:: openstack.volume.v2 +.. autoprogram-cliff:: openstack.volume.v3 :command: volume backend * diff --git a/doc/source/cli/command-objects/volume-backup.rst b/doc/source/cli/command-objects/volume-backup.rst index 1c26921197..7b036ca0ce 100644 --- a/doc/source/cli/command-objects/volume-backup.rst +++ b/doc/source/cli/command-objects/volume-backup.rst @@ -2,8 +2,8 @@ volume backup ============= -Block Storage v1, v2 +Block Storage v2, v3 -.. autoprogram-cliff:: openstack.volume.v2 +.. 
autoprogram-cliff:: openstack.volume.v3 :command: volume backup * diff --git a/doc/source/cli/command-objects/volume-host.rst b/doc/source/cli/command-objects/volume-host.rst index 350d6dec7c..acd0866356 100644 --- a/doc/source/cli/command-objects/volume-host.rst +++ b/doc/source/cli/command-objects/volume-host.rst @@ -2,51 +2,7 @@ volume host =========== -Block Storage v2 +Block Storage v2, v3 -volume host failover --------------------- - -Failover volume host to different backend - -.. program:: volume host failover -.. code:: bash - - openstack volume host failover - --volume-backend - - -.. option:: --volume-backend - - The ID of the volume backend replication - target where the host will failover to (required) - -.. _volume_host_failover-host-name: -.. describe:: - - Name of volume host - -volume host set ---------------- - -Set volume host properties - -.. program:: volume host set -.. code:: bash - - openstack volume host set - [--enable | --disable] - - -.. option:: --enable - - Thaw and enable the specified volume host. - -.. option:: --disable - - Freeze and disable the specified volume host - -.. _volume_host_set-host-name: -.. describe:: - - Name of volume host +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume host * diff --git a/doc/source/cli/command-objects/volume-qos.rst b/doc/source/cli/command-objects/volume-qos.rst index 8fdbc12284..82c4d540c7 100644 --- a/doc/source/cli/command-objects/volume-qos.rst +++ b/doc/source/cli/command-objects/volume-qos.rst @@ -2,165 +2,7 @@ volume qos ========== -Block Storage v1, v2 +Block Storage v2, v3 -volume qos associate --------------------- - -Associate a QoS specification to a volume type - -.. program:: volume qos associate -.. code:: bash - - openstack volume qos associate - - - -.. _volume_qos_associate: -.. describe:: - - QoS specification to modify (name or ID) - -.. describe:: - - Volume type to associate the QoS (name or ID) - -volume qos create ------------------ - -Create new QoS Specification - -.. program:: volume qos create -.. code:: bash - - openstack volume qos create - [--consumer ] - [--property [...] ] - - -.. option:: --consumer - - Consumer of the QoS. Valid consumers: 'front-end', 'back-end', 'both' (defaults to 'both') - -.. option:: --property - - Set a property on this QoS specification (repeat option to set multiple properties) - -.. _volume_qos_create-name: -.. describe:: - - New QoS specification name - -volume qos delete ------------------ - -Delete QoS specification - -.. program:: volume qos delete -.. code:: bash - - openstack volume qos delete - [--force] - [ ...] - -.. option:: --force - - Allow to delete in-use QoS specification(s) - -.. _volume_qos_delete-qos-spec: -.. describe:: - - QoS specification(s) to delete (name or ID) - -volume qos disassociate ------------------------ - -Disassociate a QoS specification from a volume type - -.. program:: volume qos disassociate -.. code:: bash - - openstack volume qos disassociate - --volume-type | --all - - -.. option:: --volume-type - - Volume type to disassociate the QoS from (name or ID) - -.. option:: --all - - Disassociate the QoS from every volume type - -.. _volume_qos_disassociate-qos-spec: -.. describe:: - - QoS specification to modify (name or ID) - -volume qos list ---------------- - -List QoS specifications - -.. program:: volume qos list -.. code:: bash - - openstack volume qos list - -volume qos set --------------- - -Set QoS specification properties - -.. program:: volume qos set -.. 
code:: bash - - openstack volume qos set - [--property [...] ] - - -.. option:: --property - - Property to add or modify for this QoS specification (repeat option to set multiple properties) - -.. _volume_qos_set-qos-spec: -.. describe:: - - QoS specification to modify (name or ID) - -volume qos show ---------------- - -Display QoS specification details - -.. program:: volume qos show -.. code:: bash - - openstack volume qos show - - -.. _volume_qos_show-qos-spec: -.. describe:: - - QoS specification to display (name or ID) - -volume qos unset ----------------- - -Unset QoS specification properties - -.. program:: volume qos unset -.. code:: bash - - openstack volume qos unset - [--property [...] ] - - -.. option:: --property - - Property to remove from QoS specification (repeat option to remove multiple properties) - -.. _volume_qos_unset-qos-spec: -.. describe:: - - QoS specification to modify (name or ID) +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume qos * diff --git a/doc/source/cli/command-objects/volume-service.rst b/doc/source/cli/command-objects/volume-service.rst index 0499fb9062..43d455ff5a 100644 --- a/doc/source/cli/command-objects/volume-service.rst +++ b/doc/source/cli/command-objects/volume-service.rst @@ -2,65 +2,7 @@ volume service ============== -Block Storage v1, v2 +Block Storage v2, v3 -volume service list -------------------- - -List volume service - -.. program:: volume service list -.. code:: bash - - openstack volume service list - [--host ] - [--service ] - [--long] - -.. option:: --host - - List services on specified host (name only) - -.. option:: --service - - List only specified service (name only) - -.. option:: --long - - List additional fields in output - -volume service set ------------------- - -Set volume service properties - -.. program:: volume service set -.. code:: bash - - openstack volume service set - [--enable | --disable] - [--disable-reason ] - - - -.. option:: --enable - - Enable volume service - -.. option:: --disable - - Disable volume service - -.. option:: --disable-reason - - Reason for disabling the service - (should be used with :option:`--disable` option) - -.. _volume_service_set-host: -.. describe:: - - Name of host - -.. describe:: - - Name of service (Binary name) +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume service * diff --git a/doc/source/cli/command-objects/volume-snapshot.rst b/doc/source/cli/command-objects/volume-snapshot.rst index 21a8937018..e63e436dff 100644 --- a/doc/source/cli/command-objects/volume-snapshot.rst +++ b/doc/source/cli/command-objects/volume-snapshot.rst @@ -2,222 +2,7 @@ volume snapshot =============== -Block Storage v1, v2 +Block Storage v2, v3 -volume snapshot create ----------------------- - -Create new volume snapshot - -.. program:: volume snapshot create -.. code:: bash - - openstack volume snapshot create - [--volume ] - [--description ] - [--force] - [--property [...] ] - [--remote-source [...]] - - -.. option:: --volume - - Volume to snapshot (name or ID) (default is ) - -.. option:: --description - - Description of the snapshot - -.. option:: --force - - Create a snapshot attached to an instance. Default is False - -.. option:: --property - - Set a property to this snapshot (repeat option to set multiple properties) - - *Volume version 2 only* - -.. 
option:: --remote-source - - The attribute(s) of the exsiting remote volume snapshot - (admin required) (repeat option to specify multiple attributes) - e.g.: '--remote-source source-name=test_name --remote-source source-id=test_id' - - *Volume version 2 only* - -.. _volume_snapshot_create-snapshot-name: -.. describe:: - - Name of the new snapshot - -volume snapshot delete ----------------------- - -Delete volume snapshot(s) - -.. program:: volume snapshot delete -.. code:: bash - - openstack volume snapshot delete - [--force] - [ ...] - -.. option:: --force - - Attempt forced removal of snapshot(s), regardless of state (defaults to False) - -.. _volume_snapshot_delete-snapshot: -.. describe:: - - Snapshot(s) to delete (name or ID) - -volume snapshot list --------------------- - -List volume snapshots - -.. program:: volume snapshot list -.. code:: bash - - openstack volume snapshot list - [--all-projects] - [--project [--project-domain ]] - [--long] - [--limit ] - [--marker ] - [--name ] - [--status ] - [--volume ] - -.. option:: --all-projects - - Include all projects (admin only) - -.. option:: --project - - Filter results by project (name or ID) (admin only) - - *Volume version 2 only* - -.. option:: --project-domain - - Domain the project belongs to (name or ID). - - This can be used in case collisions between project names exist. - - *Volume version 2 only* - -.. option:: --long - - List additional fields in output - -.. option:: --status - - Filters results by a status. - ('available', 'error', 'creating', 'deleting' or 'error_deleting') - -.. option:: --name - - Filters results by a name. - -.. option:: --volume - - Filters results by a volume (name or ID). - -.. option:: --limit - - Maximum number of snapshots to display - - *Volume version 2 only* - -.. option:: --marker - - The last snapshot ID of the previous page - - *Volume version 2 only* - -volume snapshot set -------------------- - -Set volume snapshot properties - -.. program:: volume snapshot set -.. code:: bash - - openstack volume snapshot set - [--name ] - [--description ] - [--no-property] - [--property [...] ] - [--state ] - - -.. option:: --name - - New snapshot name - -.. option:: --description - - New snapshot description - -.. option:: --no-property - - Remove all properties from :ref:`\ ` - (specify both :option:`--no-property` and :option:`--property` to - remove the current properties before setting new properties.) - -.. option:: --property - - Property to add or modify for this snapshot (repeat option to set multiple properties) - -.. option:: --state - - New snapshot state. - ("available", "error", "creating", "deleting", or "error_deleting") (admin only) - (This option simply changes the state of the snapshot in the database with - no regard to actual status, exercise caution when using) - - *Volume version 2 only* - -.. _volume_snapshot_set-snapshot: -.. describe:: - - Snapshot to modify (name or ID) - -volume snapshot show --------------------- - -Display volume snapshot details - -.. program:: volume snapshot show -.. code:: bash - - openstack volume snapshot show - - -.. _volume_snapshot_show-snapshot: -.. describe:: - - Snapshot to display (name or ID) - -volume snapshot unset ---------------------- - -Unset volume snapshot properties - -.. program:: volume snapshot unset -.. code:: bash - - openstack volume snapshot unset - [--property ] - - -.. option:: --property - - Property to remove from snapshot (repeat option to remove multiple properties) - -.. _volume_snapshot_unset-snapshot: -.. 
describe:: - - Snapshot to modify (name or ID) +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume snapshot * diff --git a/doc/source/cli/command-objects/volume-transfer-request.rst b/doc/source/cli/command-objects/volume-transfer-request.rst index 23cd3d3e1b..97dac02a0a 100644 --- a/doc/source/cli/command-objects/volume-transfer-request.rst +++ b/doc/source/cli/command-objects/volume-transfer-request.rst @@ -2,95 +2,7 @@ volume transfer request ======================= -Block Storage v1, v2 +Block Storage v2, v3 -volume transfer request accept ------------------------------- - -Accept volume transfer request - -.. program:: volume transfer request accept -.. code:: bash - - openstack volume transfer request accept - --auth-key - - -.. option:: --auth-key - - Volume transfer request authentication key - -.. _volume_transfer_request_accept: -.. describe:: - - Volume transfer request to accept (ID only) - - Non-admin users are only able to specify the transfer request by ID. - -volume transfer request create ------------------------------- - -Create volume transfer request - -.. program:: volume transfer request create -.. code:: bash - - openstack volume transfer request create - [--name ] - - -.. option:: --name - - New transfer request name (default to None) - -.. _volume_transfer_request_create-volume: -.. describe:: - - Volume to transfer (name or ID) - -volume transfer request delete ------------------------------- - -Delete volume transfer request(s) - -.. program:: volume transfer request delete -.. code:: bash - - openstack volume transfer request delete - [ ...] - -.. _volume_transfer_request_delete-transfer-request: -.. describe:: - - Volume transfer request(s) to delete (name or ID) - -volume transfer request list ----------------------------- - -Lists all volume transfer requests - -.. program:: volume transfer request list -.. code:: bash - - openstack volume transfer request list - --all-projects - -.. option:: --all-projects - - Include all projects (admin only) - -volume transfer request show ----------------------------- - -Show volume transfer request details - -.. program:: volume transfer request show -.. code:: bash - - openstack volume transfer request show - - -.. _volume_transfer_request_show-transfer-request: -.. describe:: - - Volume transfer request to display (name or ID) +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume transfer request * diff --git a/doc/source/cli/command-objects/volume-type.rst b/doc/source/cli/command-objects/volume-type.rst index 2b5aff9940..003ee67306 100644 --- a/doc/source/cli/command-objects/volume-type.rst +++ b/doc/source/cli/command-objects/volume-type.rst @@ -2,290 +2,7 @@ volume type =========== -Block Storage v1, v2 +Block Storage v2, v3 -volume type create ------------------- - -Create new volume type - -.. program:: volume type create -.. code:: bash - - openstack volume type create - [--description ] - [--public | --private] - [--property [...] ] - [--project ] - [--project-domain ] - [--encryption-provider ] - [--encryption-cipher ] - [--encryption-key-size ] - [--encryption-control-location ] - - -.. option:: --description - - Volume type description - - .. versionadded:: 2 - -.. option:: --public - - Volume type is accessible to the public - - .. versionadded:: 2 - -.. option:: --private - - Volume type is not accessible to the public - - .. versionadded:: 2 - -.. option:: --property - - Set a property on this volume type (repeat option to set multiple properties) - -.. 
option:: --project - - Allow to access private type (name or ID) - (Must be used with :option:`--private` option) - - *Volume version 2 only* - -.. option:: --project-domain - - Domain the project belongs to (name or ID). - This can be used in case collisions between project names exist. - - *Volume version 2 only* - -.. option:: --encryption-provider - - Set the encryption provider format for this volume type - (e.g "luks" or "plain") (admin only) - - This option is required when setting encryption type of a volume. - Consider using other encryption options such as: :option:`--encryption-cipher`, - :option:`--encryption-key-size` and :option:`--encryption-control-location` - -.. option:: --encryption-cipher - - Set the encryption algorithm or mode for this volume type - (e.g "aes-xts-plain64") (admin only) - -.. option:: --encryption-key-size - - Set the size of the encryption key of this volume type - (e.g "128" or "256") (admin only) - -.. option:: --encryption-control-location - - Set the notional service where the encryption is performed - ("front-end" or "back-end") (admin only) - - The default value for this option is "front-end" when setting encryption type of - a volume. Consider using other encryption options such as: :option:`--encryption-cipher`, - :option:`--encryption-key-size` and :option:`--encryption-provider` - -.. _volume_type_create-name: -.. describe:: - - Volume type name - -volume type delete ------------------- - -Delete volume type(s) - -.. program:: volume type delete -.. code:: bash - - openstack volume type delete - [ ...] - -.. _volume_type_delete-volume-type: -.. describe:: - - Volume type(s) to delete (name or ID) - -volume type list ----------------- - -List volume types - -.. program:: volume type list -.. code:: bash - - openstack volume type list - [--long] - [--default | --public | --private] - [--encryption-type] - -.. option:: --long - - List additional fields in output - -.. option:: --public - - List only public types - - *Volume version 2 only* - -.. option:: --private - - List only private types (admin only) - - *Volume version 2 only* - -.. option:: --default - - List the default volume type - - *Volume version 2 only* - -.. option:: --encryption-type - - Display encryption information for each volume type (admin only) - -volume type set ---------------- - -Set volume type properties - -.. program:: volume type set -.. code:: bash - - openstack volume type set - [--name ] - [--description ] - [--property [...] ] - [--project ] - [--project-domain ] - [--encryption-provider ] - [--encryption-cipher ] - [--encryption-key-size ] - [--encryption-control-location ] - - -.. option:: --name - - Set volume type name - - .. versionadded:: 2 - -.. option:: --description - - Set volume type description - - .. versionadded:: 2 - -.. option:: --project - - Set volume type access to project (name or ID) (admin only) - - *Volume version 2 only* - -.. option:: --project-domain - - Domain the project belongs to (name or ID). - This can be used in case collisions between project names exist. - -.. option:: --property - - Set a property on this volume type (repeat option to set multiple properties) - -.. option:: --encryption-provider - - Set the encryption provider format for this volume type - (e.g "luks" or "plain") (admin only) - - This option is required when setting encryption type of a volume for the first time. 
- Consider using other encryption options such as: :option:`--encryption-cipher`, - :option:`--encryption-key-size` and :option:`--encryption-control-location` - -.. option:: --encryption-cipher - - Set the encryption algorithm or mode for this volume type - (e.g "aes-xts-plain64") (admin only) - -.. option:: --encryption-key-size - - Set the size of the encryption key of this volume type - (e.g "128" or "256") (admin only) - -.. option:: --encryption-control-location - - Set the notional service where the encryption is performed - ("front-end" or "back-end") (admin only) - - The default value for this option is "front-end" when setting encryption type of - a volume for the first time. Consider using other encryption options such as: - :option:`--encryption-cipher`, :option:`--encryption-key-size` and :option:`--encryption-provider` - -.. _volume_type_set-volume-type: -.. describe:: - - Volume type to modify (name or ID) - -volume type show ----------------- - -Display volume type details - -.. program:: volume type show -.. code:: bash - - openstack volume type show - [--encryption-type] - - -.. option:: --encryption-type - - Display encryption information of this volume type (admin only) - -.. _volume_type_show-volume-type: -.. describe:: - - Volume type to display (name or ID) - -volume type unset ------------------ - -Unset volume type properties - -.. program:: volume type unset -.. code:: bash - - openstack volume type unset - [--property [...] ] - [--project ] - [--project-domain ] - [--encryption-type] - - -.. option:: --property - - Property to remove from volume type (repeat option to remove multiple properties) - -.. option:: --project - - Removes volume type access from project (name or ID) (admin only) - - *Volume version 2 only* - -.. option:: --project-domain - - Domain the project belongs to (name or ID). - This can be used in case collisions between project names exist. - - *Volume version 2 only* - -.. option:: --encryption-type - - Remove the encryption type for this volume type (admin only) - -.. _volume_type_unset-volume-type: -.. describe:: - - Volume type to modify (name or ID) +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume type * diff --git a/doc/source/cli/command-objects/volume.rst b/doc/source/cli/command-objects/volume.rst index 9b49177268..337bb9fa2b 100644 --- a/doc/source/cli/command-objects/volume.rst +++ b/doc/source/cli/command-objects/volume.rst @@ -2,397 +2,33 @@ volume ====== -Block Storage v1, v2 +Block Storage v2, v3 -volume create -------------- +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume create -Create new volume +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume delete -.. program:: volume create -.. code:: bash +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume list - openstack volume create - [--size ] - [--type ] - [--image | --snapshot | --source ] - [--description ] - [--availability-zone ] - [--consistency-group ] - [--property [...] ] - [--hint [...] ] - [--bootable | --non-bootable] - [--read-only | --read-write] - +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume migrate -.. option:: --size +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume set - Volume size in GB - (Required unless --snapshot or --source is specified) +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume show -.. option:: --type - - Set the type of volume - - Select ```` from the available types as shown - by ``volume type list``. - -.. 
option:: --image - - Use ```` as source of volume (name or ID) - - This is commonly used to create a boot volume for a server. - -.. option:: --snapshot - - Use ```` as source of volume (name or ID) - -.. option:: --source - - Volume to clone (name or ID) - -.. option:: --description - - Volume description - -.. option:: --availability-zone - - Create volume in ```` - -.. option:: --consistency-group - - Consistency group where the new volume belongs to - -.. option:: --property - - Set a property on this volume (repeat option to set multiple properties) - -.. option:: --hint - - Arbitrary scheduler hint key-value pairs to help boot an instance - (repeat option to set multiple hints) - -.. option:: --bootable - - Mark volume as bootable - -.. option:: --non-bootable - - Mark volume as non-bootable (default) - -.. option:: --read-only - - Set volume to read-only access mode - -.. option:: --read-write - - Set volume to read-write access mode (default) - -.. _volume_create-name: -.. describe:: - - Volume name - -volume delete -------------- - -Delete volume(s) - -.. program:: volume delete -.. code:: bash - - openstack volume delete - [--force | --purge] - [ ...] - -.. option:: --force - - Attempt forced removal of volume(s), regardless of state (defaults to False) - -.. option:: --purge - - Remove any snapshots along with volume(s) (defaults to False) - - *Volume version 2 only* - -.. _volume_delete-volume: -.. describe:: - - Volume(s) to delete (name or ID) - -volume list ------------ - -List volumes - -.. program:: volume list -.. code:: bash - - openstack volume list - [--project [--project-domain ]] - [--user [--user-domain ]] - [--name ] - [--status ] - [--all-projects] - [--long] - [--limit ] - [--marker ] - -.. option:: --project - - Filter results by ```` (name or ID) (admin only) - - *Volume version 2 only* - -.. option:: --project-domain - - Domain the project belongs to (name or ID). - - This can be used in case collisions between project names exist. - - *Volume version 2 only* - -.. option:: --user - - Filter results by ```` (name or ID) (admin only) - - *Volume version 2 only* - -.. option:: --user-domain - - Domain the user belongs to (name or ID). - - This can be used in case collisions between user names exist. - - *Volume version 2 only* - -.. option:: --name - - Filter results by volume name - -.. option:: --status - - Filter results by status - -.. option:: --all-projects - - Include all projects (admin only) - -.. option:: --long - - List additional fields in output - -.. option:: --limit - - Maximum number of volumes to display - -.. option:: --marker - - The last volume ID of the previous page - - *Volume version 2 only* - -volume migrate --------------- - -Migrate volume to a new host - -.. program:: volume migrate -.. code:: bash - - openstack volume migrate - --host - [--force-host-copy] - [--lock-volume] - - -.. option:: --host - - Destination host (takes the form: host@backend-name#pool) (required) - -.. option:: --force-host-copy - - Enable generic host-based force-migration, - which bypasses driver optimizations - -.. option:: --lock-volume - - If specified, the volume state will be locked and will not allow - a migration to be aborted (possibly by another operation) - - *Volume version 2 only* - -.. _volume_migrate-volume: -.. describe:: - - Volume to migrate (name or ID) - -volume set ----------- - -Set volume properties - -.. program:: volume set -.. 
code:: bash - - openstack volume set - [--name ] - [--size ] - [--description ] - [--no-property] - [--property [...] ] - [--image-property [...] ] - [--state ] - [--attached | --detached ] - [--type ] - [--retype-policy ] - [--bootable | --non-bootable] - [--read-only | --read-write] - - -.. option:: --name - - New volume name - -.. option:: --size - - Extend volume size in GB - -.. option:: --description - - New volume description - -.. option:: --no-property - - Remove all properties from :ref:`\ ` - (specify both :option:`--no-property` and :option:`--property` to - remove the current properties before setting new properties.) - -.. option:: --property - - Set a property on this volume (repeat option to set multiple properties) - -.. option:: --type - - New volume type (name or ID) - - *Volume version 2 only* - -.. option:: --retype-policy - - Migration policy while re-typing volume - ("never" or "on-demand", default is "never" ) - (available only when :option:`--type` option is specified) - - *Volume version 2 only* - -.. option:: --bootable - - Mark volume as bootable - -.. option:: --non-bootable - - Mark volume as non-bootable - -.. option:: --read-only - - Set volume to read-only access mode - -.. option:: --read-write - - Set volume to read-write access mode - -.. option:: --image-property - - Set an image property on this volume - (repeat option to set multiple image properties) - - Image properties are copied along with the image when creating a volume - using ``--image``. Note that these properties are immutable on the image - itself, this option updates the copy attached to this volume. - - *Volume version 2 only* - -.. option:: --state - - New volume state - ("available", "error", "creating", "deleting", "in-use", - "attaching", "detaching", "error_deleting" or "maintenance") (admin only) - (This option simply changes the state of the volume in the database with - no regard to actual status, exercise caution when using) - - *Volume version 2 only* - -.. option:: --attached - - Set volume attachment status to "attached" (admin only) - (This option simply changes the state of the volume in the database with - no regard to actual status, exercise caution when using) - - *Volume version 2 only* - -.. option:: --deattach - - Set volume attachment status to "detached" (admin only) - (This option simply changes the state of the volume in the database with - no regard to actual status, exercise caution when using) - - *Volume version 2 only* - -.. _volume_set-volume: -.. describe:: - - Volume to modify (name or ID) - -volume show ------------ - -Show volume details - -.. program:: volume show -.. code:: bash - - openstack volume show - - -.. _volume_show-volume: -.. describe:: - - Volume to display (name or ID) - -volume unset ------------- - -Unset volume properties - -.. program:: volume unset -.. code:: bash - - openstack volume unset - [--property ] - [--image-property ] - - -.. option:: --property - - Remove a property from volume (repeat option to remove multiple properties) - -.. option:: --image-property - - Remove an image property from volume - (repeat option to remove multiple image properties) - - *Volume version 2 only* - -.. _volume_unset-volume: -.. describe:: - - Volume to modify (name or ID) +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume unset Block Storage v3 - .. autoprogram-cliff:: openstack.volume.v3 - :command: volume summary +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume summary - .. 
autoprogram-cliff:: openstack.volume.v3 - :command: volume revert +.. autoprogram-cliff:: openstack.volume.v3 + :command: volume revert diff --git a/doc/source/cli/commands.rst b/doc/source/cli/commands.rst index d789eceb51..653e2a50bd 100644 --- a/doc/source/cli/commands.rst +++ b/doc/source/cli/commands.rst @@ -191,23 +191,14 @@ conflicts when creating new plugins. For a complete list check out * ``appcontainer service``: (**Application Container (Zun)**) * ``baremetal``: (**Baremetal (Ironic)**) * ``claim``: (**Messaging (Zaqar)**) -* ``cluster``: (**Clustering (Senlin)**) -* ``cluster action``: (**Clustering (Senlin)**) -* ``cluster event``: (**Clustering (Senlin)**) -* ``cluster members``: (**Clustering (Senlin)**) -* ``cluster node``: (**Clustering (Senlin)**) -* ``cluster policy``: (**Clustering (Senlin)**) -* ``cluster policy binding``: (**Clustering (Senlin)**) -* ``cluster policy type``: (**Clustering (Senlin)**) -* ``cluster profile``: (**Clustering (Senlin)**) -* ``cluster profile type``: (**Clustering (Senlin)**) -* ``cluster receiver``: (**Clustering (Senlin)**) +* ``coe ca``: (**Container Orchestration Engine (Magnum)**) +* ``coe cluster``: (**Container Orchestration Engine (Magnum)**) +* ``coe cluster template``: (**Container Orchestration Engine (Magnum)**) +* ``coe quotas``: (**Container Orchestration Engine (Magnum)**) +* ``coe service``: (**Container Orchestration Engine (Magnum)**) +* ``coe stats``: (**Container Orchestration Engine (Magnum)**) * ``cron trigger``: (**Workflow Engine (Mistral)**) * ``database flavor``: (**Database (Trove)**) -* ``dataprocessing data source``: (**Data Processing (Sahara)**) -* ``dataprocessing image``: (**Data Processing (Sahara)**) -* ``dataprocessing image tags``: (**Data Processing (Sahara)**) -* ``dataprocessing plugin``: (**Data Processing (Sahara)**) * ``loadbalancer``: (**Load Balancer (Octavia)**) * ``loadbalancer healthmonitor``: (**Load Balancer (Octavia)**) * ``loadbalancer l7policy``: (**Load Balancer (Octavia)**) @@ -226,13 +217,35 @@ conflicts when creating new plugins. 
For a complete list check out * ``ptr record``: (**DNS (Designate)**) * ``queue``: (**Messaging (Zaqar)**) * ``recordset``: (**DNS (Designate)**) -* ``rsd``: (**Disaggregated Hardware Resource Management (RSD)**) -* ``search`` (**Search (Searchlight)**) -* ``search facet`` (**Search (Searchlight)**) -* ``search resource type`` (**Search (Searchlight)**) * ``secret``: (**Key Manager (Barbican)**) * ``secret container``: (**Key Manager (Barbican)**) * ``secret order``: (**Key Manager (Barbican)**) +* ``share``: (**Share (Manila)**) +* ``share access``: (**Share (Manila)**) +* ``share availability zone``: (**Share (Manila)**) +* ``share backup``: (**Share (Manila)**) +* ``share export location``: (**Share (Manila)**) +* ``share group``: (**Share (Manila)**) +* ``share group snapshot``: (**Share (Manila)**) +* ``share group type``: (**Share (Manila)**) +* ``share instance``: (**Share (Manila)**) +* ``share limits show``: (**Share (Manila)**) +* ``share lock``: (**Share (Manila)**) +* ``share message``: (**Share (Manila)**) +* ``share migration``: (**Share (Manila)**) +* ``share network``: (**Share (Manila)**) +* ``share quota``: (**Share (Manila)**) +* ``share replica``: (**Share (Manila)**) +* ``share security service``: (**Share (Manila)**) +* ``share server``: (**Share (Manila)**) +* ``share server migration``: (**Share (Manila)**) +* ``share service``: (**Share (Manila)**) +* ``share snapshot``: (**Share (Manila)**) +* ``share snapshot access``: (**Share (Manila)**) +* ``share snapshot export location``: (**Share (Manila)**) +* ``share snapshot instance``: (**Share (Manila)**) +* ``share transfer``: (**Share (Manila)**) +* ``share type``: (**Share (Manila)**) * ``software config``: (**Orchestration (Heat)**) * ``software deployment``: (**Orchestration (Heat)**) * ``stack event``: (**Orchestration (Heat)**) @@ -276,7 +289,6 @@ Those actions with an opposite action are noted in parens if applicable. live server migration if possible * ``pause`` (``unpause``) - stop one or more servers and leave them in memory * ``query`` - Query resources by Elasticsearch query string or json format DSL. -* ``purge`` - clean resources associated with a specific project * ``cleanup`` - flexible clean resources associated with a specific project * ``reboot`` - forcibly reboot a server * ``rebuild`` - rebuild a server using (most of) the same arguments as in the original create diff --git a/doc/source/cli/data/cinder.csv b/doc/source/cli/data/cinder.csv index 84ea409e25..1b199400b4 100644 --- a/doc/source/cli/data/cinder.csv +++ b/doc/source/cli/data/cinder.csv @@ -108,17 +108,17 @@ service-list,volume service list,Lists all services. Filter by host and service service-set-log,block storage log level set,(Supported by API versions 3.32 - 3.latest) set-bootable,volume set --bootable / --not-bootable,Update bootable status of a volume. show,volume show,Shows volume details. -snapshot-create,snapshot create,Creates a snapshot. -snapshot-delete,snapshot delete,Remove one or more snapshots. -snapshot-list,snapshot list,Lists all snapshots. +snapshot-create,volume snapshot create,Creates a snapshot. +snapshot-delete,volume snapshot delete,Remove one or more snapshots. +snapshot-list,volume snapshot list,Lists all snapshots. snapshot-manage,volume snapshot create --remote-source ,Manage an existing snapshot. snapshot-manageable-list,block storage snapshot manageable list,Lists all manageable snapshots. 
(Supported by API versions 3.8 - 3.latest) -snapshot-metadata,snapshot set --property k=v / snapshot unset --property k,Sets or deletes snapshot metadata. -snapshot-metadata-show,snapshot show,Shows snapshot metadata. -snapshot-metadata-update-all,snapshot set --property k=v,Updates snapshot metadata. -snapshot-rename,snapshot set --name,Renames a snapshot. -snapshot-reset-state,snapshot set --state,Explicitly updates the snapshot state. -snapshot-show,snapshot show,Shows snapshot details. +snapshot-metadata,volume snapshot set --property k=v / snapshot unset --property k,Sets or deletes snapshot metadata. +snapshot-metadata-show,volume snapshot show,Shows snapshot metadata. +snapshot-metadata-update-all,volume snapshot set --property k=v,Updates snapshot metadata. +snapshot-rename,volume snapshot set --name,Renames a snapshot. +snapshot-reset-state,volume snapshot set --state,Explicitly updates the snapshot state. +snapshot-show,volume snapshot show,Shows snapshot details. snapshot-unmanage,volume snapshot delete --remote,Stop managing a snapshot. summary,volume summary,Get volumes summary. (Supported by API versions 3.12 - 3.latest) thaw-host,volume host set --enable,Thaw and enable the specified cinder-volume host. diff --git a/doc/source/cli/data/glance.csv b/doc/source/cli/data/glance.csv index adca8c0e52..f978962274 100644 --- a/doc/source/cli/data/glance.csv +++ b/doc/source/cli/data/glance.csv @@ -1,7 +1,7 @@ -cache-clear,,"Clear all images from cache, queue or both." -cache-delete,,Delete image from cache/caching queue. -cache-list,,Get cache state. -cache-queue,,Queue image(s) for caching. +cache-clear,cached image clear,"Clear all images from cache, queue or both." +cache-delete,cached image delete,Delete image from cache/caching queue. +cache-list,cached image list,Get cache state. +cache-queue,cached image queue,Queue image(s) for caching. explain,WONTFIX,Describe a specific model. image-create,image create,Create a new image. image-create-via-import, image create --import,"EXPERIMENTAL: Create a new image via image import using glance-direct import method. Missing support for web-download, copy-image and glance-download import methods. The OSC command is also missing support for importing image to specified store as well as all stores (--store, --stores, --all-stores) and skip or stop processing if import fails to one of the store (--allow-failure)" @@ -18,34 +18,34 @@ image-tag-update,image set --tag ,Update an image with the given tag. image-tasks,,Get tasks associated with image. image-update,image set,Update an existing image. image-upload,,Upload data for a specific image. -import-info,,Print import methods available from Glance. +import-info,image import info,Show available import methods from Glance. location-add,,Add a location (and related metadata) to an image. location-delete,,Remove locations (and related metadata) from an image. location-update,,Update metadata of an image's location. -md-namespace-create,,Create a new metadata definitions namespace. -md-namespace-delete,,Delete specified metadata definitions namespace with its contents. -md-namespace-import,,Import a metadata definitions namespace from file or standard input. -md-namespace-list,,List metadata definitions namespaces. -md-namespace-objects-delete,,Delete all metadata definitions objects inside a specific namespace. -md-namespace-properties-delete,,Delete all metadata definitions property inside a specific namespace. 
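As a quick illustration of the renamed mappings above, the corresponding openstackclient invocations look roughly like the following; the snapshot and image names are placeholders, not values taken from this change.

.. code:: bash

    # List volume snapshots with the additional columns (old cinder "snapshot-list")
    openstack volume snapshot list --long

    # Set snapshot metadata via properties (old cinder "snapshot-metadata")
    openstack volume snapshot set --property key=value my-snapshot

    # Queue an image for caching (old glance "cache-queue")
    openstack cached image queue my-image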
-md-namespace-resource-type-list,,List resource types associated to specific namespace. -md-namespace-show,,Describe a specific metadata definitions namespace. +md-namespace-create,image metadef namespace create,Create a new metadata definitions namespace. +md-namespace-delete,image metadef namespace delete,Delete specified metadata definitions namespace with its contents. +md-namespace-import,WONTFIX,Import a metadata definitions namespace from file or standard input. +md-namespace-list,image metadef namespace list,List metadata definitions namespaces. +md-namespace-objects-delete,image metadef object delete,Delete all metadata definitions objects inside a specific namespace. +md-namespace-properties-delete,image metadef property delete,Delete all metadata definitions property inside a specific namespace. +md-namespace-resource-type-list,image metadef resource type association list,List resource types associated to specific namespace. +md-namespace-show,image metadef namespace show,Describe a specific metadata definitions namespace. md-namespace-tags-delete,,Delete all metadata definitions tags inside a specific namespace. md-namespace-update,,Update an existing metadata definitions namespace. -md-object-create,,Create a new metadata definitions object inside a namespace. -md-object-delete,,Delete a specific metadata definitions object inside a namespace. -md-object-list,,List metadata definitions objects inside a specific namespace. -md-object-property-show,,Describe a specific metadata definitions property inside an object. -md-object-show,,Describe a specific metadata definitions object inside a namespace. -md-object-update,,Update metadata definitions object inside a namespace. -md-property-create,,Create a new metadata definitions property inside a namespace. -md-property-delete,,Delete a specific metadata definitions property inside a namespace. -md-property-list,,List metadata definitions properties inside a specific namespace. -md-property-show,,Describe a specific metadata definitions property inside a namespace. -md-property-update,,Update metadata definitions property inside a namespace. -md-resource-type-associate,,Associate resource type with a metadata definitions namespace. -md-resource-type-deassociate,,Deassociate resource type with a metadata definitions namespace. -md-resource-type-list,,List available resource type names. +md-object-create,image metadef object create,Create a new metadata definitions object inside a namespace. +md-object-show,image metadef object show,Describe a specific metadata definitions object inside a namespace. +md-object-list,image metadef object list,List metadata definitions objects inside a specific namespace. +md-object-delete,image metadef object delete,Delete a specific metadata definitions object inside a namespace. +md-object-property-show,image metadef object property show,Describe a specific metadata definitions property inside an object. +md-object-update,image metadef object update,Update metadata definitions object inside a namespace. +md-property-create,image metadef property create,Create a new metadata definitions property inside a namespace. +md-property-delete,image metadef property delete,Delete a specific metadata definitions property inside a namespace. +md-property-list,image metadef property list,List metadata definitions properties inside a specific namespace. +md-property-show,image metadef property show,Describe a specific metadata definitions property inside a namespace. 
+md-property-update,image metadef property set,Update metadata definitions property inside a namespace. +md-resource-type-associate,image metadef resource type association create,Associate resource type with a metadata definitions namespace. +md-resource-type-deassociate,image metadef resource type association delete,Deassociate resource type with a metadata definitions namespace. +md-resource-type-list,image metadef resource type list,List available resource type names. md-tag-create,,Add a new metadata definitions tag inside a namespace. md-tag-create-multiple,,Create new metadata definitions tags inside a namespace. md-tag-delete,,Delete a specific metadata definitions tag inside a namespace. @@ -54,10 +54,10 @@ md-tag-show,,Describe a specific metadata definitions tag inside a namespace. md-tag-update,,Rename a metadata definitions tag inside a namespace. member-create,image add project,Create member for a given image. member-delete,image remove project,Delete image member. -member-get,,Show details of an image member +member-get,image member get,Show details of an image member member-list,image member list,Describe sharing permissions by image. member-update,image set --accept --reject --status,Update the status of a member for a given image. -stores-delete,,Delete image from specific store. +stores-delete,image delete --store,Delete image from specific store. stores-info,,Print available backends from Glance. task-create,WONTFIX,Create a new task. task-list,image task list,List tasks you can access. diff --git a/doc/source/cli/data/keystone.csv b/doc/source/cli/data/keystone.csv index bcc305515f..2a7e7f5690 100644 --- a/doc/source/cli/data/keystone.csv +++ b/doc/source/cli/data/keystone.csv @@ -1,37 +1,37 @@ -catalog,catalog show,"List service catalog, possibly filtered by service." -ec2-credentials-create,ec2 credentials create,Create EC2-compatible credentials for user per tenant. -ec2-credentials-delete,ec2 credentials delete,Delete EC2-compatible credentials. -ec2-credentials-get,ec2 credentials show,Display EC2-compatible credentials. -ec2-credentials-list,ec2 credentials list,List EC2-compatible credentials for a user. -endpoint-create,endpoint create,Create a new endpoint associated with a service. -endpoint-delete,endpoint delete,Delete a service endpoint. -endpoint-get,endpoint get,Find endpoint filtered by a specific attribute or service type. -endpoint-list,endpoint list,List configured service endpoints. -password-update,user password set,Update own password. -role-create,role create,Create new role. -role-delete,role delete,Delete role. -role-get,role show,Display role details. -role-list,role list,List all roles. -service-create,service create,Add service to Service Catalog. -service-delete,service delete,Delete service from Service Catalog. -service-get,service show,Display service from Service Catalog. -service-list,service list,List all services in Service Catalog. -tenant-create,project create,Create new tenant. -tenant-delete,project delete,Delete tenant. -tenant-get,project show,Display tenant details. -tenant-list,project list,List all tenants. -tenant-update,project set,"Update tenant name, description, enabled status." -token-get,token issue,Display the current user token. -user-create,user create,Create new user. -user-delete,user delete,Delete user. -user-get,user show,Display user details. -user-list,user list,List users. -user-password-update,user set --password,Update user password. -user-role-add,role add --user --project,Add role to user. 
-user-role-list,role assignment list --user --project,List roles granted to a user. -user-role-remove,role remove --user --project,Remove role from user. -user-update,user set,"Update user's name, email, and enabled status." -discover,WONTFIX,"Discover Keystone servers, supported API versions and extensions." -bootstrap,WONTFIX,"Grants a new role to a new user on a new tenant, after creating each." -bash-completion,complete,Prints all of the commands and options to stdout. -help,help,Display help about this program or one of its subcommands. +catalog,catalog show,"List service catalog, possibly filtered by service." +ec2-credentials-create,ec2 credentials create,Create EC2-compatible credentials for user per tenant. +ec2-credentials-delete,ec2 credentials delete,Delete EC2-compatible credentials. +ec2-credentials-get,ec2 credentials show,Display EC2-compatible credentials. +ec2-credentials-list,ec2 credentials list,List EC2-compatible credentials for a user. +endpoint-create,endpoint create,Create a new endpoint associated with a service. +endpoint-delete,endpoint delete,Delete a service endpoint. +endpoint-get,endpoint get,Find endpoint filtered by a specific attribute or service type. +endpoint-list,endpoint list,List configured service endpoints. +password-update,user password set,Update own password. +role-create,role create,Create new role. +role-delete,role delete,Delete role. +role-get,role show,Display role details. +role-list,role list,List all roles. +service-create,service create,Add service to Service Catalog. +service-delete,service delete,Delete service from Service Catalog. +service-get,service show,Display service from Service Catalog. +service-list,service list,List all services in Service Catalog. +tenant-create,project create,Create new tenant. +tenant-delete,project delete,Delete tenant. +tenant-get,project show,Display tenant details. +tenant-list,project list,List all tenants. +tenant-update,project set,"Update tenant name, description, enabled status." +token-get,token issue,Display the current user token. +user-create,user create,Create new user. +user-delete,user delete,Delete user. +user-get,user show,Display user details. +user-list,user list,List users. +user-password-update,user set --password,Update user password. +user-role-add,role add --user --project,Add role to user. +user-role-list,role assignment list --user --project,List roles granted to a user. +user-role-remove,role remove --user --project,Remove role from user. +user-update,user set,"Update user's name, email, and enabled status." +discover,WONTFIX,"Discover Keystone servers, supported API versions and extensions." +bootstrap,WONTFIX,"Grants a new role to a new user on a new tenant, after creating each." +bash-completion,complete,Prints all of the commands and options to stdout. +help,help,Display help about this program or one of its subcommands. diff --git a/doc/source/cli/data/swift.csv b/doc/source/cli/data/swift.csv index 681474125b..87d781e271 100644 --- a/doc/source/cli/data/swift.csv +++ b/doc/source/cli/data/swift.csv @@ -1,10 +1,10 @@ -delete,object delete / container delete,Delete a container or objects within a container. -download,object save / container save,Download objects from containers. -list,object list / container list,Lists the containers for the account or the objects for a container. -post,container create / object set / container set / object store account set,"Updates meta information for the account, container, or object." -copy,,"Copies object, optionally adds meta." 
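For reference, the swift-to-openstackclient mappings in this table translate into invocations roughly like the following; the container and object names are placeholders for illustration only.

.. code:: bash

    # List containers, then the objects in one container (swift "list")
    openstack container list
    openstack object list my-container

    # Download a single object (swift "download")
    openstack object save my-container my-object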
-stat,object show / container show / object store account show,"Displays information for the account, container, or object." -upload,object create,Uploads files or directories to the given container. -capabilities,,List cluster capabilities. -tempurl,,Create a temporary URL. -auth,WONTFIX,Display auth related environment variables. +delete,object delete / container delete,Delete a container or objects within a container. +download,object save / container save,Download objects from containers. +list,object list / container list,Lists the containers for the account or the objects for a container. +post,container create / object set / container set / object store account set,"Updates meta information for the account, container, or object." +copy,,"Copies object, optionally adds meta." +stat,object show / container show / object store account show,"Displays information for the account, container, or object." +upload,object create,Uploads files or directories to the given container. +capabilities,,List cluster capabilities. +tempurl,,Create a temporary URL. +auth,WONTFIX,Display auth related environment variables. diff --git a/doc/source/cli/man/openstack.rst b/doc/source/cli/man/openstack.rst index 29db064199..8f7124dd6d 100644 --- a/doc/source/cli/man/openstack.rst +++ b/doc/source/cli/man/openstack.rst @@ -593,8 +593,8 @@ The following environment variables can be set to alter the behaviour of :progra BUGS ==== -Bug reports are accepted at the python-openstackclient StoryBoard project -"https://storyboard.openstack.org/#!/project/975". +Bug reports are accepted at the python-openstackclient Launchpad project +"https://bugs.launchpad.net/python-openstackclient". AUTHORS diff --git a/doc/source/cli/plugin-commands/aodh.rst b/doc/source/cli/plugin-commands/aodh.rst new file mode 100644 index 0000000000..5d8b4332cf --- /dev/null +++ b/doc/source/cli/plugin-commands/aodh.rst @@ -0,0 +1,4 @@ +aodh +---- + +.. autoprogram-cliff:: openstack.alarming.v2 diff --git a/doc/source/cli/plugin-commands/index.rst b/doc/source/cli/plugin-commands/index.rst index e2e0dfa4d1..2622ee58b3 100644 --- a/doc/source/cli/plugin-commands/index.rst +++ b/doc/source/cli/plugin-commands/index.rst @@ -7,6 +7,7 @@ Plugin Commands .. toctree:: :maxdepth: 1 + aodh barbican cyborg designate @@ -14,36 +15,13 @@ Plugin Commands heat ironic ironic-inspector + magnum manila mistral neutron octavia placement - rsd - sahara - senlin trove watcher zaqar zun - -.. TODO(efried): Make pages for the following once they're fixed. - -.. aodh -.. # aodhclient docs build is failing with recent pyparsing -.. # autoprogram-cliff:: openstack.alarming.v2 - -.. cue -.. # cueclient is not in global-requirements -.. # list-plugins:: openstack.mb.v1 -.. # :detailed: - -.. murano -.. # the murano docs cause warnings and a broken docs build -.. # .. list-plugins:: openstack.application_catalog.v1 -.. # :detailed: - -.. tripleo -.. # tripleoclient is not in global-requirements -.. # list-plugins:: openstack.tripleoclient.v1 -.. # :detailed: diff --git a/doc/source/cli/plugin-commands/magnum.rst b/doc/source/cli/plugin-commands/magnum.rst new file mode 100644 index 0000000000..9b3f9f7e8a --- /dev/null +++ b/doc/source/cli/plugin-commands/magnum.rst @@ -0,0 +1,4 @@ +magnum +------ + +.. 
autoprogram-cliff:: openstack.container_infra.v1 diff --git a/doc/source/cli/plugin-commands/rsd.rst b/doc/source/cli/plugin-commands/rsd.rst deleted file mode 100644 index d28cea316f..0000000000 --- a/doc/source/cli/plugin-commands/rsd.rst +++ /dev/null @@ -1,4 +0,0 @@ -rsd ---- - -.. autoprogram-cliff:: openstack.rsd.v2 diff --git a/doc/source/cli/plugin-commands/sahara.rst b/doc/source/cli/plugin-commands/sahara.rst deleted file mode 100644 index 7c51756a3a..0000000000 --- a/doc/source/cli/plugin-commands/sahara.rst +++ /dev/null @@ -1,4 +0,0 @@ -sahara ------- - -.. autoprogram-cliff:: openstack.data_processing.v1 diff --git a/doc/source/cli/plugin-commands/senlin.rst b/doc/source/cli/plugin-commands/senlin.rst deleted file mode 100644 index 90929058f8..0000000000 --- a/doc/source/cli/plugin-commands/senlin.rst +++ /dev/null @@ -1,4 +0,0 @@ -senlin ------- - -.. autoprogram-cliff:: openstack.clustering.v1 diff --git a/doc/source/conf.py b/doc/source/conf.py index 5eb5f59f69..6ee3145558 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # OpenStack Command Line Client documentation build configuration file, created # by sphinx-quickstart on Wed May 16 12:05:58 2012. @@ -15,22 +14,22 @@ # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.todo', - 'openstackdocstheme', - 'stevedore.sphinxext', - 'cliff.sphinxext', - 'sphinxcontrib.apidoc', - ] +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.doctest', + 'sphinx.ext.todo', + 'openstackdocstheme', + 'stevedore.sphinxext', + 'cliff.sphinxext', + 'sphinxcontrib.apidoc', +] # openstackdocstheme options openstackdocs_repo_name = 'openstack/python-openstackclient' -openstackdocs_use_storyboard = True openstackdocs_auto_name = False # Add project 'foo' to this list to enable the :foo-doc: role @@ -39,13 +38,13 @@ ] # Add any paths that contain templates here, relative to this directory. -#templates_path = ['_templates'] +# templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' @@ -56,13 +55,13 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -70,18 +69,18 @@ # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
-#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' @@ -94,75 +93,75 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -#html_theme_path = ["."] -#html_theme = '_theme' +# html_theme_path = ["."] +# html_theme = '_theme' html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +# html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'OpenStackCommandLineClientdoc' @@ -174,44 +173,46 @@ # -- Options for LaTeX output ------------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]) # . latex_documents = [ - ('index', 'OpenStackCommandLineClient.tex', - 'OpenStack Command Line Client Documentation', - 'OpenStack', 'manual'), + ( + 'index', + 'OpenStackCommandLineClient.tex', + 'OpenStack Command Line Client Documentation', + 'OpenStack', + 'manual', + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output ------------------------------------------- @@ -229,7 +230,7 @@ ] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- @@ -238,21 +239,25 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'OpenStackCommandLineClient', - 'OpenStack Command Line Client Documentation', - 'OpenStack', 'OpenStackCommandLineClient', - 'One line description of project.', - 'Miscellaneous'), + ( + 'index', + 'OpenStackCommandLineClient', + 'OpenStack Command Line Client Documentation', + 'OpenStack', + 'OpenStackCommandLineClient', + 'One line description of project.', + 'Miscellaneous', + ), ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # -- Options for cliff.sphinxext plugin --------------------------------------- @@ -260,8 +265,16 @@ autoprogram_cliff_application = 'openstack' autoprogram_cliff_ignored = [ - '--help', '--format', '--column', '--max-width', '--fit-width', - '--print-empty', '--prefix', '--noindent', '--quote'] + '--help', + '--format', + '--column', + '--max-width', + '--fit-width', + '--print-empty', + '--prefix', + '--noindent', + '--quote', +] # Prevent cliff from generating "This command is provided by the # python-openstackclient plugin." 
diff --git a/doc/source/contributor/command-errors.rst b/doc/source/contributor/command-errors.rst index c4adb7d190..f47dfec7c3 100644 --- a/doc/source/contributor/command-errors.rst +++ b/doc/source/contributor/command-errors.rst @@ -29,8 +29,9 @@ Example This example is taken from ``keypair create`` where the ``--public-key`` option specifies a file containing the public key to upload. If the file is not found, -the IOError exception is trapped and a more specific CommandError exception is -raised that includes the name of the file that was attempted to be opened. +the ``IOError`` exception is trapped and a more specific ``CommandError`` +exception is raised that includes the name of the file that was attempted to be +opened. .. code-block:: python @@ -45,7 +46,7 @@ raised that includes the name of the file that was attempted to be opened. public_key = parsed_args.public_key if public_key: try: - with io.open( + with open( os.path.expanduser(parsed_args.public_key), "rb" ) as p: @@ -56,8 +57,8 @@ raised that includes the name of the file that was attempted to be opened. msg % (parsed_args.public_key, e), ) - keypair = compute_client.keypairs.create( - parsed_args.name, + keypair = compute_client.create_keypair( + name=parsed_args.name, public_key=public_key, ) diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst index 2aa9498f1b..445aac92d1 100644 --- a/doc/source/contributor/index.rst +++ b/doc/source/contributor/index.rst @@ -11,7 +11,6 @@ command-wrappers command-errors command-logs - specs/commands plugins humaninterfaceguide api/modules diff --git a/doc/source/contributor/plugins.rst b/doc/source/contributor/plugins.rst index 35d8d2070d..beebb03107 100644 --- a/doc/source/contributor/plugins.rst +++ b/doc/source/contributor/plugins.rst @@ -31,28 +31,21 @@ The following is a list of projects that are an OpenStackClient plugin. - python-heatclient - python-ironicclient - python-ironic-inspector-client +- python-magnumclient +- python-manilaclient - python-mistralclient -- python-muranoclient -- python-neutronclient\*\*\* +- python-neutronclient\*\* - python-octaviaclient -- python-rsdclient -- python-saharaclient -- python-senlinclient -- python-tripleoclient\*\* - python-troveclient - python-watcherclient - python-zaqarclient - python-zunclient -\*\* Note that some clients are not listed in global-requirements. - -\*\*\* Project contains advanced network services. +\*\* Project contains advanced network services. The following is a list of projects that are not an OpenStackClient plugin. -- python-magnumclient - python-monascaclient -- python-solumclient Implementation ============== diff --git a/doc/source/contributor/specs/command-objects/example.rst b/doc/source/contributor/specs/command-objects/example.rst deleted file mode 100644 index fa559433e2..0000000000 --- a/doc/source/contributor/specs/command-objects/example.rst +++ /dev/null @@ -1,86 +0,0 @@ -======= -example -======= - -This is a specification for the ``example`` command object. It is not intended -to be a complete template for new commands since other actions, options -and/or arguments may be used. You can include general specification information -before the commands below. This information could include links to related material -or descriptions of similar commands. - -[example API name] [example API version] - -example create --------------- - -Create new example - -.. program:: example create -.. code:: bash - - openstack example create - - -.. 
describe:: - - New example name - -example delete --------------- - -Delete example(s) - -.. program:: example delete -.. code:: bash - - openstack example delete - [ ...] - -.. describe:: - - Example(s) to delete (name or ID) - -example list ------------- - -List examples - -.. program:: example list -.. code:: bash - - openstack example list - -example set ------------ - -Set example properties - -.. program:: example set -.. code:: bash - - openstack example set - [--name ] - - -.. option:: --name - - New example name - -.. describe:: - - Example to modify (name or ID) - -example show ------------- - -Display example details - -.. program:: example show -.. code:: bash - - openstack example show - - -.. describe:: - - Example to display (name or ID) diff --git a/doc/source/contributor/specs/commands.rst b/doc/source/contributor/specs/commands.rst deleted file mode 100644 index f9d757e785..0000000000 --- a/doc/source/contributor/specs/commands.rst +++ /dev/null @@ -1,44 +0,0 @@ -============= -Command Specs -============= - -Specifications for new commands, objects and actions are listed below. -These specifications have not been implemented. See -:ref:`command-list` for implemented commands and -:ref:`command-structure` for implemented objects and actions. - -It is optional to propose a specifications patch for new commands, -objects and actions here before submitting the implementation. Once your -specifications patch merges then you may proceed with the implementation. -Your implementation patches should move applicable portions of the -specifications patch to the official :ref:`command-list` -and :ref:`command-structure` documentation. - -Objects Specs -------------- - -Add specifications for new objects based on the ``example`` object. - -Actions Specs -------------- - -Add specifications for new actions based on the ``example`` action. - -.. toctree:: - :maxdepth: 1 - - network-topology - -Commands Specs --------------- - -Add specifications for new commands based on the commands for the -``example`` object. The ``example`` commands are not intended to -be a complete template for new commands since other actions, options -and/or arguments may be used. - -.. toctree:: - :glob: - :maxdepth: 2 - - command-objects/* diff --git a/doc/source/contributor/specs/network-topology.rst b/doc/source/contributor/specs/network-topology.rst deleted file mode 100644 index 6789ee975f..0000000000 --- a/doc/source/contributor/specs/network-topology.rst +++ /dev/null @@ -1,44 +0,0 @@ -================ -network topology -================ - -A **network topology** shows a topological graph about -devices which connect to the specific network. Also, it -will return availability information for each individual -device within the network as well. One other thing to note -is that it is the intention for OSC to collect data from -existing REST APIs - -Network v2 - -network topology list ---------------------- - -List network topologies - -.. program:: network topology list -.. code:: bash - - openstack network topology list - [--project ] - -.. option:: --project - - List network topologies for given project - (name or ID) - -network topology show ---------------------- - -Show network topology details - -.. program:: network topology show -.. code:: bash - - openstack network topology show - - -.. _network_topology_show-network: -.. 
describe:: - - Show network topology for a specific network (name or ID) diff --git a/doc/source/index.rst b/doc/source/index.rst index 7675d6c379..21ae8efc31 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -54,16 +54,15 @@ Contributing ============ OpenStackClient utilizes all of the usual OpenStack processes and requirements for -contributions. The code is hosted `on OpenStack's Git server`_. `Bug reports`_ -may be submitted to the :code:`python-openstackclient` `Storyboard project`_. +contributions. The code is hosted `on OpenStack's Git server`_. Bug reports +may be submitted to the :code:`python-openstackclient` `Launchpad project`_. Code may be submitted to the :code:`openstack/python-openstackclient` project using `Gerrit`_. Developers may also be found in the `IRC channel`_ ``#openstack-sdks``. .. _`on OpenStack's Git server`: https://opendev.org/openstack/python-openstackclient/ -.. _`Storyboard project`: https://storyboard.openstack.org/#!/project/openstack/python-openstackclient +.. _`Launchpad project`: https://bugs.launchpad.net/python-openstackclient .. _Gerrit: http://docs.openstack.org/infra/manual/developers.html#development-workflow -.. _Bug reports: https://storyboard.openstack.org/#!/project/975 .. _PyPi: https://pypi.org/project/python-openstackclient .. _tarball: http://tarballs.openstack.org/python-openstackclient .. _IRC channel: https://wiki.openstack.org/wiki/IRC diff --git a/examples/common.py b/examples/common.py index d472fe6b91..650139ec27 100755 --- a/examples/common.py +++ b/examples/common.py @@ -57,6 +57,7 @@ # Generally useful stuff often found in a utils module + def env(*vars, **kwargs): """Search for the first defined of possibly many env vars @@ -73,6 +74,7 @@ def env(*vars, **kwargs): # Common Example functions + def base_parser(parser): """Set up some of the common CLI options @@ -128,7 +130,8 @@ def base_parser(parser): help="Print API call timing info", ) parser.add_argument( - '-v', '--verbose', + '-v', + '--verbose', action='count', dest='verbose_level', default=1, @@ -225,24 +228,22 @@ def make_session(opts, **kwargs): ) auth_p = auth_plugin.load_from_options(**auth_params) - session = ks_session.Session( - auth=auth_p, - **kwargs - ) + session = ks_session.Session(auth=auth_p, **kwargs) return session # Top-level functions + def run(opts): """Default run command""" # Do some basic testing here sys.stdout.write("Default run command\n") - sys.stdout.write("Verbose level: %s\n" % opts.verbose_level) - sys.stdout.write("Debug: %s\n" % opts.debug) - sys.stdout.write("dump_stack_trace: %s\n" % dump_stack_trace) + sys.stdout.write(f"Verbose level: {opts.verbose_level}\n") + sys.stdout.write(f"Debug: {opts.debug}\n") + sys.stdout.write(f"dump_stack_trace: {dump_stack_trace}\n") def setup(): diff --git a/examples/object_api.py b/examples/object_api.py index 577fc052fa..5917d35fa6 100755 --- a/examples/object_api.py +++ b/examples/object_api.py @@ -94,14 +94,14 @@ def run(opts): c_list = obj_api.container_list() print("Name\tCount\tBytes") for c in c_list: - print("%s\t%d\t%d" % (c['name'], c['count'], c['bytes'])) + print(f"{c['name']}\t{c['count']}\t{c['bytes']}") if len(c_list) > 0: # See what is in the first container o_list = obj_api.object_list(c_list[0]['name']) print("\nObject") for o in o_list: - print("%s" % o) + print(f"{o}") if __name__ == "__main__": diff --git a/examples/osc-lib.py b/examples/osc-lib.py index cc04bc70fe..fb8ea2dc3a 100755 --- a/examples/osc-lib.py +++ b/examples/osc-lib.py @@ -87,20 +87,20 @@ def run(opts): 
c_list = client_manager.object_store.container_list() print("Name\tCount\tBytes") for c in c_list: - print("%s\t%d\t%d" % (c['name'], c['count'], c['bytes'])) + print(f"{c['name']}\t{c['count']}\t{c['bytes']}") if len(c_list) > 0: # See what is in the first container o_list = client_manager.object_store.object_list(c_list[0]['name']) print("\nObject") for o in o_list: - print("%s" % o) + print(f"{o}") # Look at the compute flavors flavor_list = client_manager.compute.flavors.list() print("\nFlavors:") for f in flavor_list: - print("%s" % f) + print(f"{f}") if __name__ == "__main__": diff --git a/hacking/checks.py b/hacking/checks.py new file mode 100644 index 0000000000..0eb485e7e2 --- /dev/null +++ b/hacking/checks.py @@ -0,0 +1,179 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ast +import os +import re + +from hacking import core + +""" +Guidelines for writing new hacking checks + + - Use only for python-openstackclient specific tests. OpenStack general tests + should be submitted to the common 'hacking' module. + - Pick numbers in the range O4xx. Find the current test with the highest + allocated number and then pick the next value. +""" + + +@core.flake8ext +def assert_no_oslo(logical_line): + """Check for use of oslo libraries. + + O400 + """ + if re.match(r'(from|import) oslo_.*', logical_line): + yield (0, "0400: oslo libraries should not be used in SDK projects") + + +@core.flake8ext +def assert_no_duplicated_setup(logical_line, filename): + """Check for use of various unnecessary test duplications. + + O401 + """ + if os.path.join('openstackclient', 'tests', 'unit') not in filename: + return + + if re.match(r'self.app = .*\(self.app, self.namespace\)', logical_line): + yield ( + 0, + 'O401: It is not necessary to create dummy Namespace objects', + ) + + if os.path.basename(filename) != 'fakes.py': + if re.match( + r'self.[a-z_]+_client = self.app.client_manager.*', logical_line + ): + yield ( + 0, + "O401: Aliases for mocks of the service client are already " + "provided by the respective service's FakeClientMixin class", + ) + + if match := re.match( + r'self.app.client_manager.([a-z_]+) = mock.Mock', logical_line + ): + service = match.group(1) + if service == 'auth_ref': + return + yield ( + 0, + f"O401: client_manager.{service} mocks are already provided " + f"by the {service} service's FakeClientMixin class", + ) + + +@core.flake8ext +def assert_use_of_client_aliases(logical_line): + """Ensure we use $service_client instead of $sdk_connection.service. 
+
+    O402
+    """
+    # we should expand the list of services as we drop legacy clients
+    if match := re.match(
+        r'self\.app\.client_manager\.sdk_connection\.(compute|network|image)',
+        logical_line,
+    ):
+        service = match.group(1)
+        yield (0, f"O402: prefer {service}_client to sdk_connection.{service}")
+
+    if match := re.match(
+        r'(self\.app\.client_manager\.(compute|network|image)+\.[a-z_]+) = mock.Mock',  # noqa: E501
+        logical_line,
+    ):
+        yield (
+            0,
+            f"O402: {match.group(1)} is already a mock: there's no need to "
+            f"assign a new mock.Mock instance.",
+        )
+
+    if match := re.match(
+        r'(self\.(compute|network|image)_client\.[a-z_]+) = mock.Mock',
+        logical_line,
+    ):
+        yield (
+            0,
+            f"O402: {match.group(1)} is already a mock: there's no need to "
+            f"assign a new mock.Mock instance.",
+        )
+
+
+class SDKProxyFindChecker(ast.NodeVisitor):
+    """NodeVisitor to find ``*_client.find_*`` statements."""
+
+    def __init__(self):
+        self.error = False
+
+    def visit_Call(self, node):
+        # No need to keep visiting the AST if we already found something.
+        if self.error:
+            return
+
+        self.generic_visit(node)
+
+        if not (
+            isinstance(node.func, ast.Attribute)
+            and node.func.attr.startswith('find_')  # and
+            # isinstance(node.func.value, ast.Attribute) and
+            # node.func.value.attr.endswith('_client')
+        ):
+            # print(f'skipping: got {node.func}')
+            return
+
+        if not (
+            (
+                # handle calls like 'identity_client.find_project'
+                isinstance(node.func.value, ast.Name)
+                and node.func.value.id.endswith('client')
+            )
+            or (
+                # handle calls like 'self.app.client_manager.image.find_image'
+                isinstance(node.func.value, ast.Attribute)
+                and node.func.value.attr
+                in ('identity', 'network', 'image', 'compute')
+            )
+        ):
+            return
+
+        if not any(kw.arg == 'ignore_missing' for kw in node.keywords):
+            self.error = True
+
+
+@core.flake8ext
+def assert_find_ignore_missing_kwargs(logical_line, filename):
+    """Ensure ignore_missing is always used for ``find_*`` SDK proxy calls.
+ + Okay: self.compute_client.find_server(foo, ignore_missing=True) + Okay: self.image_client.find_server(foo, ignore_missing=False) + Okay: self.volume_client.volumes.find(name='foo') + O403: self.network_client.find_network(parsed_args.network) + O403: self.compute_client.find_flavor(flavor_id, get_extra_specs=True) + """ + if 'tests' in filename: + return + + checker = SDKProxyFindChecker() + try: + parsed_logical_line = ast.parse(logical_line) + except SyntaxError: + # let flake8 catch this itself + # https://github.com/PyCQA/flake8/issues/1948 + return + checker.visit(parsed_logical_line) + if checker.error: + yield ( + 0, + 'O403: Calls to find_* proxy methods must explicitly set ' + 'ignore_missing', + ) diff --git a/openstackclient/api/api.py b/openstackclient/api/api.py index d4772f94c3..5f78b0ee54 100644 --- a/openstackclient/api/api.py +++ b/openstackclient/api/api.py @@ -16,12 +16,12 @@ from keystoneauth1 import exceptions as ks_exceptions from keystoneauth1 import session as ks_session from osc_lib import exceptions -import simplejson as json +import requests from openstackclient.i18n import _ -class KeystoneSession(object): +class KeystoneSession: """Wrapper for the Keystone Session Restore some requests.session.Session compatibility; @@ -30,12 +30,7 @@ class KeystoneSession(object): """ - def __init__( - self, - session=None, - endpoint=None, - **kwargs - ): + def __init__(self, session=None, endpoint=None, **kwargs): """Base object that contains some common API objects and methods :param Session session: @@ -45,7 +40,7 @@ def __init__( requests on this API. """ - super(KeystoneSession, self).__init__() + super().__init__() # a requests.Session-style interface self.session = session @@ -87,11 +82,7 @@ class BaseAPI(KeystoneSession): """Base API""" def __init__( - self, - session=None, - service_type=None, - endpoint=None, - **kwargs + self, session=None, service_type=None, endpoint=None, **kwargs ): """Base object that contains some common API objects and methods @@ -104,19 +95,13 @@ def __init__( requests on this API. """ - super(BaseAPI, self).__init__(session=session, endpoint=endpoint) + super().__init__(session=session, endpoint=endpoint) self.service_type = service_type # The basic action methods all take a Session and return dict/lists - def create( - self, - url, - session=None, - method=None, - **params - ): + def create(self, url, session=None, method=None, **params): """Create a new resource :param string url: @@ -133,15 +118,10 @@ def create( # Should this move into _requests()? 
try: return ret.json() - except json.JSONDecodeError: + except requests.JSONDecodeError: return ret - def delete( - self, - url, - session=None, - **params - ): + def delete(self, url, session=None, **params): """Delete a resource :param string url: @@ -152,14 +132,7 @@ def delete( return self._request('DELETE', url, **params) - def list( - self, - path, - session=None, - body=None, - detailed=False, - **params - ): + def list(self, path, session=None, body=None, detailed=False, **params): """Return a list of resources GET ${ENDPOINT}/${PATH}?${PARAMS} @@ -196,7 +169,7 @@ def list( ) try: return ret.json() - except json.JSONDecodeError: + except requests.JSONDecodeError: return ret # Layered actions built on top of the basic action methods do not @@ -255,9 +228,7 @@ def getlist(kw): if len(data) > 1: msg = _("Multiple %(resource)s exist with %(attr)s='%(value)s'") raise exceptions.CommandError( - msg % {'resource': resource, - 'attr': attr, - 'value': value} + msg % {'resource': resource, 'attr': attr, 'value': value} ) # Search by id @@ -267,16 +238,10 @@ def getlist(kw): return data[0] msg = _("No %(resource)s with a %(attr)s or ID of '%(value)s' found") raise exceptions.CommandError( - msg % {'resource': resource, - 'attr': attr, - 'value': value} + msg % {'resource': resource, 'attr': attr, 'value': value} ) - def find_bulk( - self, - path, - **kwargs - ): + def find_bulk(self, path, **kwargs): """Bulk load and filter locally :param string path: @@ -302,11 +267,7 @@ def find_bulk( return ret - def find_one( - self, - path, - **kwargs - ): + def find_one(self, path, **kwargs): """Find a resource by name or ID :param string path: @@ -342,11 +303,11 @@ def find( """ try: - ret = self._request('GET', "/%s/%s" % (path, value)).json() + ret = self._request('GET', f"/{path}/{value}").json() except ks_exceptions.NotFound: kwargs = {attr: value} try: - ret = self.find_one("/%s/detail" % (path), **kwargs) + ret = self.find_one(f"/{path}/detail", **kwargs) except ks_exceptions.NotFound: msg = _("%s not found") % value raise exceptions.NotFound(msg) diff --git a/openstackclient/api/compute_v2.py b/openstackclient/api/compute_v2.py index e30177a240..41e1b685b0 100644 --- a/openstackclient/api/compute_v2.py +++ b/openstackclient/api/compute_v2.py @@ -9,648 +9,359 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -"""Compute v2 API Library""" +"""Compute v2 API Library + +A collection of wrappers for deprecated Compute v2 APIs that are not +intentionally supported by SDK. Most of these are proxy APIs. +""" + +import http -from keystoneauth1 import exceptions as ksa_exceptions -from osc_lib.api import api +from openstack import exceptions as sdk_exceptions from osc_lib import exceptions -from osc_lib.i18n import _ - - -# TODO(dtroyer): Mingrate this to osc-lib -class InvalidValue(Exception): - """An argument value is not valid: wrong type, out of range, etc""" - message = "Supplied value is not valid" - - -class APIv2(api.BaseAPI): - """Compute v2 API""" - - def __init__(self, **kwargs): - super(APIv2, self).__init__(**kwargs) - - # Overrides - - def _check_integer(self, value, msg=None): - """Attempt to convert value to an integer - - Raises InvalidValue on failure - - :param value: - Convert this to an integer. None is converted to 0 (zero). - :param msg: - An alternate message for the exception, must include exactly - one substitution to receive the attempted value. 
- """ - - if value is None: - return 0 - - try: - value = int(value) - except (TypeError, ValueError): - if not msg: - msg = _("%s is not an integer") % value - raise InvalidValue(msg) - return value - - # TODO(dtroyer): Override find() until these fixes get into an osc-lib - # minimum release - def find( - self, - path, - value=None, - attr=None, - ): - """Find a single resource by name or ID - - :param string path: - The API-specific portion of the URL path - :param string value: - search expression (required, really) - :param string attr: - name of attribute for secondary search - """ - - try: - ret = self._request('GET', "/%s/%s" % (path, value)).json() - if isinstance(ret, dict): - # strip off the enclosing dict - key = list(ret.keys())[0] - ret = ret[key] - except ( - ksa_exceptions.NotFound, - ksa_exceptions.BadRequest, - ): - kwargs = {attr: value} - try: - ret = self.find_one(path, **kwargs) - except ksa_exceptions.NotFound: - msg = _("%s not found") % value - raise exceptions.NotFound(msg) - - return ret - - # Floating IPs - - def floating_ip_add( - self, - server, - address, - fixed_address=None, - ): - """Add a floating IP to a server - - :param server: - The :class:`Server` (or its ID) to add an IP to. - :param address: - The FloatingIP or string floating address to add. - :param fixed_address: - The FixedIP the floatingIP should be associated with (optional) - """ - - url = '/servers' - - server = self.find( - url, - attr='name', - value=server, - ) - - address = address.ip if hasattr(address, 'ip') else address - if fixed_address: - if hasattr(fixed_address, 'ip'): - fixed_address = fixed_address.ip - - body = { - 'address': address, - 'fixed_address': fixed_address, - } - else: - body = { - 'address': address, - } - - return self._request( - "POST", - "/%s/%s/action" % (url, server['id']), - json={'addFloatingIp': body}, - ) - - def floating_ip_create( - self, - pool=None, - ): - """Create a new floating ip - - https://docs.openstack.org/api-ref/compute/#create-allocate-floating-ip-address - - :param pool: Name of floating IP pool - """ - - url = "/os-floating-ips" - - try: - return self.create( - url, - json={'pool': pool}, - )['floating_ip'] - except ( - ksa_exceptions.NotFound, - ksa_exceptions.BadRequest, - ): - msg = _("%s not found") % pool - raise exceptions.NotFound(msg) - - def floating_ip_delete( - self, - floating_ip_id=None, - ): - """Delete a floating IP - - https://docs.openstack.org/api-ref/compute/#delete-deallocate-floating-ip-address - - :param string floating_ip_id: - Floating IP ID - """ - - url = "/os-floating-ips" - - if floating_ip_id is not None: - return self.delete('/%s/%s' % (url, floating_ip_id)) - - return None - - def floating_ip_find( - self, - floating_ip=None, - ): - """Return a security group given name or ID - - https://docs.openstack.org/api-ref/compute/#list-floating-ip-addresses - - :param string floating_ip: - Floating IP address - :returns: A dict of the floating IP attributes - """ - - url = "/os-floating-ips" - - return self.find( - url, - attr='ip', - value=floating_ip, - ) - - def floating_ip_list( - self, - ): - """Get floating IPs - - https://docs.openstack.org/api-ref/compute/#show-floating-ip-address-details - - :returns: - list of floating IPs - """ - - url = "/os-floating-ips" - - return self.list(url)["floating_ips"] - - def floating_ip_remove( - self, - server, - address, - ): - """Remove a floating IP from a server - - :param server: - The :class:`Server` (or its ID) to add an IP to. 
- :param address: - The FloatingIP or string floating address to add. - """ - - url = '/servers' - - server = self.find( - url, - attr='name', - value=server, - ) - - address = address.ip if hasattr(address, 'ip') else address - body = { - 'address': address, - } - - return self._request( - "POST", - "/%s/%s/action" % (url, server['id']), - json={'removeFloatingIp': body}, - ) - - # Floating IP Pools - - def floating_ip_pool_list( - self, - ): - """Get floating IP pools - - https://docs.openstack.org/api-ref/compute/?expanded=#list-floating-ip-pools - - :returns: - list of floating IP pools - """ - - url = "/os-floating-ip-pools" - - return self.list(url)["floating_ip_pools"] - - # Hosts - - def host_list( - self, - zone=None, - ): - """Lists hypervisor Hosts - - https://docs.openstack.org/api-ref/compute/#list-hosts - Valid for Compute 2.0 - 2.42 - - :param string zone: - Availability zone - :returns: A dict of the floating IP attributes - """ - - url = "/os-hosts" - if zone: - url = '/os-hosts?zone=%s' % zone - - return self.list(url)["hosts"] - - def host_set( - self, - host=None, - status=None, - maintenance_mode=None, - **params - ): - """Modify host properties - - https://docs.openstack.org/api-ref/compute/#update-host-status - Valid for Compute 2.0 - 2.42 - - status - maintenance_mode - """ - - url = "/os-hosts" - - params = {} - if status: - params['status'] = status - if maintenance_mode: - params['maintenance_mode'] = maintenance_mode - if params == {}: - # Don't bother calling if nothing given - return None - else: - return self._request( - "PUT", - "/%s/%s" % (url, host), - json=params, - ).json() - - def host_show( - self, - host=None, - ): - """Show host - - https://docs.openstack.org/api-ref/compute/#show-host-details - Valid for Compute 2.0 - 2.42 - """ - - url = "/os-hosts" - - r_host = self.find( - url, - attr='host_name', - value=host, - ) - - data = [] - for h in r_host: - data.append(h['resource']) - return data - - # Networks - - def network_create( - self, - name=None, - subnet=None, - share_subnet=None, - ): - """Create a new network - - https://docs.openstack.org/api-ref/compute/#create-network - - :param string name: - Network label (required) - :param integer subnet: - Subnet for IPv4 fixed addresses in CIDR notation (required) - :param integer share_subnet: - Shared subnet between projects, True or False - :returns: A dict of the network attributes - """ - - url = "/os-networks" - - params = { - 'label': name, - 'cidr': subnet, - } - if share_subnet is not None: - params['share_address'] = share_subnet - - return self.create( - url, - json={'network': params}, - )['network'] - - def network_delete( - self, - network=None, - ): - """Delete a network - - https://docs.openstack.org/api-ref/compute/#delete-network - - :param string network: - Network name or ID - """ - - url = "/os-networks" - - network = self.find( - url, - attr='label', - value=network, - )['id'] - if network is not None: - return self.delete('/%s/%s' % (url, network)) - - return None - - def network_find( - self, - network=None, - ): - """Return a network given name or ID - - https://docs.openstack.org/api-ref/compute/#show-network-details - - :param string network: - Network name or ID - :returns: A dict of the network attributes - """ - - url = "/os-networks" - - return self.find( - url, - attr='label', - value=network, - ) - - def network_list( - self, - ): - """Get networks - - https://docs.openstack.org/api-ref/compute/#list-networks - - :returns: - list of networks - """ - - url = 
"/os-networks" - - return self.list(url)["networks"] - - # Security Groups - - def security_group_create( - self, - name=None, - description=None, - ): - """Create a new security group - - https://docs.openstack.org/api-ref/compute/#create-security-group - - :param string name: - Security group name - :param integer description: - Security group description - """ - - url = "/os-security-groups" - - params = { - 'name': name, - 'description': description, - } - - return self.create( - url, - json={'security_group': params}, - )['security_group'] - - def security_group_delete( - self, - security_group=None, - ): - """Delete a security group - - https://docs.openstack.org/api-ref/compute/#delete-security-group - - :param string security_group: - Security group name or ID - """ - - url = "/os-security-groups" - - security_group = self.find( - url, - attr='name', - value=security_group, - )['id'] - if security_group is not None: - return self.delete('/%s/%s' % (url, security_group)) - - return None - - def security_group_find( - self, - security_group=None, - ): - """Return a security group given name or ID - - https://docs.openstack.org/api-ref/compute/#show-security-group-details - - :param string security_group: - Security group name or ID - :returns: A dict of the security group attributes - """ - - url = "/os-security-groups" - - return self.find( - url, - attr='name', - value=security_group, - ) - - def security_group_list( - self, - limit=None, - marker=None, - search_opts=None, - ): - """Get security groups - - https://docs.openstack.org/api-ref/compute/#list-security-groups - - :param integer limit: - query return count limit - :param string marker: - query marker - :param search_opts: - (undocumented) Search filter dict - all_tenants: True|False - return all projects - :returns: - list of security groups names - """ - - params = {} - if search_opts is not None: - params = dict((k, v) for (k, v) in search_opts.items() if v) - if limit: - params['limit'] = limit - if marker: - params['offset'] = marker - - url = "/os-security-groups" - return self.list(url, **params)["security_groups"] - - def security_group_set( - self, - security_group=None, - # name=None, - # description=None, - **params - ): - """Update a security group - - https://docs.openstack.org/api-ref/compute/#update-security-group - - :param string security_group: - Security group name or ID - - TODO(dtroyer): Create an update method in osc-lib - """ - - # Short-circuit no-op - if params is None: - return None - - url = "/os-security-groups" - - security_group = self.find( - url, - attr='name', - value=security_group, - ) - if security_group is not None: - for (k, v) in params.items(): - # Only set a value if it is already present - if k in security_group: - security_group[k] = v - return self._request( - "PUT", - "/%s/%s" % (url, security_group['id']), - json={'security_group': security_group}, - ).json()['security_group'] - return None - - # Security Group Rules - - def security_group_rule_create( - self, - security_group_id=None, - ip_protocol=None, - from_port=None, - to_port=None, - remote_ip=None, - remote_group=None, - ): - """Create a new security group rule - - https://docs.openstack.org/api-ref/compute/#create-security-group-rule - - :param string security_group_id: - Security group ID - :param ip_protocol: - IP protocol, 'tcp', 'udp' or 'icmp' - :param from_port: - Source port - :param to_port: - Destination port - :param remote_ip: - Source IP address in CIDR notation - :param remote_group: - Remote security 
group - """ - - url = "/os-security-group-rules" - - if ip_protocol.lower() not in ['icmp', 'tcp', 'udp']: - raise InvalidValue( - "%(s) is not one of 'icmp', 'tcp', or 'udp'" % ip_protocol - ) - - params = { - 'parent_group_id': security_group_id, - 'ip_protocol': ip_protocol, - 'from_port': self._check_integer(from_port), - 'to_port': self._check_integer(to_port), - 'cidr': remote_ip, - 'group_id': remote_group, - } - - return self.create( - url, - json={'security_group_rule': params}, - )['security_group_rule'] - - def security_group_rule_delete( - self, - security_group_rule_id=None, - ): - """Delete a security group rule - - https://docs.openstack.org/api-ref/compute/#delete-security-group-rule - - :param string security_group_rule_id: - Security group rule ID - """ - - url = "/os-security-group-rules" - if security_group_rule_id is not None: - return self.delete('/%s/%s' % (url, security_group_rule_id)) - - return None + + +# security groups + + +def create_security_group(compute_client, name=None, description=None): + """Create a new security group + + https://docs.openstack.org/api-ref/compute/#create-security-group + + :param compute_client: A compute client + :param str name: Security group name + :param str description: Security group description + :returns: A security group object + """ + data = { + 'name': name, + 'description': description, + } + response = compute_client.post( + '/os-security-groups', data=data, microversion='2.1' + ) + sdk_exceptions.raise_from_response(response) + return response.json()['security_group'] + + +def list_security_groups(compute_client, all_projects=None): + """Get all security groups + + https://docs.openstack.org/api-ref/compute/#list-security-groups + + :param compute_client: A compute client + :param bool all_projects: If true, list from all projects + :returns: A list of security group objects + """ + url = '/os-security-groups' + if all_projects is not None: + url += f'?all_tenants={all_projects}' + response = compute_client.get(url, microversion='2.1') + sdk_exceptions.raise_from_response(response) + return response.json()['security_groups'] + + +def find_security_group(compute_client, name_or_id): + """Find the security group for a given name or ID + + https://docs.openstack.org/api-ref/compute/#show-security-group-details + + :param compute_client: A compute client + :param name_or_id: The name or ID of the security group to look up + :returns: A security group object + :raises exception.NotFound: If a matching security group could not be + found or more than one match was found + """ + response = compute_client.get( + f'/os-security-groups/{name_or_id}', microversion='2.1' + ) + if response.status_code != http.HTTPStatus.NOT_FOUND: + # there might be other, non-404 errors + sdk_exceptions.raise_from_response(response) + return response.json()['security_group'] + + response = compute_client.get('/os-security-groups', microversion='2.1') + sdk_exceptions.raise_from_response(response) + found = None + security_groups = response.json()['security_groups'] + for security_group in security_groups: + if security_group['name'] == name_or_id: + if found: + raise exceptions.NotFound( + f'multiple matches found for {name_or_id}' + ) + found = security_group + + if not found: + raise exceptions.NotFound(f'{name_or_id} not found') + + return found + + +def update_security_group( + compute_client, security_group_id, name=None, description=None +): + """Update an existing security group + + 
https://docs.openstack.org/api-ref/compute/#update-security-group + + :param compute_client: A compute client + :param str security_group_id: The ID of the security group to update + :param str name: Security group name + :param str description: Security group description + :returns: A security group object + """ + data = {} + if name: + data['name'] = name + if description: + data['description'] = description + response = compute_client.put( + f'/os-security-groups/{security_group_id}', + data=data, + microversion='2.1', + ) + sdk_exceptions.raise_from_response(response) + return response.json()['security_group'] + + +def delete_security_group(compute_client, security_group_id=None): + """Delete a security group + + https://docs.openstack.org/api-ref/compute/#delete-security-group + + :param compute_client: A compute client + :param str security_group_id: Security group ID + :returns: None + """ + response = compute_client.delete( + f'/os-security-groups/{security_group_id}', microversion='2.1' + ) + sdk_exceptions.raise_from_response(response) + + +# security group rules + + +def create_security_group_rule( + compute_client, + security_group_id=None, + ip_protocol=None, + from_port=None, + to_port=None, + remote_ip=None, + remote_group=None, +): + """Create a new security group rule + + https://docs.openstack.org/api-ref/compute/#create-security-group-rule + + :param compute_client: A compute client + :param str security_group_id: Security group ID + :param str ip_protocol: IP protocol, 'tcp', 'udp' or 'icmp' + :param int from_port: Source port + :param int to_port: Destination port + :param str remote_ip: Source IP address in CIDR notation + :param str remote_group: Remote security group + :returns: A security group object + """ + data = { + 'parent_group_id': security_group_id, + 'ip_protocol': ip_protocol, + 'from_port': from_port, + 'to_port': to_port, + 'cidr': remote_ip, + 'group_id': remote_group, + } + response = compute_client.post( + '/os-security-group-rules', data=data, microversion='2.1' + ) + sdk_exceptions.raise_from_response(response) + return response.json()['security_group_rule'] + + +def delete_security_group_rule(compute_client, security_group_rule_id=None): + """Delete a security group rule + + https://docs.openstack.org/api-ref/compute/#delete-security-group-rule + + :param compute_client: A compute client + :param str security_group_rule_id: Security group rule ID + :returns: None + """ + response = compute_client.delete( + f'/os-security-group-rules/{security_group_rule_id}', + microversion='2.1', + ) + sdk_exceptions.raise_from_response(response) + + +# networks + + +def create_network(compute_client, name, subnet, share_subnet=None): + """Create a new network + + https://docs.openstack.org/api-ref/compute/#create-network + + :param compute_client: A compute client + :param str name: Network label + :param int subnet: Subnet for IPv4 fixed addresses in CIDR notation + :param bool share_subnet: Shared subnet between projects + :returns: A network object + """ + data = { + 'label': name, + 'cidr': subnet, + } + if share_subnet is not None: + data['share_address'] = share_subnet + + response = compute_client.post( + '/os-networks', data=data, microversion='2.1' + ) + sdk_exceptions.raise_from_response(response) + return response.json()['network'] + + +def list_networks(compute_client): + """Get all networks + + https://docs.openstack.org/api-ref/compute/#list-networks + + :param compute_client: A compute client + :returns: A list of network objects + """ + response 
= compute_client.get('/os-networks', microversion='2.1') + sdk_exceptions.raise_from_response(response) + return response.json()['networks'] + + +def find_network(compute_client, name_or_id): + """Find the network for a given name or ID + + https://docs.openstack.org/api-ref/compute/#show-network-details + + :param compute_client: A compute client + :param name_or_id: The name or ID of the network to look up + :returns: A network object + :raises exception.NotFound: If a matching network could not be found or + more than one match was found + """ + response = compute_client.get( + f'/os-networks/{name_or_id}', microversion='2.1' + ) + if response.status_code != http.HTTPStatus.NOT_FOUND: + # there might be other, non-404 errors + sdk_exceptions.raise_from_response(response) + return response.json()['network'] + + response = compute_client.get('/os-networks', microversion='2.1') + sdk_exceptions.raise_from_response(response) + found = None + networks = response.json()['networks'] + for network in networks: + if network['label'] == name_or_id: + if found: + raise exceptions.NotFound( + f'multiple matches found for {name_or_id}' + ) + found = network + + if not found: + raise exceptions.NotFound(f'{name_or_id} not found') + + return found + + +def delete_network(compute_client, network_id): + """Delete a network + + https://docs.openstack.org/api-ref/compute/#delete-network + + :param compute_client: A compute client + :param string network_id: The network ID + :returns: None + """ + response = compute_client.delete( + f'/os-networks/{network_id}', microversion='2.1' + ) + sdk_exceptions.raise_from_response(response) + + +# floating ips + + +def create_floating_ip(compute_client, network): + """Create a new floating ip + + https://docs.openstack.org/api-ref/compute/#create-allocate-floating-ip-address + + :param network: Name of floating IP pool + :returns: A floating IP object + """ + response = compute_client.post( + '/os-floating-ips', data={'pool': network}, microversion='2.1' + ) + sdk_exceptions.raise_from_response(response) + return response.json()['floating_ip'] + + +def list_floating_ips(compute_client): + """Get all floating IPs + + https://docs.openstack.org/api-ref/compute/#list-floating-ip-addresses + + :returns: A list of floating IP objects + """ + response = compute_client.get('/os-floating-ips', microversion='2.1') + sdk_exceptions.raise_from_response(response) + return response.json()['floating_ips'] + + +def get_floating_ip(compute_client, floating_ip_id): + """Get a floating IP + + https://docs.openstack.org/api-ref/compute/#show-floating-ip-address-details + + :param string floating_ip_id: The floating IP address + :returns: A floating IP object + """ + response = compute_client.get( + f'/os-floating-ips/{floating_ip_id}', microversion='2.1' + ) + sdk_exceptions.raise_from_response(response) + return response.json()['floating_ip'] + + +def delete_floating_ip(compute_client, floating_ip_id): + """Delete a floating IP + + https://docs.openstack.org/api-ref/compute/#delete-deallocate-floating-ip-address + + :param string floating_ip_id: The floating IP address + :returns: None + """ + response = compute_client.delete( + f'/os-floating-ips/{floating_ip_id}', microversion='2.1' + ) + sdk_exceptions.raise_from_response(response) + + +# floating ip pools + + +def list_floating_ip_pools(compute_client): + """Get all floating IP pools + + https://docs.openstack.org/api-ref/compute/#list-floating-ip-pools + + :param compute_client: A compute client + :returns: A list of floating IP 
pool objects + """ + response = compute_client.get('/os-floating-ip-pools', microversion='2.1') + sdk_exceptions.raise_from_response(response) + + return response.json()['floating_ip_pools'] diff --git a/openstackclient/api/image_v1.py b/openstackclient/api/image_v1.py index e15d825a31..a8b61aca96 100644 --- a/openstackclient/api/image_v1.py +++ b/openstackclient/api/image_v1.py @@ -22,7 +22,7 @@ class APIv1(api.BaseAPI): _endpoint_suffix = '/v1' def __init__(self, endpoint=None, **kwargs): - super(APIv1, self).__init__(endpoint=endpoint, **kwargs) + super().__init__(endpoint=endpoint, **kwargs) self.endpoint = self.endpoint.rstrip('/') self._munge_url() @@ -33,11 +33,7 @@ def _munge_url(self): self.endpoint = self.endpoint + self._endpoint_suffix def image_list( - self, - detailed=False, - public=False, - private=False, - **filter + self, detailed=False, public=False, private=False, **filter ): """Get available images diff --git a/openstackclient/api/image_v2.py b/openstackclient/api/image_v2.py index d016318957..9b0e9b1f8e 100644 --- a/openstackclient/api/image_v2.py +++ b/openstackclient/api/image_v2.py @@ -33,7 +33,7 @@ def image_list( private=False, community=False, shared=False, - **filter + **filter, ): """Get available images diff --git a/openstackclient/api/object_store_v1.py b/openstackclient/api/object_store_v1.py index 67c7923023..933b01b836 100644 --- a/openstackclient/api/object_store_v1.py +++ b/openstackclient/api/object_store_v1.py @@ -13,7 +13,6 @@ """Object Store v1 API Library""" -import io import logging import os import sys @@ -33,13 +32,10 @@ class APIv1(api.BaseAPI): """Object Store v1 API""" def __init__(self, **kwargs): - super(APIv1, self).__init__(**kwargs) + super().__init__(**kwargs) def container_create( - self, - container=None, - public=False, - storage_policy=None + self, container=None, public=False, storage_policy=None ): """Create a container @@ -62,7 +58,8 @@ def container_create( headers['x-storage-policy'] = storage_policy response = self.create( - urllib.parse.quote(container), method='PUT', headers=headers) + urllib.parse.quote(container), method='PUT', headers=headers + ) data = { 'account': self._find_account_id(), @@ -92,7 +89,7 @@ def container_list( marker=None, end_marker=None, prefix=None, - **params + **params, ): """Get containers in an account @@ -119,7 +116,7 @@ def container_list( marker=marker, end_marker=end_marker, prefix=prefix, - **params + **params, ) while listing: marker = listing[-1]['name'] @@ -128,7 +125,7 @@ def container_list( marker=marker, end_marker=end_marker, prefix=prefix, - **params + **params, ) if listing: data.extend(listing) @@ -192,9 +189,7 @@ def container_show( data = { 'account': self._find_account_id(), 'container': container, - 'object_count': response.headers.get( - 'x-container-object-count' - ), + 'object_count': response.headers.get('x-container-object-count'), 'bytes_used': response.headers.get('x-container-bytes-used'), 'storage_policy': response.headers.get('x-storage-policy'), } @@ -208,8 +203,9 @@ def container_show( if 'x-container-sync-key' in response.headers: data['sync_key'] = response.headers.get('x-container-sync-key') - properties = self._get_properties(response.headers, - 'x-container-meta-') + properties = self._get_properties( + response.headers, 'x-container-meta-' + ) if properties: data['properties'] = properties @@ -228,8 +224,9 @@ def container_unset( properties to remove from the container """ - headers = self._unset_properties(properties, - 'X-Remove-Container-Meta-%s') + headers 
= self._unset_properties( + properties, 'X-Remove-Container-Meta-%s' + ) if headers: self.create(urllib.parse.quote(container), headers=headers) @@ -259,9 +256,11 @@ def object_create( # object's name in the container. object_name_str = name if name else object - full_url = "%s/%s" % (urllib.parse.quote(container), - urllib.parse.quote(object_name_str)) - with io.open(object, 'rb') as f: + full_url = ( + f"{urllib.parse.quote(container)}/" + f"{urllib.parse.quote(object_name_str)}" + ) + with open(object, 'rb') as f: response = self.create( full_url, method='PUT', @@ -293,8 +292,9 @@ def object_delete( if container is None or object is None: return - self.delete("%s/%s" % (urllib.parse.quote(container), - urllib.parse.quote(object))) + self.delete( + f"{urllib.parse.quote(container)}/{urllib.parse.quote(object)}" + ) def object_list( self, @@ -305,7 +305,7 @@ def object_list( end_marker=None, delimiter=None, prefix=None, - **params + **params, ): """List objects in a container @@ -340,7 +340,7 @@ def object_list( end_marker=end_marker, prefix=prefix, delimiter=delimiter, - **params + **params, ) while listing: if delimiter: @@ -354,7 +354,7 @@ def object_list( end_marker=end_marker, prefix=prefix, delimiter=delimiter, - **params + **params, ) if listing: data.extend(listing) @@ -394,8 +394,7 @@ def object_save( response = self._request( 'GET', - "%s/%s" % (urllib.parse.quote(container), - urllib.parse.quote(object)), + f"{urllib.parse.quote(container)}/{urllib.parse.quote(object)}", stream=True, ) if response.status_code == 200: @@ -429,9 +428,10 @@ def object_set( headers = self._set_properties(properties, 'X-Object-Meta-%s') if headers: - self.create("%s/%s" % (urllib.parse.quote(container), - urllib.parse.quote(object)), - headers=headers) + self.create( + f"{urllib.parse.quote(container)}/{urllib.parse.quote(object)}", + headers=headers, + ) def object_unset( self, @@ -451,9 +451,10 @@ def object_unset( headers = self._unset_properties(properties, 'X-Remove-Object-Meta-%s') if headers: - self.create("%s/%s" % (urllib.parse.quote(container), - urllib.parse.quote(object)), - headers=headers) + self.create( + f"{urllib.parse.quote(container)}/{urllib.parse.quote(object)}", + headers=headers, + ) def object_show( self, @@ -473,9 +474,10 @@ def object_show( if container is None or object is None: return {} - response = self._request('HEAD', "%s/%s" % - (urllib.parse.quote(container), - urllib.parse.quote(object))) + response = self._request( + 'HEAD', + f"{urllib.parse.quote(container)}/{urllib.parse.quote(object)}", + ) data = { 'account': self._find_account_id(), @@ -484,9 +486,7 @@ def object_show( 'content-type': response.headers.get('content-type'), } if 'content-length' in response.headers: - data['content-length'] = response.headers.get( - 'content-length' - ) + data['content-length'] = response.headers.get('content-length') if 'last-modified' in response.headers: data['last-modified'] = response.headers.get('last-modified') if 'etag' in response.headers: @@ -549,8 +549,9 @@ def account_unset( properties to remove from the account """ - headers = self._unset_properties(properties, - 'X-Remove-Account-Meta-%s') + headers = self._unset_properties( + properties, 'X-Remove-Account-Meta-%s' + ) if headers: self.create("", headers=headers) @@ -596,5 +597,5 @@ def _get_properties(self, headers, header_tag): properties = {} for k, v in headers.items(): if k.lower().startswith(header_tag): - properties[k[len(header_tag):]] = v + properties[k[len(header_tag) :]] = v return properties diff 
--git a/openstackclient/api/volume_v2.py b/openstackclient/api/volume_v2.py
new file mode 100644
index 0000000000..9575379c69
--- /dev/null
+++ b/openstackclient/api/volume_v2.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Volume v2 API Library
+
+A collection of wrappers for deprecated Block Storage v2 APIs that are
+intentionally not supported by the SDK.
+"""
+
+import http
+
+from openstack import exceptions as sdk_exceptions
+from osc_lib import exceptions
+
+
+# consistency groups
+
+
+def find_consistency_group(volume_client, name_or_id):
+    """Find the consistency group for a given name or ID
+
+    https://docs.openstack.org/api-ref/block-storage/v3/#show-a-consistency-group-s-details
+
+    :param volume_client: A volume client
+    :param name_or_id: The name or ID of the consistency group to look up
+    :returns: A consistency group object
+    :raises exceptions.NotFound: If a matching consistency group could not be
+        found or more than one match was found
+    """
+    response = volume_client.get(f'/consistencygroups/{name_or_id}')
+    if response.status_code != http.HTTPStatus.NOT_FOUND:
+        # there might be other, non-404 errors
+        sdk_exceptions.raise_from_response(response)
+        return response.json()['consistencygroup']
+
+    response = volume_client.get('/consistencygroups')
+    sdk_exceptions.raise_from_response(response)
+    found = None
+    consistency_groups = response.json()['consistencygroups']
+    for consistency_group in consistency_groups:
+        if consistency_group['name'] == name_or_id:
+            if found:
+                raise exceptions.NotFound(
+                    f'multiple matches found for {name_or_id}'
+                )
+            found = consistency_group
+
+    if not found:
+        raise exceptions.NotFound(f'{name_or_id} not found')
+
+    return found
diff --git a/openstackclient/api/volume_v3.py b/openstackclient/api/volume_v3.py
new file mode 100644
index 0000000000..1a3f25fa01
--- /dev/null
+++ b/openstackclient/api/volume_v3.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Volume v3 API Library
+
+A collection of wrappers for deprecated Block Storage v3 APIs that are
+intentionally not supported by the SDK.
+""" + +import http + +from openstack import exceptions as sdk_exceptions +from osc_lib import exceptions + + +# consistency groups + + +def find_consistency_group(compute_client, name_or_id): + """Find the consistency group for a given name or ID + + https://docs.openstack.org/api-ref/block-storage/v3/#show-a-consistency-group-s-details + + :param volume_client: A volume client + :param name_or_id: The name or ID of the consistency group to look up + :returns: A consistency group object + :raises exception.NotFound: If a matching consistency group could not be + found or more than one match was found + """ + response = compute_client.get(f'/consistencygroups/{name_or_id}') + if response.status_code != http.HTTPStatus.NOT_FOUND: + # there might be other, non-404 errors + sdk_exceptions.raise_from_response(response) + return response.json()['consistencygroup'] + + response = compute_client.get('/consistencygroups') + sdk_exceptions.raise_from_response(response) + found = None + consistency_groups = response.json()['consistencygroups'] + for consistency_group in consistency_groups: + if consistency_group['name'] == name_or_id: + if found: + raise exceptions.NotFound( + f'multiple matches found for {name_or_id}' + ) + found = consistency_group + + if not found: + raise exceptions.NotFound(f'{name_or_id} not found') + + return found diff --git a/openstackclient/command.py b/openstackclient/command.py new file mode 100644 index 0000000000..124d755aa9 --- /dev/null +++ b/openstackclient/command.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cliff import lister +from cliff import show +from osc_lib.command import command + +from openstackclient import shell + + +class Command(command.Command): + app: shell.OpenStackShell + + +class Lister(Command, lister.Lister): ... + + +class ShowOne(Command, show.ShowOne): ... 
diff --git a/openstackclient/common/availability_zone.py b/openstackclient/common/availability_zone.py index 3b2fa848a0..6f5e4fd455 100644 --- a/openstackclient/common/availability_zone.py +++ b/openstackclient/common/availability_zone.py @@ -16,36 +16,30 @@ import copy import logging -from novaclient import exceptions as nova_exceptions -from osc_lib.command import command +from openstack import exceptions as sdk_exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) -def _xform_common_availability_zone(az, zone_info): - if hasattr(az, 'zoneState'): - zone_info['zone_status'] = ('available' if az.zoneState['available'] - else 'not available') - if hasattr(az, 'zoneName'): - zone_info['zone_name'] = az.zoneName - - zone_info['zone_resource'] = '' - - def _xform_compute_availability_zone(az, include_extra): result = [] - zone_info = {} - _xform_common_availability_zone(az, zone_info) + zone_info = { + 'zone_name': az.name, + 'zone_status': ( + 'available' if az.state['available'] else 'not available' + ), + } if not include_extra: result.append(zone_info) return result - if hasattr(az, 'hosts') and az.hosts: + if az.hosts: for host, services in az.hosts.items(): host_info = copy.deepcopy(zone_info) host_info['host_name'] = host @@ -53,10 +47,11 @@ def _xform_compute_availability_zone(az, include_extra): for svc, state in services.items(): info = copy.deepcopy(host_info) info['service_name'] = svc - info['service_status'] = '%s %s %s' % ( + info['service_status'] = '{} {} {}'.format( 'enabled' if state['active'] else 'disabled', ':-)' if state['available'] else 'XXX', - state['updated_at']) + state['updated_at'], + ) result.append(info) else: zone_info['host_name'] = '' @@ -68,8 +63,12 @@ def _xform_compute_availability_zone(az, include_extra): def _xform_volume_availability_zone(az): result = [] - zone_info = {} - _xform_common_availability_zone(az, zone_info) + zone_info = { + 'zone_name': az.name, + 'zone_status': ( + 'available' if az.state['available'] else 'not available' + ), + } result.append(zone_info) return result @@ -77,11 +76,11 @@ def _xform_volume_availability_zone(az): def _xform_network_availability_zone(az): result = [] zone_info = {} - zone_info['zone_name'] = getattr(az, 'name', '') - zone_info['zone_status'] = getattr(az, 'state', '') + zone_info['zone_name'] = az.name + zone_info['zone_status'] = az.state if 'unavailable' == zone_info['zone_status']: zone_info['zone_status'] = 'not available' - zone_info['zone_resource'] = getattr(az, 'resource', '') + zone_info['zone_resource'] = az.resource result.append(zone_info) return result @@ -90,7 +89,7 @@ class ListAvailabilityZone(command.Lister): _description = _("List availability zones and their status") def get_parser(self, prog_name): - parser = super(ListAvailabilityZone, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--compute', action='store_true', @@ -120,29 +119,30 @@ def get_parser(self, prog_name): def _get_compute_availability_zones(self, parsed_args): compute_client = self.app.client_manager.compute try: - data = compute_client.availability_zones.list() - except nova_exceptions.Forbidden: # policy doesn't allow + data = list(compute_client.availability_zones(details=True)) + except sdk_exceptions.ForbiddenException: # policy doesn't allow try: - data = compute_client.availability_zones.list(detailed=False) + data = compute_client.availability_zones(details=False) except Exception: 
raise - # Argh, the availability zones are not iterable... result = [] for zone in data: result += _xform_compute_availability_zone(zone, parsed_args.long) return result def _get_volume_availability_zones(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume data = [] try: - data = volume_client.availability_zones.list() + data = list(volume_client.availability_zones()) except Exception as e: LOG.debug('Volume availability zone exception: %s', e) if parsed_args.volume: - message = _("Availability zones list not supported by " - "Block Storage API") + message = _( + "Availability zones list not supported by " + "Block Storage API" + ) LOG.warning(message) result = [] @@ -154,13 +154,15 @@ def _get_network_availability_zones(self, parsed_args): network_client = self.app.client_manager.network try: # Verify that the extension exists. - network_client.find_extension('Availability Zone', - ignore_missing=False) + network_client.find_extension( + 'Availability Zone', ignore_missing=False + ) except Exception as e: LOG.debug('Network availability zone exception: ', e) if parsed_args.network: - message = _("Availability zones list not supported by " - "Network API") + message = _( + "Availability zones list not supported by Network API" + ) LOG.warning(message) return [] @@ -170,17 +172,21 @@ def _get_network_availability_zones(self, parsed_args): return result def take_action(self, parsed_args): - + columns: tuple[str, ...] = ('Zone Name', 'Zone Status') if parsed_args.long: - columns = ('Zone Name', 'Zone Status', 'Zone Resource', - 'Host Name', 'Service Name', 'Service Status') - else: - columns = ('Zone Name', 'Zone Status') + columns += ( + 'Zone Resource', + 'Host Name', + 'Service Name', + 'Service Status', + ) # Show everything by default. - show_all = (not parsed_args.compute and - not parsed_args.volume and - not parsed_args.network) + show_all = ( + not parsed_args.compute + and not parsed_args.volume + and not parsed_args.network + ) result = [] if parsed_args.compute or show_all: @@ -190,7 +196,7 @@ def take_action(self, parsed_args): if parsed_args.network or show_all: result += self._get_network_availability_zones(parsed_args) - return (columns, - (utils.get_dict_properties( - s, columns - ) for s in result)) + return ( + columns, + (utils.get_dict_properties(s, columns) for s in result), + ) diff --git a/openstackclient/common/clientmanager.py b/openstackclient/common/clientmanager.py index 1ed6aa2400..51911aaf99 100644 --- a/openstackclient/common/clientmanager.py +++ b/openstackclient/common/clientmanager.py @@ -15,18 +15,29 @@ """Manage access to the clients, including authenticating when needed.""" +import argparse +from collections.abc import Callable import importlib import logging import sys +import typing as ty +from osc_lib.cli import client_config from osc_lib import clientmanager from osc_lib import shell import stevedore +if ty.TYPE_CHECKING: + from keystoneauth1 import access as ksa_access + from openstack.compute.v2 import _proxy as compute_proxy + from openstack.image.v2 import _proxy as image_proxy + from openstack.network.v2 import _proxy as network_proxy + + from openstackclient.api import object_store_v1 LOG = logging.getLogger(__name__) -PLUGIN_MODULES = [] +PLUGIN_MODULES: list[ty.Any] = [] USER_AGENT = 'python-openstackclient' @@ -39,11 +50,23 @@ class ClientManager(clientmanager.ClientManager): in osc-lib so we need to maintain a transition period. 
""" - # A simple incrementing version for the plugin to know what is available - PLUGIN_INTERFACE_VERSION = "2" - - # Let the commands set this - _auth_required = False + if ty.TYPE_CHECKING: + # we know this will be set by us and will not be nullable + auth_ref: ksa_access.AccessInfo + + # this is a hack to keep mypy happy: the actual attributes are set in + # get_plugin_modules below + # TODO(stephenfin): Change the types of identity and volume once we've + # migrated everything to SDK. Hopefully by then we'll have figured out + # how to statically distinguish between the v2 and v3 versions of both + # services... + # TODO(stephenfin): We also need to migrate object storage... + compute: compute_proxy.Proxy + identity: ty.Any + image: image_proxy.Proxy + network: network_proxy.Proxy + object_store: object_store_v1.APIv1 + volume: ty.Any def __init__( self, @@ -51,7 +74,7 @@ def __init__( api_version=None, pw_func=None, ): - super(ClientManager, self).__init__( + super().__init__( cli_options=cli_options, api_version=api_version, pw_func=pw_func, @@ -77,44 +100,58 @@ def setup_auth(self): # CloudConfig.__init__() and we'll die if it was not # passed. if ( - self._auth_required and - self._cli_options._openstack_config is not None + self._auth_required + and self._cli_options._openstack_config is not None ): - self._cli_options._openstack_config._pw_callback = \ + if not isinstance( + self._cli_options._openstack_config, client_config.OSC_Config + ): + # programmer error + raise TypeError('unexpected type for _openstack_config') + + self._cli_options._openstack_config._pw_callback = ( shell.prompt_for_password + ) try: # We might already get auth from SDK caching if not self._cli_options._auth: - self._cli_options._auth = \ + self._cli_options._auth = ( self._cli_options._openstack_config.load_auth_plugin( self._cli_options.config, ) + ) except TypeError as e: self._fallback_load_auth_plugin(e) - return super(ClientManager, self).setup_auth() + return super().setup_auth() def _fallback_load_auth_plugin(self, e): # NOTES(RuiChen): Hack to avoid auth plugins choking on data they don't # expect, delete fake token and endpoint, then try to # load auth plugin again with user specified options. # We know it looks ugly, but it's necessary. 
- if self._cli_options.config['auth']['token'] == 'x': + if self._cli_options.config['auth']['token'] == 'x': # noqa: S105 # restore original auth_type - self._cli_options.config['auth_type'] = \ - self._original_auth_type + self._cli_options.config['auth_type'] = self._original_auth_type del self._cli_options.config['auth']['token'] del self._cli_options.config['auth']['endpoint'] - self._cli_options._auth = \ + + if not isinstance( + self._cli_options._openstack_config, client_config.OSC_Config + ): + # programmer error + raise TypeError('unexpected type for _openstack_config') + + self._cli_options._auth = ( self._cli_options._openstack_config.load_auth_plugin( self._cli_options.config, ) + ) else: raise e def is_network_endpoint_enabled(self): """Check if the network endpoint is enabled""" - # NOTE(dtroyer): is_service_available() can also return None if # there is no Service Catalog, callers here are # not expecting that so fold None into True to @@ -123,55 +160,72 @@ def is_network_endpoint_enabled(self): def is_compute_endpoint_enabled(self): """Check if Compute endpoint is enabled""" - return self.is_service_available('compute') is not False - def is_volume_endpoint_enabled(self, volume_client): + # TODO(stephenfin): Drop volume_client argument in OSC 8.0 or later. + def is_volume_endpoint_enabled(self, volume_client=None): """Check if volume endpoint is enabled""" - # NOTE(jcross): Cinder did some interesting things with their service - # name so we need to figure out which version to look - # for when calling is_service_available() - volume_version = volume_client.api_version.ver_major - if self.is_service_available( - "volumev%s" % volume_version) is not False: - return True - elif self.is_service_available('volume') is not False: - return True - else: - return False + # We check against the service type and all aliases defined by the + # Service Types Authority + # https://service-types.openstack.org/service-types.json + return ( + self.is_service_available('block-storage') is not False + or self.is_service_available('volume') is not False + or self.is_service_available('volumev3') is not False + or self.is_service_available('volumev2') is not False + or self.is_service_available('block-store') is not False + ) # Plugin Support +ArgumentParserT = ty.TypeVar('ArgumentParserT', bound=argparse.ArgumentParser) + + +@ty.runtime_checkable # Optional: allows usage with isinstance() +class PluginModule(ty.Protocol): + DEFAULT_API_VERSION: str + API_VERSION_OPTION: str + API_NAME: str + API_VERSIONS: tuple[str] + + make_client: Callable[..., ty.Any] + build_option_parser: Callable[[ArgumentParserT], ArgumentParserT] + check_api_version: Callable[[str], bool] + + +def _on_load_failure_callback( + manager: stevedore.ExtensionManager, + ep: importlib.metadata.EntryPoint, + err: BaseException, +) -> None: + sys.stderr.write( + f"WARNING: Failed to import plugin {ep.group}:{ep.name}: {err}.\n" + ) + + def get_plugin_modules(group): """Find plugin entry points""" mod_list = [] - mgr = stevedore.ExtensionManager(group) + mgr: stevedore.ExtensionManager[PluginModule] + mgr = stevedore.ExtensionManager( + group, on_load_failure_callback=_on_load_failure_callback + ) for ep in mgr: LOG.debug('Found plugin %s', ep.name) - # Different versions of stevedore use different - # implementations of EntryPoint from other libraries, which - # are not API-compatible. 
- try: - module_name = ep.entry_point.module_name - except AttributeError: - try: - module_name = ep.entry_point.module - except AttributeError: - module_name = ep.entry_point.value + module_name = ep.entry_point.module try: module = importlib.import_module(module_name) except Exception as err: sys.stderr.write( - "WARNING: Failed to import plugin %s: %s.\n" % (ep.name, err)) + f"WARNING: Failed to import plugin " + f"{ep.module_name}:{ep.name}: {err}.\n" + ) continue mod_list.append(module) - init_func = getattr(module, 'Initialize', None) - if init_func: - init_func('x') # Add the plugin to the ClientManager setattr( @@ -198,6 +252,8 @@ def build_plugin_option_parser(parser): 'openstack.cli.base', ) # Append list of external plugin modules -PLUGIN_MODULES.extend(get_plugin_modules( - 'openstack.cli.extension', -)) +PLUGIN_MODULES.extend( + get_plugin_modules( + 'openstack.cli.extension', + ) +) diff --git a/openstackclient/common/configuration.py b/openstackclient/common/configuration.py index cb4155059b..4637ad22b8 100644 --- a/openstackclient/common/configuration.py +++ b/openstackclient/common/configuration.py @@ -14,8 +14,8 @@ """Configuration action implementations""" from keystoneauth1.loading import base -from osc_lib.command import command +from openstackclient import command from openstackclient.i18n import _ REDACTED = "" @@ -27,7 +27,7 @@ class ShowConfiguration(command.ShowOne): auth_required = False def get_parser(self, prog_name): - parser = super(ShowConfiguration, self).get_parser(prog_name) + parser = super().get_parser(prog_name) mask_group = parser.add_mutually_exclusive_group() mask_group.add_argument( "--mask", @@ -53,7 +53,8 @@ def take_action(self, parsed_args): if getattr(self.app.client_manager, "auth_plugin_name", None): auth_plg_name = self.app.client_manager.auth_plugin_name secret_opts = [ - o.dest for o in base.get_plugin_options(auth_plg_name) + o.dest + for o in base.get_plugin_options(auth_plg_name) if o.secret ] diff --git a/openstackclient/common/envvars.py b/openstackclient/common/envvars.py new file mode 100644 index 0000000000..5f702ff3d4 --- /dev/null +++ b/openstackclient/common/envvars.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from openstackclient.i18n import _ + + +def bool_from_str(value, strict=False): + true_strings = ('1', 't', 'true', 'on', 'y', 'yes') + false_strings = ('0', 'f', 'false', 'off', 'n', 'no') + + if isinstance(value, bool): + return value + + lowered = value.strip().lower() + if lowered in true_strings: + return True + elif lowered in false_strings or not strict: + return False + + msg = _( + "Unrecognized value '%(value)s'; acceptable values are: %(valid)s" + ) % { + 'value': value, + 'valid': ', '.join( + f"'{s}'" for s in sorted(true_strings + false_strings) + ), + } + raise ValueError(msg) + + +def boolenv(*vars, default=False): + """Search for the first defined of possibly many bool-like env vars. + + Returns the first environment variable defined in vars, or returns the + default. 
+ + :param vars: Arbitrary strings to search for. Case sensitive. + :param default: The default to return if no value found. + :returns: A boolean corresponding to the value found, else the default if + no value found. + """ + for v in vars: + value = os.environ.get(v, None) + if value: + return bool_from_str(value) + return default diff --git a/openstackclient/common/extension.py b/openstackclient/common/extension.py index 1ed2012c10..3f9b257bf3 100644 --- a/openstackclient/common/extension.py +++ b/openstackclient/common/extension.py @@ -17,20 +17,29 @@ import logging -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ - LOG = logging.getLogger(__name__) +def _get_extension_columns(item): + column_map = { + 'updated': 'updated_at', + } + hidden_columns = ['id', 'links', 'location'] + return utils.get_osc_show_columns_for_sdk_resource( + item, column_map, hidden_columns + ) + + class ListExtension(command.Lister): _description = _("List API extensions") def get_parser(self, prog_name): - parser = super(ListExtension, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--compute', action='store_true', @@ -64,21 +73,21 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): + columns: tuple[str, ...] = ('Name', 'Alias', 'Description') if parsed_args.long: - columns = ('Name', 'Alias', 'Description', - 'Namespace', 'Updated', 'Links') - else: - columns = ('Name', 'Alias', 'Description') + columns += ('Namespace', 'Updated At', 'Links') data = [] # by default we want to show everything, unless the # user specifies one or more of the APIs to show # for now, only identity and compute are supported. - show_all = (not parsed_args.identity and - not parsed_args.compute and - not parsed_args.volume and - not parsed_args.network) + show_all = ( + not parsed_args.identity + and not parsed_args.compute + and not parsed_args.volume + and not parsed_args.network + ) if parsed_args.identity or show_all: identity_client = self.app.client_manager.identity @@ -89,7 +98,7 @@ def take_action(self, parsed_args): LOG.warning(message) if parsed_args.compute or show_all: - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute try: data += compute_client.extensions() except Exception: @@ -97,12 +106,13 @@ def take_action(self, parsed_args): LOG.warning(message) if parsed_args.volume or show_all: - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume try: - data += volume_client.list_extensions.show_all() + data += volume_client.extensions() except Exception: - message = _("Extensions list not supported by " - "Block Storage API") + message = _( + "Extensions list not supported by Block Storage API" + ) LOG.warning(message) if parsed_args.network or show_all: @@ -110,15 +120,17 @@ def take_action(self, parsed_args): try: data += network_client.extensions() except Exception: - message = _("Failed to retrieve extensions list " - "from Network API") + message = _( + "Failed to retrieve extensions list from Network API" + ) LOG.warning(message) extension_tuples = ( utils.get_item_properties( s, columns, - ) for s in data + ) + for s in data ) return (columns, extension_tuples) @@ -128,19 +140,26 @@ class ShowExtension(command.ShowOne): _description = _("Show API extension") def get_parser(self, prog_name): - parser = super(ShowExtension, 
self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'extension', metavar='', - help=_('Extension to display. ' - 'Currently, only network extensions are supported. ' - '(Name or Alias)'), + help=_( + 'Extension to display. ' + 'Currently, only network extensions are supported. ' + '(Name or Alias)' + ), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - ext = str(parsed_args.extension) - obj = client.find_extension(ext, ignore_missing=False).to_dict() - return zip(*sorted(obj.items())) + extension = client.find_extension( + parsed_args.extension, + ignore_missing=False, + ) + + display_columns, columns = _get_extension_columns(extension) + data = utils.get_dict_properties(extension, columns) + return display_columns, data diff --git a/openstackclient/common/limits.py b/openstackclient/common/limits.py index 19db35d7df..6512e0fcd1 100644 --- a/openstackclient/common/limits.py +++ b/openstackclient/common/limits.py @@ -17,18 +17,43 @@ import itertools -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common +def _format_absolute_limit(absolute_limits): + info = {} + + for key in set(absolute_limits): + if key in ('id', 'name', 'location'): + continue + + info[key] = absolute_limits[key] + + return info + + +def _format_rate_limit(rate_limits): + # flatten this: + # + # {'uri': '', 'limit': [{'value': '', ...], ...} + # + # to this: + # + # {'uri': '', 'value': '', ...}, ...} + return itertools.chain( + *[[{'uri': x['uri'], **y} for y in x['limit']] for x in rate_limits] + ) + + class ShowLimits(command.Lister): _description = _("Show compute and block storage limits") def get_parser(self, prog_name): - parser = super(ShowLimits, self).get_parser(prog_name) + parser = super().get_parser(prog_name) type_group = parser.add_mutually_exclusive_group(required=True) type_group.add_argument( "--absolute", @@ -42,75 +67,107 @@ def get_parser(self, prog_name): dest="is_rate", action="store_true", default=False, - help=_("Show rate limits"), + help=_( + 'Show rate limits. This is not supported by the compute ' + 'service since the 12.0.0 (Liberty) release and is only ' + 'supported by the block storage service when the ' + 'rate-limiting middleware is enabled. It is therefore a no-op ' + 'in most deployments.' 
+ ), ) parser.add_argument( "--reserved", dest="is_reserved", action="store_true", default=False, - help=_("Include reservations count [only valid with --absolute]"), + help=_("Include reservations count (only valid with --absolute)"), ) parser.add_argument( '--project', metavar='', - help=_('Show limits for a specific project (name or ID)' - ' [only valid with --absolute]'), + help=_( + 'Show limits for a specific project (name or ID) ' + '(only valid with --absolute)' + ), ) parser.add_argument( '--domain', metavar='', - help=_('Domain the project belongs to (name or ID)' - ' [only valid with --absolute]'), + help=_( + 'Domain the project belongs to (name or ID) ' + '(only valid with --absolute)' + ), ) return parser def take_action(self, parsed_args): - - compute_client = self.app.client_manager.compute - volume_client = self.app.client_manager.volume - project_id = None if parsed_args.project is not None: identity_client = self.app.client_manager.identity if parsed_args.domain is not None: - domain = identity_common.find_domain(identity_client, - parsed_args.domain) - project_id = utils.find_resource(identity_client.projects, - parsed_args.project, - domain_id=domain.id).id + domain = identity_common.find_domain( + identity_client, parsed_args.domain + ) + project_id = utils.find_resource( + identity_client.projects, + parsed_args.project, + domain_id=domain.id, + ).id else: - project_id = utils.find_resource(identity_client.projects, - parsed_args.project).id + project_id = utils.find_resource( + identity_client.projects, parsed_args.project + ).id compute_limits = None volume_limits = None if self.app.client_manager.is_compute_endpoint_enabled(): - compute_limits = compute_client.limits.get(parsed_args.is_reserved, - tenant_id=project_id) + compute_client = self.app.client_manager.compute + compute_limits = compute_client.get_limits( + reserved=parsed_args.is_reserved, tenant_id=project_id + ) - if self.app.client_manager.is_volume_endpoint_enabled(volume_client): - volume_limits = volume_client.limits.get() + if self.app.client_manager.is_volume_endpoint_enabled(): + volume_client = self.app.client_manager.sdk_connection.volume + volume_limits = volume_client.get_limits( + project=project_id, + ) - data = [] if parsed_args.is_absolute: + columns = ["Name", "Value"] + info = {} if compute_limits: - data.append(compute_limits.absolute) + info.update(_format_absolute_limit(compute_limits.absolute)) if volume_limits: - data.append(volume_limits.absolute) - columns = ["Name", "Value"] - return (columns, (utils.get_item_properties(s, columns) - for s in itertools.chain(*data))) + info.update(_format_absolute_limit(volume_limits.absolute)) - elif parsed_args.is_rate: + return (columns, sorted(info.items(), key=lambda x: x[0])) + else: # parsed_args.is_rate + data = [] if compute_limits: - data.append(compute_limits.rate) + data.extend(_format_rate_limit(compute_limits.rate)) if volume_limits: - data.append(volume_limits.rate) - columns = ["Verb", "URI", "Value", "Remain", "Unit", - "Next Available"] - return (columns, (utils.get_item_properties(s, columns) - for s in itertools.chain(*data))) - else: - return {}, {} + data.extend(_format_rate_limit(volume_limits.rate)) + columns = [ + "Verb", + "URI", + "Value", + "Remain", + "Unit", + "Next Available", + ] + + return ( + columns, + [ + ( + s['verb'], + s['uri'], + s['value'], + s['remaining'], + s['unit'], + s.get('next-available') or s['next_available'], + ) + for s in data + ], + ) diff --git a/openstackclient/common/module.py 
b/openstackclient/common/module.py index f55fdce048..6ca5dc2315 100644 --- a/openstackclient/common/module.py +++ b/openstackclient/common/module.py @@ -17,9 +17,9 @@ import sys -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -29,13 +29,15 @@ class ListCommand(command.Lister): auth_required = False def get_parser(self, prog_name): - parser = super(ListCommand, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--group', metavar='', - help=_('Show commands filtered by a command group, for example: ' - 'identity, volume, compute, image, network and ' - 'other keywords'), + help=_( + 'Show commands filtered by a command group, for example: ' + 'identity, volume, compute, image, network and ' + 'other keywords' + ), ) return parser @@ -46,7 +48,9 @@ def take_action(self, parsed_args): columns = ('Command Group', 'Commands') if parsed_args.group: - groups = (group for group in groups if parsed_args.group in group) + groups = sorted( + group for group in groups if parsed_args.group in group + ) commands = [] for group in groups: @@ -54,7 +58,6 @@ def take_action(self, parsed_args): command_names = sorted(command_names) if command_names != []: - # TODO(bapalm): Fix this when cliff properly supports # handling the detection rather than using the hard-code below. if parsed_args.formatter == 'table': @@ -71,7 +74,7 @@ class ListModule(command.ShowOne): auth_required = False def get_parser(self, prog_name): - parser = super(ListModule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--all', action='store_true', @@ -81,7 +84,6 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - data = {} # Get module versions mods = sys.modules @@ -95,9 +97,12 @@ def take_action(self, parsed_args): # show for the default (not --all) invocation. # It should be just the things we actually care # about like client and plugin modules... - if (parsed_args.all or - # Handle xxxclient and openstacksdk - (k.endswith('client') or k == 'openstack')): + if ( + parsed_args.all + or + # Handle xxxclient and openstacksdk + (k.endswith('client') or k == 'openstack') + ): try: # NOTE(RuiChen): openstacksdk bug/1588823 exist, # no good way to add __version__ for @@ -108,7 +113,7 @@ def take_action(self, parsed_args): data[k] = mods[k].version.__version__ else: data[k] = mods[k].__version__ - except Exception: + except Exception: # noqa: S110 # Catch all exceptions, just skip it pass diff --git a/openstackclient/common/pagination.py b/openstackclient/common/pagination.py new file mode 100644 index 0000000000..b6a11c5f5f --- /dev/null +++ b/openstackclient/common/pagination.py @@ -0,0 +1,82 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from osc_lib.cli import parseractions + +from openstackclient.i18n import _ + + +# TODO(stephenfin): Consider moving these to osc-lib since they're broadly +# useful + + +def add_marker_pagination_option_to_parser(parser): + """Add marker-based pagination options to the parser. + + APIs that use marker-based paging use the marker and limit query parameters + to paginate through items in a collection. + + Marker-based pagination is often used in cases where the length of the + total set of items is either changing frequently, or where the total length + might not be known upfront. + """ + parser.add_argument( + '--limit', + metavar='', + type=int, + action=parseractions.NonNegativeAction, + help=_( + 'The maximum number of entries to return. If the value exceeds ' + 'the server-defined maximum, then the maximum value will be used.' + ), + ) + parser.add_argument( + '--marker', + metavar='', + default=None, + help=_( + 'The first position in the collection to return results from. ' + 'This should be a value that was returned in a previous request.' + ), + ) + + +def add_offset_pagination_option_to_parser(parser): + """Add offset-based pagination options to the parser. + + APIs that use offset-based paging use the offset and limit query parameters + to paginate through items in a collection. + + Offset-based pagination is often used where the list of items is of a fixed + and predetermined length. + """ + parser.add_argument( + '--limit', + metavar='', + type=int, + action=parseractions.NonNegativeAction, + help=_( + 'The maximum number of entries to return. If the value exceeds ' + 'the server-defined maximum, then the maximum value will be used.' + ), + ) + parser.add_argument( + '--offset', + metavar='', + type=int, + action=parseractions.NonNegativeAction, + default=None, + help=_( + 'The (zero-based) offset of the first item in the collection to ' + 'return.' + ), + ) diff --git a/openstackclient/common/progressbar.py b/openstackclient/common/progressbar.py index 7678aceba0..2852bb250a 100644 --- a/openstackclient/common/progressbar.py +++ b/openstackclient/common/progressbar.py @@ -16,7 +16,7 @@ import sys -class _ProgressBarBase(object): +class _ProgressBarBase: """A progress bar provider for a wrapped object. 
Base abstract class used by specific class wrapper to show @@ -38,9 +38,11 @@ def _display_progress_bar(self, size_read): if self._show_progress: self._percent += size_read / self._totalsize # Output something like this: [==========> ] 49% - sys.stdout.write('\r[{0:<30}] {1:.0%}'.format( - '=' * int(round(self._percent * 29)) + '>', self._percent - )) + sys.stdout.write( + '\r[{:<30}] {:.0%}'.format( + '=' * int(round(self._percent * 29)) + '>', self._percent + ) + ) sys.stdout.flush() def __getattr__(self, attr): diff --git a/openstackclient/common/project_cleanup.py b/openstackclient/common/project_cleanup.py index 1193051ab9..444f23ec2c 100644 --- a/openstackclient/common/project_cleanup.py +++ b/openstackclient/common/project_cleanup.py @@ -17,10 +17,11 @@ import logging import os import queue +import typing as ty from cliff.formatters import table -from osc_lib.command import command +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -35,8 +36,7 @@ def ask_user_yesno(msg): :return bool: User choice """ while True: - answer = getpass._raw_input( - '{} [{}]: '.format(msg, 'y/n')) + answer = getpass.getpass('{} [{}]: '.format(msg, 'y/n')) if answer in ('y', 'Y', 'yes'): return True elif answer in ('n', 'N', 'no'): @@ -47,59 +47,69 @@ class ProjectCleanup(command.Command): _description = _("Clean resources associated with a project") def get_parser(self, prog_name): - parser = super(ProjectCleanup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) action_group = parser.add_mutually_exclusive_group() action_group.add_argument( '--dry-run', action='store_true', - help=_("List a project's resources but do not delete them") + help=_("List a project's resources but do not delete them"), ) action_group.add_argument( '--auto-approve', action='store_true', - help=_("Delete resources without asking for confirmation") + help=_("Delete resources without asking for confirmation"), ) project_group = parser.add_mutually_exclusive_group(required=True) project_group.add_argument( '--auth-project', action='store_true', - help=_('Delete resources of the project used to authenticate') + help=_('Delete resources of the project used to authenticate'), ) project_group.add_argument( '--project', metavar='', - help=_('Project to clean (name or ID)') + help=_('Project to clean (name or ID)'), ) parser.add_argument( '--created-before', metavar='', - help=_('Only delete resources created before the given time') + help=_('Only delete resources created before the given time'), ) parser.add_argument( '--updated-before', metavar='', - help=_('Only delete resources updated before the given time') + help=_('Only delete resources updated before the given time'), + ) + parser.add_argument( + '--skip-resource', + metavar='', + help='Skip cleanup of specific resource (repeat if necessary)', + action='append', ) identity_common.add_project_domain_option_to_parser(parser) return parser def take_action(self, parsed_args): - sdk = self.app.client_manager.sdk_connection + connection = self.app.client_manager.sdk_connection if parsed_args.auth_project: - project_connect = sdk + # is we've got a project already configured, use the connection + # as-is + pass elif parsed_args.project: - project = sdk.identity.find_project( - name_or_id=parsed_args.project, - ignore_missing=False) - project_connect = sdk.connect_as_project(project) - - if project_connect: - status_queue = queue.Queue() - parsed_args.max_width = 
int(os.environ.get('CLIFF_MAX_TERM_WIDTH', - 0)) - parsed_args.fit_width = bool(int(os.environ.get('CLIFF_FIT_WIDTH', - 0))) + project = connection.identity.find_project( + name_or_id=parsed_args.project, ignore_missing=False + ) + connection = connection.connect_as_project(project) + + if connection: + status_queue: queue.Queue[ty.Any] = queue.Queue() + parsed_args.max_width = int( + os.environ.get('CLIFF_MAX_TERM_WIDTH', 0) + ) + parsed_args.fit_width = bool( + int(os.environ.get('CLIFF_FIT_WIDTH', 0)) + ) parsed_args.print_empty = False table_fmt = table.TableFormatter() @@ -112,22 +122,23 @@ def take_action(self, parsed_args): if parsed_args.updated_before: filters['updated_at'] = parsed_args.updated_before - project_connect.project_cleanup(dry_run=True, - status_queue=status_queue, - filters=filters) + connection.project_cleanup( + dry_run=True, + status_queue=status_queue, + filters=filters, + skip_resources=parsed_args.skip_resource, + ) data = [] while not status_queue.empty(): resource = status_queue.get_nowait() data.append( - (type(resource).__name__, resource.id, resource.name)) + (type(resource).__name__, resource.id, resource.name) + ) status_queue.task_done() status_queue.join() table_fmt.emit_list( - ('Type', 'ID', 'Name'), - data, - self.app.stdout, - parsed_args + ('Type', 'ID', 'Name'), data, self.app.stdout, parsed_args ) if parsed_args.dry_run: @@ -135,11 +146,15 @@ def take_action(self, parsed_args): if not parsed_args.auto_approve: if not ask_user_yesno( - _("These resources will be deleted. Are you sure")): + _("These resources will be deleted. Are you sure") + ): return self.log.warning(_('Deleting resources')) - project_connect.project_cleanup(dry_run=False, - status_queue=status_queue, - filters=filters) + connection.project_cleanup( + dry_run=False, + status_queue=status_queue, + filters=filters, + skip_resources=parsed_args.skip_resource, + ) diff --git a/openstackclient/common/project_purge.py b/openstackclient/common/project_purge.py deleted file mode 100644 index 76ed4563b0..0000000000 --- a/openstackclient/common/project_purge.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import logging - -from osc_lib.command import command -from osc_lib import utils - -from openstackclient.i18n import _ -from openstackclient.identity import common as identity_common - - -LOG = logging.getLogger(__name__) - - -class ProjectPurge(command.Command): - _description = _("Clean resources associated with a project") - - def get_parser(self, prog_name): - parser = super(ProjectPurge, self).get_parser(prog_name) - parser.add_argument( - '--dry-run', - action='store_true', - help=_("List a project's resources"), - ) - parser.add_argument( - '--keep-project', - action='store_true', - help=_("Clean project resources, but don't delete the project"), - ) - project_group = parser.add_mutually_exclusive_group(required=True) - project_group.add_argument( - '--auth-project', - action='store_true', - help=_('Delete resources of the project used to authenticate'), - ) - project_group.add_argument( - '--project', - metavar='', - help=_('Project to clean (name or ID)'), - ) - identity_common.add_project_domain_option_to_parser(parser) - return parser - - def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - - if parsed_args.auth_project: - project_id = self.app.client_manager.auth_ref.project_id - elif parsed_args.project: - try: - project_id = identity_common.find_project( - identity_client, - parsed_args.project, - parsed_args.project_domain, - ).id - except AttributeError: # using v2 auth and supplying a domain - project_id = utils.find_resource( - identity_client.tenants, - parsed_args.project, - ).id - - # delete all non-identity resources - self.delete_resources(parsed_args.dry_run, project_id) - - # clean up the project - if not parsed_args.keep_project: - LOG.warning(_('Deleting project: %s'), project_id) - if not parsed_args.dry_run: - identity_client.projects.delete(project_id) - - def delete_resources(self, dry_run, project_id): - # servers - try: - compute_client = self.app.client_manager.compute - search_opts = {'tenant_id': project_id, 'all_tenants': True} - data = compute_client.servers.list(search_opts=search_opts) - self.delete_objects( - compute_client.servers.delete, data, 'server', dry_run) - except Exception: - pass - - # images - try: - image_client = self.app.client_manager.image - api_version = int(image_client.version) - if api_version == 1: - data = image_client.images.list(owner=project_id) - elif api_version == 2: - kwargs = {'filters': {'owner': project_id}} - data = image_client.images.list(**kwargs) - else: - raise NotImplementedError - self.delete_objects( - image_client.images.delete, data, 'image', dry_run) - except Exception: - pass - - # volumes, snapshots, backups - volume_client = self.app.client_manager.volume - search_opts = {'project_id': project_id, 'all_tenants': True} - try: - data = volume_client.volume_snapshots.list(search_opts=search_opts) - self.delete_objects( - self.delete_one_volume_snapshot, - data, - 'volume snapshot', - dry_run) - except Exception: - pass - try: - data = volume_client.backups.list(search_opts=search_opts) - self.delete_objects( - self.delete_one_volume_backup, - data, - 'volume backup', - dry_run) - except Exception: - pass - try: - data = volume_client.volumes.list(search_opts=search_opts) - self.delete_objects( - volume_client.volumes.force_delete, data, 'volume', dry_run) - except Exception: - pass - - def delete_objects(self, func_delete, data, resource, dry_run): - result = 0 - for i in data: - LOG.warning(_('Deleting %(resource)s : %(id)s') % - {'resource': resource, 'id': 
i.id}) - if not dry_run: - try: - func_delete(i.id) - except Exception as e: - result += 1 - LOG.error(_("Failed to delete %(resource)s with " - "ID '%(id)s': %(e)s") - % {'resource': resource, 'id': i.id, 'e': e}) - if result > 0: - total = len(data) - msg = (_("%(result)s of %(total)s %(resource)ss failed " - "to delete.") % - {'result': result, - 'total': total, - 'resource': resource}) - LOG.error(msg) - - def delete_one_volume_snapshot(self, snapshot_id): - volume_client = self.app.client_manager.volume - try: - volume_client.volume_snapshots.delete(snapshot_id) - except Exception: - # Only volume v2 support deleting by force - volume_client.volume_snapshots.delete(snapshot_id, force=True) - - def delete_one_volume_backup(self, backup_id): - volume_client = self.app.client_manager.volume - try: - volume_client.backups.delete(backup_id) - except Exception: - # Only volume v2 support deleting by force - volume_client.backups.delete(backup_id, force=True) diff --git a/openstackclient/common/quota.py b/openstackclient/common/quota.py index 670451e218..6d0025a754 100644 --- a/openstackclient/common/quota.py +++ b/openstackclient/common/quota.py @@ -11,7 +11,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# """Quota action implementations""" @@ -19,10 +18,13 @@ import itertools import logging import sys +import typing as ty -from osc_lib.command import command +from openstack import exceptions as sdk_exceptions +from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.network import common @@ -33,7 +35,6 @@ COMPUTE_QUOTAS = { 'cores': 'cores', - 'fixed_ips': 'fixed-ips', 'injected_file_content_bytes': 'injected-file-size', 'injected_file_path_bytes': 'injected-path-size', 'injected_files': 'injected-files', @@ -41,8 +42,8 @@ 'key_pairs': 'key-pairs', 'metadata_items': 'properties', 'ram': 'ram', - 'server_groups': 'server-groups', 'server_group_members': 'server-group-members', + 'server_groups': 'server-groups', } VOLUME_QUOTAS = { @@ -61,6 +62,7 @@ ] NOVA_NETWORK_QUOTAS = { + 'fixed_ips': 'fixed-ips', 'floating_ips': 'floating-ips', 'security_group_rules': 'secgroup-rules', 'security_groups': 'secgroups', @@ -104,11 +106,8 @@ def _xform_get_quota(data, value, keys): def get_project(app, project): if project is not None: - identity_client = app.client_manager.identity - project = utils.find_resource( - identity_client.projects, - project, - ) + identity_client = app.client_manager.sdk_connection.identity + project = identity_client.find_project(project, ignore_missing=False) project_id = project.id project_name = project.name elif app.client_manager.auth_ref: @@ -130,88 +129,88 @@ def get_compute_quotas( app, project_id, *, - quota_class=False, detail=False, default=False, ): try: client = app.client_manager.compute - if quota_class: - # NOTE(stephenfin): The 'project' argument here could be anything - # as the nova API doesn't care what you pass in. 
We only pass the - # project in to avoid weirding people out :) - quota = client.quota_classes.get(project_id) - elif default: - quota = client.quotas.defaults(project_id) + if default: + quota = client.get_quota_set_defaults(project_id) else: - quota = client.quotas.get(project_id, detail=detail) - except Exception as e: - if type(e).__name__ == 'EndpointNotFound': - return {} - raise - return quota._info + quota = client.get_quota_set(project_id, usage=detail) + except sdk_exceptions.EndpointNotFound: + return {} + data = quota.to_dict() + if not detail: + del data['usage'] + del data['reservation'] + return data def get_volume_quotas( app, project_id, *, - quota_class=False, detail=False, default=False, ): try: - client = app.client_manager.volume - if quota_class: - quota = client.quota_classes.get(project_id) - elif default: - quota = client.quotas.defaults(project_id) + client = app.client_manager.sdk_connection.volume + if default: + quota = client.get_quota_set_defaults(project_id) else: - quota = client.quotas.get(project_id, usage=detail) - except Exception as e: - if type(e).__name__ == 'EndpointNotFound': - return {} - else: - raise - return quota._info + quota = client.get_quota_set(project_id, usage=detail) + except sdk_exceptions.EndpointNotFound: + return {} + data = quota.to_dict() + if not detail: + del data['usage'] + del data['reservation'] + return data def get_network_quotas( app, project_id, *, - quota_class=False, detail=False, default=False, ): def _network_quota_to_dict(network_quota, detail=False): - if type(network_quota) is not dict: - dict_quota = network_quota.to_dict() - else: - dict_quota = network_quota - - result = {} - + dict_quota = network_quota.to_dict(computed=False) + + if not detail: + return dict_quota + + # Neutron returns quota details in dict which is in format like: + # {'resource_name': {'in_use': X, 'limit': Y, 'reserved': Z}, + # 'resource_name_2': {'in_use': X2, 'limit': Y2, 'reserved': Z2}} + # + # but Nova and Cinder returns quota in different format, like: + # {'resource_name': X, + # 'resource_name_2': X2, + # 'usage': { + # 'resource_name': Y, + # 'resource_name_2': Y2 + # }, + # 'reserved': { + # 'resource_name': Z, + # 'resource_name_2': Z2 + # }} + # + # so we need to make conversion to have data in same format from + # all of the services + result: dict[str, ty.Any] = {"usage": {}, "reservation": {}} for key, values in dict_quota.items(): if values is None: continue - - # NOTE(slaweq): Neutron returns values with key "used" but Nova for - # example returns same data with key "in_use" instead. 
Because of - # that we need to convert Neutron key to the same as is returned - # from Nova to make result more consistent - if isinstance(values, dict) and 'used' in values: - values['in_use'] = values.pop("used") - - result[key] = values + if isinstance(values, dict): + result[key] = values['limit'] + result["reservation"][key] = values['reserved'] + result["usage"][key] = values['used'] return result - # neutron doesn't have the concept of quota classes and if we're using - # nova-network we already fetched this - if quota_class: - return {} - # we have nothing to return if we are not using neutron if not app.client_manager.is_network_endpoint_enabled(): return {} @@ -227,34 +226,14 @@ def _network_quota_to_dict(network_quota, detail=False): class ListQuota(command.Lister): - _description = _( - "List quotas for all projects with non-default quota values or " - "list detailed quota information for requested project" - ) + """List quotas for all projects with non-default quota values. + + Empty output means all projects are using default quotas, which can be + inspected with 'openstack quota show --default'. + """ def get_parser(self, prog_name): parser = super().get_parser(prog_name) - # TODO(stephenfin): Remove in OSC 8.0 - parser.add_argument( - '--project', - metavar='', - help=_( - "**Deprecated** List quotas for this project " - "(name or ID). " - "Use 'quota show' instead." - ), - ) - # TODO(stephenfin): Remove in OSC 8.0 - parser.add_argument( - '--detail', - dest='detail', - action='store_true', - default=False, - help=_( - "**Deprecated** Show details about quotas usage. " - "Use 'quota show --usage' instead." - ), - ) option = parser.add_mutually_exclusive_group(required=True) option.add_argument( '--compute', @@ -276,64 +255,121 @@ def get_parser(self, prog_name): ) return parser - def _get_detailed_quotas(self, parsed_args): - project_info = get_project(self.app, parsed_args.project) - project = project_info['id'] + def _list_quota_compute(self, parsed_args, project_ids): + compute_client = self.app.client_manager.compute + result = [] - quotas = {} + for project_id in project_ids: + try: + project_data = compute_client.get_quota_set(project_id) + # NOTE(stephenfin): Unfortunately, Nova raises a HTTP 400 (Bad + # Request) if the project ID is invalid, even though the project + # ID is actually the resource's identifier which would normally + # lead us to expect a HTTP 404 (Not Found). + except ( + sdk_exceptions.BadRequestException, + sdk_exceptions.ForbiddenException, + sdk_exceptions.NotFoundException, + ) as exc: + # Project not found, move on to next one + LOG.warning(f"Project {project_id} not found: {exc}") + continue - if parsed_args.compute: - quotas.update( - get_compute_quotas( - self.app, - project, - detail=parsed_args.detail, - ) + project_result = _xform_get_quota( + project_data, + project_id, + COMPUTE_QUOTAS.keys(), ) - if parsed_args.network: - quotas.update( - get_network_quotas( - self.app, - project, - detail=parsed_args.detail, - ) + default_data = compute_client.get_quota_set_defaults(project_id) + default_result = _xform_get_quota( + default_data, + project_id, + COMPUTE_QUOTAS.keys(), ) - if parsed_args.volume: - quotas.update( - get_volume_quotas( - self.app, - parsed_args, - detail=parsed_args.detail, - ), - ) + if default_result != project_result: + result += project_result + + columns: tuple[str, ...] 
= ( + 'id', + 'cores', + 'injected_files', + 'injected_file_content_bytes', + 'injected_file_path_bytes', + 'instances', + 'key_pairs', + 'metadata_items', + 'ram', + 'server_groups', + 'server_group_members', + ) + column_headers: tuple[str, ...] = ( + 'Project ID', + 'Cores', + 'Injected Files', + 'Injected File Content Bytes', + 'Injected File Path Bytes', + 'Instances', + 'Key Pairs', + 'Metadata Items', + 'Ram', + 'Server Groups', + 'Server Group Members', + ) + return ( + column_headers, + (utils.get_dict_properties(s, columns) for s in result), + ) + def _list_quota_volume(self, parsed_args, project_ids): + volume_client = self.app.client_manager.sdk_connection.volume result = [] - for resource, values in quotas.items(): - # NOTE(slaweq): there is no detailed quotas info for some resources - # and it shouldn't be displayed here - if isinstance(values, dict): - result.append( - { - 'resource': resource, - 'in_use': values.get('in_use'), - 'reserved': values.get('reserved'), - 'limit': values.get('limit'), - } - ) - columns = ( - 'resource', - 'in_use', - 'reserved', - 'limit', + for project_id in project_ids: + try: + project_data = volume_client.get_quota_set(project_id) + except ( + sdk_exceptions.ForbiddenException, + sdk_exceptions.NotFoundException, + ) as exc: + # Project not found, move on to next one + LOG.warning(f"Project {project_id} not found: {exc}") + continue + + project_result = _xform_get_quota( + project_data, + project_id, + VOLUME_QUOTAS.keys(), + ) + + default_data = volume_client.get_quota_set_defaults(project_id) + default_result = _xform_get_quota( + default_data, + project_id, + VOLUME_QUOTAS.keys(), + ) + + if default_result != project_result: + result += project_result + + columns: tuple[str, ...] = ( + 'id', + 'backups', + 'backup_gigabytes', + 'gigabytes', + 'per_volume_gigabytes', + 'snapshots', + 'volumes', ) - column_headers = ( - 'Resource', - 'In Use', - 'Reserved', - 'Limit', + column_headers: tuple[str, ...] = ( + 'Project ID', + 'Backups', + 'Backup Gigabytes', + 'Gigabytes', + 'Per Volume Gigabytes', + 'Snapshots', + 'Volumes', ) return ( @@ -341,214 +377,80 @@ def _get_detailed_quotas(self, parsed_args): (utils.get_dict_properties(s, columns) for s in result), ) - def take_action(self, parsed_args): - if parsed_args.detail: - msg = _( - "The --detail option has been deprecated. " - "Use 'openstack quota show --usage' instead." - ) - self.log.warning(msg) - elif parsed_args.project: # elif to avoid being too noisy - msg = _( - "The --project option has been deprecated. " - "Use 'openstack quota show' instead." 
- ) - self.log.warning(msg) - + def _list_quota_network(self, parsed_args, project_ids): + network_client = self.app.client_manager.network result = [] - project_ids = [] - if parsed_args.project is None: - for p in self.app.client_manager.identity.projects.list(): - project_ids.append(getattr(p, 'id', '')) - else: - identity_client = self.app.client_manager.identity - project = utils.find_resource( - identity_client.projects, - parsed_args.project, - ) - project_ids.append(getattr(project, 'id', '')) - if parsed_args.compute: - if parsed_args.detail: - return self._get_detailed_quotas(parsed_args) + for project_id in project_ids: + try: + project_data = network_client.get_quota(project_id) + except ( + sdk_exceptions.NotFoundException, + sdk_exceptions.ForbiddenException, + ) as exc: + # Project not found, move on to next one + LOG.warning(f"Project {project_id} not found: {exc}") + continue - compute_client = self.app.client_manager.compute - for p in project_ids: - try: - data = compute_client.quotas.get(p) - except Exception as ex: - if ( - type(ex).__name__ == 'NotFound' - or ex.http_status >= 400 - and ex.http_status <= 499 - ): - # Project not found, move on to next one - LOG.warning("Project %s not found: %s" % (p, ex)) - continue - else: - raise - - result_data = _xform_get_quota( - data, - p, - COMPUTE_QUOTAS.keys(), - ) - default_data = compute_client.quotas.defaults(p) - result_default = _xform_get_quota( - default_data, - p, - COMPUTE_QUOTAS.keys(), - ) - if result_default != result_data: - result += result_data - - columns = ( - 'id', - 'cores', - 'fixed_ips', - 'injected_files', - 'injected_file_content_bytes', - 'injected_file_path_bytes', - 'instances', - 'key_pairs', - 'metadata_items', - 'ram', - 'server_groups', - 'server_group_members', - ) - column_headers = ( - 'Project ID', - 'Cores', - 'Fixed IPs', - 'Injected Files', - 'Injected File Content Bytes', - 'Injected File Path Bytes', - 'Instances', - 'Key Pairs', - 'Metadata Items', - 'Ram', - 'Server Groups', - 'Server Group Members', - ) - return ( - column_headers, - (utils.get_dict_properties(s, columns) for s in result), + project_result = _xform_get_quota( + project_data, + project_id, + NETWORK_KEYS, ) - if parsed_args.volume: - if parsed_args.detail: - return self._get_detailed_quotas(parsed_args) - - volume_client = self.app.client_manager.volume - for p in project_ids: - try: - data = volume_client.quotas.get(p) - except Exception as ex: - if type(ex).__name__ == 'NotFound': - # Project not found, move on to next one - LOG.warning("Project %s not found: %s" % (p, ex)) - continue - else: - raise - - result_data = _xform_get_quota( - data, - p, - VOLUME_QUOTAS.keys(), - ) - default_data = volume_client.quotas.defaults(p) - result_default = _xform_get_quota( - default_data, - p, - VOLUME_QUOTAS.keys(), - ) - if result_default != result_data: - result += result_data - - columns = ( - 'id', - 'backups', - 'backup_gigabytes', - 'gigabytes', - 'per_volume_gigabytes', - 'snapshots', - 'volumes', - ) - column_headers = ( - 'Project ID', - 'Backups', - 'Backup Gigabytes', - 'Gigabytes', - 'Per Volume Gigabytes', - 'Snapshots', - 'Volumes', + default_data = network_client.get_quota_default(project_id) + default_result = _xform_get_quota( + default_data, + project_id, + NETWORK_KEYS, ) - return ( - column_headers, - (utils.get_dict_properties(s, columns) for s in result), - ) + if default_result != project_result: + result += project_result + + columns: tuple[str, ...] 
= ( + 'id', + 'floating_ips', + 'networks', + 'ports', + 'rbac_policies', + 'routers', + 'security_groups', + 'security_group_rules', + 'subnets', + 'subnet_pools', + ) + column_headers: tuple[str, ...] = ( + 'Project ID', + 'Floating IPs', + 'Networks', + 'Ports', + 'RBAC Policies', + 'Routers', + 'Security Groups', + 'Security Group Rules', + 'Subnets', + 'Subnet Pools', + ) - if parsed_args.network: - if parsed_args.detail: - return self._get_detailed_quotas(parsed_args) - - client = self.app.client_manager.network - for p in project_ids: - try: - data = client.get_quota(p) - except Exception as ex: - if type(ex).__name__ == 'NotFound': - # Project not found, move on to next one - LOG.warning("Project %s not found: %s" % (p, ex)) - continue - else: - raise - - result_data = _xform_get_quota( - data, - p, - NETWORK_KEYS, - ) - default_data = client.get_quota_default(p) - result_default = _xform_get_quota( - default_data, - p, - NETWORK_KEYS, - ) - if result_default != result_data: - result += result_data - - columns = ( - 'id', - 'floating_ips', - 'networks', - 'ports', - 'rbac_policies', - 'routers', - 'security_groups', - 'security_group_rules', - 'subnets', - 'subnet_pools', - ) - column_headers = ( - 'Project ID', - 'Floating IPs', - 'Networks', - 'Ports', - 'RBAC Policies', - 'Routers', - 'Security Groups', - 'Security Group Rules', - 'Subnets', - 'Subnet Pools', - ) + return ( + column_headers, + (utils.get_dict_properties(s, columns) for s in result), + ) - return ( - column_headers, - (utils.get_dict_properties(s, columns) for s in result), - ) + def take_action(self, parsed_args): + project_ids = [ + p.id + for p in self.app.client_manager.sdk_connection.identity.projects() + ] + if parsed_args.compute: + return self._list_quota_compute(parsed_args, project_ids) + elif parsed_args.volume: + return self._list_quota_volume(parsed_args, project_ids) + elif parsed_args.network: + return self._list_quota_network(parsed_args, project_ids) + # will never get here return ((), ()) @@ -590,14 +492,19 @@ def _build_options_list(self): return rets def get_parser(self, prog_name): - parser = super(SetQuota, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'project', metavar='', - help=_('Set quotas for this project or class (name or ID)'), + nargs='?', + help=_( + 'Set quotas for this project or class (name or ID) ' + '(defaults to current project)' + ), ) # TODO(stephenfin): Remove in OSC 8.0 - parser.add_argument( + type_group = parser.add_mutually_exclusive_group() + type_group.add_argument( '--class', dest='quota_class', action='store_true', @@ -609,10 +516,17 @@ def get_parser(self, prog_name): '(compute and volume only)' ), ) + type_group.add_argument( + '--default', + dest='default', + action='store_true', + default=False, + help=_('Set default quotas for '), + ) for k, v, h in self._build_options_list(): parser.add_argument( - '--%s' % v, - metavar='<%s>' % v, + f'--{v}', + metavar=f'<{v}>', dest=k, type=int, help=h, @@ -627,22 +541,19 @@ def get_parser(self, prog_name): '--force', action='store_true', dest='force', - # TODO(stephenfin): Change the default to False in Z or later - default=None, + default=False, help=_( - 'Force quota update (only supported by compute and network) ' - '(default for network)' + 'Force quota update (only supported by compute and network)' ), ) force_group.add_argument( '--no-force', action='store_false', dest='force', - default=None, + default=False, help=_( 'Do not force quota update ' - '(only supported by 
compute and network) ' - '(default for compute)' + '(only supported by compute and network) (default)' ), ) # kept here for backwards compatibility/to keep the neutron folks happy @@ -650,7 +561,7 @@ def get_parser(self, prog_name): '--check-limit', action='store_false', dest='force', - default=None, + default=False, help=argparse.SUPPRESS, ) return parser @@ -662,67 +573,75 @@ def take_action(self, parsed_args): "never fully implemented and the compute and volume services " "only support a single 'default' quota class while the " "network service does not support quota classes at all. " - "Please use 'openstack quota show --default' instead." + "Please use 'openstack quota set --default' instead." ) self.log.warning(msg) - identity_client = self.app.client_manager.identity - compute_client = self.app.client_manager.compute - volume_client = self.app.client_manager.volume + if ( + parsed_args.quota_class or parsed_args.default + ) and parsed_args.force: + msg = _('--force cannot be used with --class or --default') + raise exceptions.CommandError(msg) + compute_kwargs = {} - for k, v in COMPUTE_QUOTAS.items(): - value = getattr(parsed_args, k, None) - if value is not None: - compute_kwargs[k] = value + volume_kwargs = {} + network_kwargs = {} - if parsed_args.force is not None: - compute_kwargs['force'] = parsed_args.force + if self.app.client_manager.is_compute_endpoint_enabled(): + compute_client = self.app.client_manager.compute - volume_kwargs = {} - for k, v in VOLUME_QUOTAS.items(): - value = getattr(parsed_args, k, None) - if value is not None: - if parsed_args.volume_type and k in IMPACT_VOLUME_TYPE_QUOTAS: - k = k + '_%s' % parsed_args.volume_type - volume_kwargs[k] = value + for k, v in COMPUTE_QUOTAS.items(): + value = getattr(parsed_args, k, None) + if value is not None: + compute_kwargs[k] = value - network_kwargs = {} - if parsed_args.force is True: - # Unlike compute, network doesn't provide a simple boolean option. - # Instead, it provides two options: 'force' and 'check_limit' - # (a.k.a. 'not force') - network_kwargs['force'] = True - elif parsed_args.force is False: - network_kwargs['check_limit'] = True - else: - msg = _( - "This command currently defaults to '--force' when modifying " - "network quotas. This behavior will change in a future " - "release. Consider explicitly providing '--force' or " - "'--no-force' options to avoid changes in behavior." - ) - self.log.warning(msg) + if compute_kwargs and parsed_args.force is True: + compute_kwargs['force'] = parsed_args.force + + if self.app.client_manager.is_volume_endpoint_enabled(): + volume_client = self.app.client_manager.sdk_connection.volume + + for k, v in VOLUME_QUOTAS.items(): + value = getattr(parsed_args, k, None) + if value is not None: + if ( + parsed_args.volume_type + and k in IMPACT_VOLUME_TYPE_QUOTAS + ): + k = k + f'_{parsed_args.volume_type}' + volume_kwargs[k] = value if self.app.client_manager.is_network_endpoint_enabled(): + network_client = self.app.client_manager.network + for k, v in NETWORK_QUOTAS.items(): value = getattr(parsed_args, k, None) if value is not None: network_kwargs[k] = value - else: + elif self.app.client_manager.is_compute_endpoint_enabled(): for k, v in NOVA_NETWORK_QUOTAS.items(): value = getattr(parsed_args, k, None) if value is not None: compute_kwargs[k] = value - if parsed_args.quota_class: + if network_kwargs: + if parsed_args.force is True: + # Unlike compute, network doesn't provide a simple boolean + # option. 
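The branch being added here (the comment carries on just below) maps the command's --force/--no-force flags onto Neutron's two knobs, 'force' and 'check_limit'. A rough sketch of the resulting SDK call; the cloud name, project ID, and limit values are all placeholders.

    import openstack

    conn = openstack.connect(cloud='devstack-admin')  # assumed cloud name
    project_id = 'PLACEHOLDER_PROJECT_ID'             # hypothetical project

    new_limits = {'ports': 500, 'routers': 20}        # illustrative values
    force = False                                     # i.e. --no-force, the default

    if force:
        new_limits['force'] = True        # skip Neutron's usage validation
    else:
        new_limits['check_limit'] = True  # let Neutron validate against current usage

    conn.network.update_quota(project_id, **new_limits)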
Instead, it provides two options: 'force' and + # 'check_limit' (a.k.a. 'not force') + network_kwargs['force'] = True + else: + network_kwargs['check_limit'] = True + + if parsed_args.quota_class or parsed_args.default: if compute_kwargs: - compute_client.quota_classes.update( - parsed_args.project, + compute_client.update_quota_class_set( + parsed_args.project or 'default', **compute_kwargs, ) if volume_kwargs: - volume_client.quota_classes.update( - parsed_args.project, + volume_client.update_quota_class_set( + parsed_args.project or 'default', **volume_kwargs, ) if network_kwargs: @@ -730,57 +649,40 @@ def take_action(self, parsed_args): "Network quotas are ignored since quota classes are not " "supported." ) - else: - project = utils.find_resource( - identity_client.projects, - parsed_args.project, - ).id - if compute_kwargs: - compute_client.quotas.update(project, **compute_kwargs) - if volume_kwargs: - volume_client.quotas.update(project, **volume_kwargs) - if ( - network_kwargs - and self.app.client_manager.is_network_endpoint_enabled() - ): - network_client = self.app.client_manager.network - network_client.update_quota(project, **network_kwargs) + return + + project_info = get_project(self.app, parsed_args.project) + project = project_info['id'] + + if compute_kwargs: + compute_client.update_quota_set(project, **compute_kwargs) + if volume_kwargs: + volume_client.update_quota_set(project, **volume_kwargs) + if network_kwargs: + network_client.update_quota(project, **network_kwargs) class ShowQuota(command.Lister): - _description = _("""Show quotas for project or class. + _description = _( + """Show quotas for project or class. Specify ``--os-compute-api-version 2.50`` or higher to see ``server-groups`` -and ``server-group-members`` output for a given quota class.""") +and ``server-group-members`` output for a given quota class.""" + ) def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( 'project', - metavar='', + metavar='', nargs='?', help=_( - 'Show quotas for this project or class (name or ID) ' + 'Show quotas for this project (name or ID) ' '(defaults to current project)' ), ) type_group = parser.add_mutually_exclusive_group() - # TODO(stephenfin): Remove in OSC 8.0 - type_group.add_argument( - '--class', - dest='quota_class', - action='store_true', - default=False, - help=_( - '**Deprecated** Show quotas for . ' - 'Deprecated as quota classes were never fully implemented ' - 'and only the default class is supported. ' - 'Use --default instead which is also supported by the network ' - 'service. ' - '(compute and volume only)' - ), - ) type_group.add_argument( '--default', dest='default', @@ -832,20 +734,8 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - project = parsed_args.project - - if parsed_args.quota_class: - msg = _( - "The '--class' option has been deprecated. Quota classes were " - "never fully implemented and the compute and volume services " - "only support a single 'default' quota class while the " - "network service does not support quota classes at all. " - "Please use 'openstack quota show --default' instead." - ) - self.log.warning(msg) - else: - project_info = get_project(self.app, parsed_args.project) - project = project_info['id'] + project_info = get_project(self.app, parsed_args.project) + project = project_info['id'] compute_quota_info = {} volume_quota_info = {} @@ -856,69 +746,105 @@ def take_action(self, parsed_args): # values if the project or class does not exist. 
This is expected # behavior. However, we have already checked for the presence of the # project above so it shouldn't be an issue. - if parsed_args.service in {'all', 'compute'}: + if parsed_args.service == 'compute' or ( + parsed_args.service == 'all' + and self.app.client_manager.is_compute_endpoint_enabled() + ): compute_quota_info = get_compute_quotas( self.app, project, detail=parsed_args.usage, - quota_class=parsed_args.quota_class, default=parsed_args.default, ) - if parsed_args.service in {'all', 'volume'}: + + if parsed_args.service == 'volume' or ( + parsed_args.service == 'all' + and self.app.client_manager.is_volume_endpoint_enabled() + ): volume_quota_info = get_volume_quotas( self.app, project, detail=parsed_args.usage, - quota_class=parsed_args.quota_class, default=parsed_args.default, ) - if parsed_args.service in {'all', 'network'}: + + if parsed_args.service == 'network' or ( + parsed_args.service == 'all' + and self.app.client_manager.is_network_endpoint_enabled() + ): network_quota_info = get_network_quotas( self.app, project, detail=parsed_args.usage, - quota_class=parsed_args.quota_class, default=parsed_args.default, ) info = {} + if parsed_args.usage: + info["reservation"] = compute_quota_info.pop("reservation", {}) + info["reservation"].update( + volume_quota_info.pop("reservation", {}) + ) + info["reservation"].update( + network_quota_info.pop("reservation", {}) + ) + + info["usage"] = compute_quota_info.pop("usage", {}) + info["usage"].update(volume_quota_info.pop("usage", {})) + info["usage"].update(network_quota_info.pop("usage", {})) + info.update(compute_quota_info) info.update(volume_quota_info) info.update(network_quota_info) - # Map the internal quota names to the external ones - # COMPUTE_QUOTAS and NETWORK_QUOTAS share floating-ips, - # secgroup-rules and secgroups as dict value, so when - # neutron is enabled, quotas of these three resources - # in nova will be replaced by neutron's. - for k, v in itertools.chain( - COMPUTE_QUOTAS.items(), - NOVA_NETWORK_QUOTAS.items(), - VOLUME_QUOTAS.items(), - NETWORK_QUOTAS.items(), - ): - if not k == v and info.get(k) is not None: - info[v] = info[k] - info.pop(k) + def _normalize_names(section: dict) -> None: + # Map the internal quota names to the external ones + # COMPUTE_QUOTAS and NETWORK_QUOTAS share floating-ips, + # secgroup-rules and secgroups as dict value, so when + # neutron is enabled, quotas of these three resources + # in nova will be replaced by neutron's. 
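The helper defined here renames internal quota keys to their user-facing names (its loop follows just below). A toy, self-contained illustration of the same rename pass; the mapping entries are illustrative stand-ins, not the real COMPUTE_QUOTAS/NETWORK_QUOTAS contents.

    import itertools

    # Illustrative stand-ins for the real COMPUTE_QUOTAS / NETWORK_QUOTAS maps.
    COMPUTE_QUOTAS = {'metadata_items': 'properties'}
    NETWORK_QUOTAS = {'security_group_rules': 'secgroup-rules'}

    info = {'metadata_items': 128, 'security_group_rules': 100, 'cores': 20}

    for internal, external in itertools.chain(
        COMPUTE_QUOTAS.items(), NETWORK_QUOTAS.items()
    ):
        if internal != external and info.get(internal) is not None:
            info[external] = info.pop(internal)

    # info is now {'cores': 20, 'properties': 128, 'secgroup-rules': 100}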
+ for k, v in itertools.chain( + COMPUTE_QUOTAS.items(), + NOVA_NETWORK_QUOTAS.items(), + VOLUME_QUOTAS.items(), + NETWORK_QUOTAS.items(), + ): + if not k == v and section.get(k) is not None: + section[v] = section.pop(k) + + _normalize_names(info) + if parsed_args.usage: + _normalize_names(info["reservation"]) + _normalize_names(info["usage"]) # Remove the 'id' field since it's not very useful if 'id' in info: del info['id'] - # Remove the 'location' field for resources from openstacksdk - if 'location' in info: - del info['location'] + # Remove the sdk-derived fields + for field in ('location', 'name', 'force'): + if field in info: + del info[field] if not parsed_args.usage: result = [{'resource': k, 'limit': v} for k, v in info.items()] else: - result = [{'resource': k, **v} for k, v in info.items()] - - columns = ( + result = [ + { + 'resource': k, + 'limit': v or 0, + 'in_use': info['usage'].get(k, 0), + 'reserved': info['reservation'].get(k, 0), + } + for k, v in info.items() + if k not in ('usage', 'reservation') + ] + + columns: tuple[str, ...] = ( 'resource', 'limit', ) - column_headers = ( + column_headers: tuple[str, ...] = ( 'Resource', 'Limit', ) @@ -990,21 +916,26 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - project = utils.find_resource( - identity_client.projects, - parsed_args.project, + identity_client = self.app.client_manager.sdk_connection.identity + project = identity_client.find_project( + parsed_args.project, ignore_missing=False ) # compute quotas - if parsed_args.service in {'all', 'compute'}: + if parsed_args.service == 'compute' or ( + parsed_args.service == 'all' + and self.app.client_manager.is_compute_endpoint_enabled() + ): compute_client = self.app.client_manager.compute - compute_client.quotas.delete(project.id) + compute_client.revert_quota_set(project.id) # volume quotas - if parsed_args.service in {'all', 'volume'}: - volume_client = self.app.client_manager.volume - volume_client.quotas.delete(project.id) + if parsed_args.service == 'volume' or ( + parsed_args.service == 'all' + and self.app.client_manager.is_volume_endpoint_enabled() + ): + volume_client = self.app.client_manager.sdk_connection.volume + volume_client.revert_quota_set(project.id) # network quotas (but only if we're not using nova-network, otherwise # we already deleted the quotas in the compute step) diff --git a/openstackclient/common/versions.py b/openstackclient/common/versions.py index 3acd9f73d7..dfd84e059d 100644 --- a/openstackclient/common/versions.py +++ b/openstackclient/common/versions.py @@ -14,8 +14,7 @@ """Versions Action Implementation""" -from osc_lib.command import command - +from openstackclient import command from openstackclient.i18n import _ @@ -23,7 +22,7 @@ class ShowVersions(command.Lister): _description = _("Show available versions of services") def get_parser(self, prog_name): - parser = super(ShowVersions, self).get_parser(prog_name) + parser = super().get_parser(prog_name) interface_group = parser.add_mutually_exclusive_group() interface_group.add_argument( "--all-interfaces", @@ -46,26 +45,29 @@ def get_parser(self, prog_name): parser.add_argument( '--service', metavar='', - help=_('Show versions for a specific service. The argument should ' - 'be either an exact match to what is in the catalog or a ' - 'known official value or alias from ' - 'service-types-authority ' - '(https://service-types.openstack.org/)'), + help=_( + 'Show versions for a specific service. 
The argument should ' + 'be either an exact match to what is in the catalog or a ' + 'known official value or alias from ' + 'service-types-authority ' + '(https://service-types.openstack.org/)' + ), ) parser.add_argument( '--status', metavar='', - help=_("""Show versions for a specific status. Valid values are: + help=_( + """Show versions for a specific status. Valid values are: - SUPPORTED - CURRENT - DEPRECATED -- EXPERIMENTAL""") +- EXPERIMENTAL""" + ), ) return parser def take_action(self, parsed_args): - interface = parsed_args.interface if parsed_args.is_all_interfaces: interface = None @@ -74,7 +76,8 @@ def take_action(self, parsed_args): version_data = session.get_all_version_data( interface=interface, region_name=parsed_args.region_name, - service_type=parsed_args.service) + service_type=parsed_args.service, + ) columns = [ "Region Name", @@ -97,13 +100,15 @@ def take_action(self, parsed_args): for data in service_versions: if status and status != data['status']: continue - versions.append(( - region_name, - service_type, - data['version'], - data['status'], - data['url'], - data['min_microversion'], - data['max_microversion'], - )) + versions.append( + ( + region_name, + service_type, + data['version'], + data['status'], + data['url'], + data['min_microversion'], + data['max_microversion'], + ) + ) return (columns, versions) diff --git a/openstackclient/compute/client.py b/openstackclient/compute/client.py index 6abfef0425..73ce2f87d5 100644 --- a/openstackclient/compute/client.py +++ b/openstackclient/compute/client.py @@ -11,95 +11,29 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# import logging -from osc_lib import exceptions from osc_lib import utils from openstackclient.i18n import _ - LOG = logging.getLogger(__name__) +# global variables used when building the shell DEFAULT_API_VERSION = '2.1' API_VERSION_OPTION = 'os_compute_api_version' API_NAME = 'compute' -API_VERSIONS = { - "2": "novaclient.client", - "2.1": "novaclient.client", -} - -COMPUTE_API_TYPE = 'compute' -COMPUTE_API_VERSIONS = { - '2': 'openstackclient.api.compute_v2.APIv2', -} - -# Save the microversion if in use -_compute_api_version = None +API_VERSIONS = ('2', '2.1') def make_client(instance): """Returns a compute service client.""" - - # Defer client import until we actually need them - from novaclient import client as nova_client - - if _compute_api_version is not None: - version = _compute_api_version - else: - version = instance._api_version[API_NAME] - from novaclient import api_versions - # convert to APIVersion object - version = api_versions.get_api_version(version) - - if version.is_latest(): - import novaclient - # NOTE(RuiChen): executing version discovery make sense, but that need - # an initialized REST client, it's not available now, - # fallback to use the max version of novaclient side. 
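Under the hood, the get_all_version_data() call further up in versions.py leans entirely on keystoneauth's version discovery. A small sketch of that call, assuming an authenticated openstacksdk connection; judging from the loop in take_action, the result is keyed by region and then service type.

    import openstack

    conn = openstack.connect(cloud='devstack')  # assumed cloud name
    version_data = conn.session.get_all_version_data(
        interface='public',       # None would cover every interface
        service_type='compute',   # official type or alias from the catalog
    )
    for region_name, services in version_data.items():
        for service_type, service_versions in services.items():
            for data in service_versions:
                print(
                    region_name,
                    service_type,
                    data['version'],
                    data['status'],
                    data['url'],
                )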
- version = novaclient.API_MAX_VERSION - - LOG.debug('Instantiating compute client for %s', version) - - compute_api = utils.get_client_class( - API_NAME, - version.ver_major, - COMPUTE_API_VERSIONS, - ) - LOG.debug('Instantiating compute api: %s', compute_api) - - # Set client http_log_debug to True if verbosity level is high enough - http_log_debug = utils.get_effective_log_level() <= logging.DEBUG - - extensions = [ext for ext in nova_client.discover_extensions(version) - if ext.name == "list_extensions"] - - # Remember interface only if it is set - kwargs = utils.build_kwargs_dict('endpoint_type', instance.interface) - - client = nova_client.Client( - version, - session=instance.session, - extensions=extensions, - http_log_debug=http_log_debug, - timings=instance.timing, - region_name=instance.region_name, - **kwargs - ) - - client.api = compute_api( - session=instance.session, - service_type=COMPUTE_API_TYPE, - endpoint=instance.get_endpoint_for_service_type( - COMPUTE_API_TYPE, - region_name=instance.region_name, - interface=instance.interface, - ) + LOG.debug( + 'Compute client initialized using OpenStack SDK: %s', + instance.sdk_connection.compute, ) - - return client + return instance.sdk_connection.compute def build_option_parser(parser): @@ -108,46 +42,12 @@ def build_option_parser(parser): '--os-compute-api-version', metavar='', default=utils.env('OS_COMPUTE_API_VERSION'), - help=_("Compute API version, default=%s " - "(Env: OS_COMPUTE_API_VERSION)") % DEFAULT_API_VERSION + help=_("Compute API version, default=%s (Env: OS_COMPUTE_API_VERSION)") + % DEFAULT_API_VERSION, ) return parser def check_api_version(check_version): - """Validate version supplied by user - - Returns: - - * True if version is OK - * False if the version has not been checked and the previous plugin - check should be performed - * throws an exception if the version is no good - - TODO(dtroyer): make the exception thrown a version-related one - """ - - # Defer client imports until we actually need them - import novaclient - from novaclient import api_versions - - global _compute_api_version - - # Copy some logic from novaclient 3.3.0 for basic version detection - # NOTE(dtroyer): This is only enough to resume operations using API - # version 2.0 or any valid version supplied by the user. 
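The net effect of this compute/client.py rewrite is that make_client() simply hands back the SDK's compute proxy and version handling becomes per-call microversion probing. A brief sketch of what callers do instead; the cloud name is an assumption.

    import openstack
    from openstack import utils as sdk_utils

    conn = openstack.connect(cloud='devstack')  # assumed cloud name
    compute_client = conn.compute               # what make_client() now returns

    # Individual commands gate features on negotiated microversions, e.g.
    # the aggregate UUID column later in this series needs at least 2.41.
    if sdk_utils.supports_microversion(compute_client, '2.41'):
        print('aggregate UUIDs are available')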
- _compute_api_version = api_versions.get_api_version(check_version) - - # Bypass X.latest format microversion - if not _compute_api_version.is_latest(): - if _compute_api_version > api_versions.APIVersion("2.0"): - if not _compute_api_version.matches( - novaclient.API_MIN_VERSION, - novaclient.API_MAX_VERSION, - ): - msg = _("versions supported by client: %(min)s - %(max)s") % { - "min": novaclient.API_MIN_VERSION.get_string(), - "max": novaclient.API_MAX_VERSION.get_string(), - } - raise exceptions.CommandError(msg) + # SDK supports auto-negotiation for us: always return True return True diff --git a/openstackclient/compute/v2/agent.py b/openstackclient/compute/v2/agent.py index 15fb0f9c36..71b68d4c61 100644 --- a/openstackclient/compute/v2/agent.py +++ b/openstackclient/compute/v2/agent.py @@ -17,10 +17,11 @@ import logging -from osc_lib.command import command +from openstack import exceptions as sdk_exceptions from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -36,51 +37,45 @@ class CreateAgent(command.ShowOne): """ def get_parser(self, prog_name): - parser = super(CreateAgent, self).get_parser(prog_name) - parser.add_argument( - "os", - metavar="", - help=_("Type of OS") - ) + parser = super().get_parser(prog_name) + parser.add_argument("os", metavar="", help=_("Type of OS")) parser.add_argument( "architecture", metavar="", - help=_("Type of architecture") - ) - parser.add_argument( - "version", - metavar="", - help=_("Version") - ) - parser.add_argument( - "url", - metavar="", - help=_("URL") - ) - parser.add_argument( - "md5hash", - metavar="", - help=_("MD5 hash") + help=_("Type of architecture"), ) + parser.add_argument("version", metavar="", help=_("Version")) + parser.add_argument("url", metavar="", help=_("URL")) + parser.add_argument("md5hash", metavar="", help=_("MD5 hash")) parser.add_argument( "hypervisor", metavar="", default="xen", - help=_("Type of hypervisor") + help=_("Type of hypervisor"), ) return parser def take_action(self, parsed_args): compute_client = self.app.client_manager.compute - args = ( - parsed_args.os, - parsed_args.architecture, - parsed_args.version, - parsed_args.url, - parsed_args.md5hash, - parsed_args.hypervisor + + # doing this since openstacksdk has decided not to support this + # deprecated command + data = { + 'agent': { + 'hypervisor': parsed_args.hypervisor, + 'os': parsed_args.os, + 'architecture': parsed_args.architecture, + 'version': parsed_args.version, + 'url': parsed_args.url, + 'md5hash': parsed_args.md5hash, + }, + } + response = compute_client.post( + '/os-agents', json=data, microversion='2.1' ) - agent = compute_client.agents.create(*args)._info.copy() + sdk_exceptions.raise_from_response(response) + agent = response.json().get('agent') + return zip(*sorted(agent.items())) @@ -93,12 +88,9 @@ class DeleteAgent(command.Command): """ def get_parser(self, prog_name): - parser = super(DeleteAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - "id", - metavar="", - nargs='+', - help=_("ID of agent(s) to delete") + "id", metavar="", nargs='+', help=_("ID of agent(s) to delete") ) return parser @@ -107,16 +99,25 @@ def take_action(self, parsed_args): result = 0 for id in parsed_args.id: try: - compute_client.agents.delete(id) + # doing this since openstacksdk has decided not to support this + # deprecated command + response = compute_client.delete( + f'/os-agents/{id}', microversion='2.1' + ) + 
sdk_exceptions.raise_from_response(response) except Exception as e: result += 1 - LOG.error(_("Failed to delete agent with ID '%(id)s': %(e)s"), - {'id': id, 'e': e}) + LOG.error( + _("Failed to delete agent with ID '%(id)s': %(e)s"), + {'id': id, 'e': e}, + ) if result > 0: total = len(parsed_args.id) - msg = (_("%(result)s of %(total)s agents failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s agents failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -129,11 +130,11 @@ class ListAgent(command.Lister): """ def get_parser(self, prog_name): - parser = super(ListAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--hypervisor", metavar="", - help=_("Type of hypervisor") + help=_("Type of hypervisor"), ) return parser @@ -146,73 +147,81 @@ def take_action(self, parsed_args): "Architecture", "Version", "Md5Hash", - "URL" + "URL", ) - data = compute_client.agents.list(parsed_args.hypervisor) - return (columns, - (utils.get_item_properties( - s, columns, - ) for s in data)) + + # doing this since openstacksdk has decided not to support this + # deprecated command + path = '/os-agents' + if parsed_args.hypervisor: + path += f'?hypervisor={parsed_args.hypervisor}' + + response = compute_client.get(path, microversion='2.1') + sdk_exceptions.raise_from_response(response) + agents = response.json().get('agents') + + return columns, (utils.get_dict_properties(s, columns) for s in agents) class SetAgent(command.Command): """Set compute agent properties. - The compute agent functionality is hypervisor specific and is only + The compute agent functionality is hypervisor-specific and is only supported by the XenAPI hypervisor driver. It was removed from nova in the 23.0.0 (Wallaby) release. 
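Because openstacksdk deliberately does not wrap the deprecated os-agents API, the agent commands in this file drop to plain REST calls through the compute proxy (a keystoneauth adapter). A standalone sketch of that pattern; the cloud name is assumed.

    import openstack
    from openstack import exceptions as sdk_exceptions

    conn = openstack.connect(cloud='devstack-admin')  # assumed cloud name
    compute_client = conn.compute

    # Plain GET against the deprecated endpoint, pinned to microversion 2.1.
    response = compute_client.get('/os-agents', microversion='2.1')
    sdk_exceptions.raise_from_response(response)

    for agent in response.json().get('agents', []):
        print(agent['agent_id'], agent['hypervisor'], agent['url'])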
""" def get_parser(self, prog_name): - parser = super(SetAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "id", metavar="", - help=_("ID of the agent") + type=int, + help=_("ID of the agent"), ) parser.add_argument( "--agent-version", dest="version", metavar="", - help=_("Version of the agent") + help=_("Version of the agent"), ) parser.add_argument( - "--url", - metavar="", - help=_("URL of the agent") + "--url", metavar="", help=_("URL of the agent") ) parser.add_argument( - "--md5hash", - metavar="", - help=_("MD5 hash of the agent") + "--md5hash", metavar="", help=_("MD5 hash of the agent") ) return parser def take_action(self, parsed_args): compute_client = self.app.client_manager.compute - data = compute_client.agents.list(hypervisor=None) - agent = {} - - for s in data: - if s.agent_id == int(parsed_args.id): - agent['version'] = s.version - agent['url'] = s.url - agent['md5hash'] = s.md5hash - if agent == {}: + + response = compute_client.get('/os-agents', microversion='2.1') + sdk_exceptions.raise_from_response(response) + agents = response.json().get('agents') + data = {} + for agent in agents: + if agent['agent_id'] == parsed_args.id: + data['version'] = agent['version'] + data['url'] = agent['url'] + data['md5hash'] = agent['md5hash'] + break + else: msg = _("No agent with a ID of '%(id)s' exists.") - raise exceptions.CommandError(msg % parsed_args.id) + raise exceptions.CommandError(msg % {'id': parsed_args.id}) if parsed_args.version: - agent['version'] = parsed_args.version + data['version'] = parsed_args.version if parsed_args.url: - agent['url'] = parsed_args.url + data['url'] = parsed_args.url if parsed_args.md5hash: - agent['md5hash'] = parsed_args.md5hash + data['md5hash'] = parsed_args.md5hash + + data = {'para': data} - args = ( - parsed_args.id, - agent['version'], - agent['url'], - agent['md5hash'], + # doing this since openstacksdk has decided not to support this + # deprecated command + response = compute_client.put( + f'/os-agents/{parsed_args.id}', json=data, microversion='2.1' ) - compute_client.agents.update(*args) + sdk_exceptions.raise_from_response(response) diff --git a/openstackclient/compute/v2/aggregate.py b/openstackclient/compute/v2/aggregate.py index 37522a788a..f8c3d9677e 100644 --- a/openstackclient/compute/v2/aggregate.py +++ b/openstackclient/compute/v2/aggregate.py @@ -17,21 +17,23 @@ """Compute v2 Aggregate action implementations""" import logging +import typing as ty +from cliff import columns from openstack import utils as sdk_utils from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) -_aggregate_formatters = { +_aggregate_formatters: dict[str, type[columns.FormattableColumn[ty.Any]]] = { 'Hosts': format_columns.ListColumn, 'Metadata': format_columns.DictColumn, 'hosts': format_columns.ListColumn, @@ -47,38 +49,40 @@ def _get_aggregate_columns(item): } hidden_columns = ['links', 'location'] return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, column_map, hidden_columns + ) class AddAggregateHost(command.ShowOne): _description = _("Add host to aggregate") def get_parser(self, prog_name): - parser = super(AddAggregateHost, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'aggregate', metavar='', 
- help=_("Aggregate (name or ID)") + help=_("Aggregate (name or ID)"), ) parser.add_argument( - 'host', - metavar='', - help=_("Host to add to ") + 'host', metavar='', help=_("Host to add to ") ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute aggregate = compute_client.find_aggregate( - parsed_args.aggregate, ignore_missing=False) + parsed_args.aggregate, ignore_missing=False + ) aggregate = compute_client.add_host_to_aggregate( - aggregate.id, parsed_args.host) + aggregate.id, parsed_args.host + ) display_columns, columns = _get_aggregate_columns(aggregate) data = utils.get_item_properties( - aggregate, columns, formatters=_aggregate_formatters) + aggregate, columns, formatters=_aggregate_formatters + ) return (display_columns, data) @@ -86,29 +90,29 @@ class CreateAggregate(command.ShowOne): _description = _("Create a new aggregate") def get_parser(self, prog_name): - parser = super(CreateAggregate, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - "name", - metavar="", - help=_("New aggregate name") + "name", metavar="", help=_("New aggregate name") ) parser.add_argument( "--zone", metavar="", - help=_("Availability zone name") + help=_("Availability zone name"), ) parser.add_argument( "--property", metavar="", action=parseractions.KeyValueAction, dest="properties", - help=_("Property to add to this aggregate " - "(repeat option to set multiple properties)") + help=_( + "Property to add to this aggregate " + "(repeat option to set multiple properties)" + ), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute attrs = {'name': parsed_args.name} @@ -125,7 +129,8 @@ def take_action(self, parsed_args): display_columns, columns = _get_aggregate_columns(aggregate) data = utils.get_item_properties( - aggregate, columns, formatters=_aggregate_formatters) + aggregate, columns, formatters=_aggregate_formatters + ) return (display_columns, data) @@ -133,34 +138,42 @@ class DeleteAggregate(command.Command): _description = _("Delete existing aggregate(s)") def get_parser(self, prog_name): - parser = super(DeleteAggregate, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'aggregate', metavar='', nargs='+', - help=_("Aggregate(s) to delete (name or ID)") + help=_("Aggregate(s) to delete (name or ID)"), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute result = 0 for a in parsed_args.aggregate: try: aggregate = compute_client.find_aggregate( - a, ignore_missing=False) + a, ignore_missing=False + ) compute_client.delete_aggregate( - aggregate.id, ignore_missing=False) + aggregate.id, ignore_missing=False + ) except Exception as e: result += 1 - LOG.error(_("Failed to delete aggregate with name or " - "ID '%(aggregate)s': %(e)s"), - {'aggregate': a, 'e': e}) + LOG.error( + _( + "Failed to delete aggregate with name or " + "ID '%(aggregate)s': %(e)s" + ), + {'aggregate': a, 'e': e}, + ) if result > 0: total = len(parsed_args.aggregate) - msg = (_("%(result)s of %(total)s aggregates failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s aggregates failed to delete.") % { + 'result': result, + 'total': total, + } raise 
exceptions.CommandError(msg) @@ -168,83 +181,92 @@ class ListAggregate(command.Lister): _description = _("List all aggregates") def get_parser(self, prog_name): - parser = super(ListAggregate, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute aggregates = list(compute_client.aggregates()) + if sdk_utils.supports_microversion(compute_client, '2.41'): + column_headers: tuple[str, ...] = ("ID", "UUID") + columns: tuple[str, ...] = ("id", "uuid") + else: + column_headers = ("ID",) + columns = ("id",) + + column_headers += ( + "Name", + "Availability Zone", + ) + columns += ( + "name", + "availability_zone", + ) + if parsed_args.long: # Remove availability_zone from metadata because Nova doesn't for aggregate in aggregates: if 'availability_zone' in aggregate.metadata: aggregate.metadata.pop('availability_zone') - # This is the easiest way to change column headers - column_headers = ( - "ID", - "Name", - "Availability Zone", + + column_headers += ( "Properties", "Hosts", ) - columns = ( - "ID", - "Name", - "Availability Zone", - "Metadata", - "Hosts", - ) - else: - column_headers = columns = ( - "ID", - "Name", - "Availability Zone", + columns += ( + "metadata", + "hosts", ) - data = ( - utils.get_item_properties( - s, columns, formatters=_aggregate_formatters - ) for s in aggregates) - return (column_headers, data) + return ( + column_headers, + ( + utils.get_item_properties( + s, columns, formatters=_aggregate_formatters + ) + for s in aggregates + ), + ) class RemoveAggregateHost(command.ShowOne): _description = _("Remove host from aggregate") def get_parser(self, prog_name): - parser = super(RemoveAggregateHost, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'aggregate', metavar='', - help=_("Aggregate (name or ID)") + help=_("Aggregate (name or ID)"), ) parser.add_argument( - 'host', - metavar='', - help=_("Host to remove from ") + 'host', metavar='', help=_("Host to remove from ") ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute aggregate = compute_client.find_aggregate( - parsed_args.aggregate, ignore_missing=False) + parsed_args.aggregate, ignore_missing=False + ) aggregate = compute_client.remove_host_from_aggregate( - aggregate.id, parsed_args.host) + aggregate.id, parsed_args.host + ) display_columns, columns = _get_aggregate_columns(aggregate) data = utils.get_item_properties( - aggregate, columns, formatters=_aggregate_formatters) + aggregate, columns, formatters=_aggregate_formatters + ) return (display_columns, data) @@ -252,44 +274,46 @@ class SetAggregate(command.Command): _description = _("Set aggregate properties") def get_parser(self, prog_name): - parser = super(SetAggregate, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'aggregate', metavar='', - help=_("Aggregate to modify (name or ID)") + help=_("Aggregate to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar='', - help=_("Set aggregate name") + '--name', metavar='', help=_("Set aggregate name") ) parser.add_argument( "--zone", metavar="", - help=_("Set 
availability zone name") + help=_("Set availability zone name"), ) parser.add_argument( "--property", metavar="", action=parseractions.KeyValueAction, dest="properties", - help=_("Property to set on " - "(repeat option to set multiple properties)") + help=_( + "Property to set on " + "(repeat option to set multiple properties)" + ), ) parser.add_argument( "--no-property", action="store_true", - help=_("Remove all properties from " - "(specify both --property and --no-property to " - "overwrite the current properties)"), + help=_( + "Remove all properties from " + "(specify both --property and --no-property to " + "overwrite the current properties)" + ), ) return parser def take_action(self, parsed_args): - - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute aggregate = compute_client.find_aggregate( - parsed_args.aggregate, ignore_missing=False) + parsed_args.aggregate, ignore_missing=False + ) kwargs = {} if parsed_args.name: @@ -299,14 +323,17 @@ def take_action(self, parsed_args): if kwargs: compute_client.update_aggregate(aggregate.id, **kwargs) - properties = {} + properties: dict[str, ty.Any] = {} if parsed_args.no_property: # NOTE(RuiChen): "availability_zone" can not be unset from # properties. It is already excluded from show and create output. - properties.update({ - key: None for key in aggregate.metadata.keys() - if key != 'availability_zone' - }) + properties.update( + { + key: None + for key in aggregate.metadata.keys() + if key != 'availability_zone' + } + ) if parsed_args.properties: properties.update(parsed_args.properties) @@ -319,19 +346,19 @@ class ShowAggregate(command.ShowOne): _description = _("Display aggregate details") def get_parser(self, prog_name): - parser = super(ShowAggregate, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'aggregate', metavar='', - help=_("Aggregate to display (name or ID)") + help=_("Aggregate to display (name or ID)"), ) return parser def take_action(self, parsed_args): - - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute aggregate = compute_client.find_aggregate( - parsed_args.aggregate, ignore_missing=False) + parsed_args.aggregate, ignore_missing=False + ) # Remove availability_zone from metadata because Nova doesn't if 'availability_zone' in aggregate.metadata: @@ -339,7 +366,8 @@ def take_action(self, parsed_args): display_columns, columns = _get_aggregate_columns(aggregate) data = utils.get_item_properties( - aggregate, columns, formatters=_aggregate_formatters) + aggregate, columns, formatters=_aggregate_formatters + ) return (display_columns, data) @@ -347,11 +375,11 @@ class UnsetAggregate(command.Command): _description = _("Unset aggregate properties") def get_parser(self, prog_name): - parser = super(UnsetAggregate, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "aggregate", metavar="", - help=_("Aggregate to modify (name or ID)") + help=_("Aggregate to modify (name or ID)"), ) parser.add_argument( "--property", @@ -359,15 +387,18 @@ def get_parser(self, prog_name): action="append", default=[], dest="properties", - help=_("Property to remove from aggregate " - "(repeat option to remove multiple properties)") + help=_( + "Property to remove from aggregate " + "(repeat option to remove multiple properties)" + ), ) return parser def take_action(self, parsed_args): - compute_client = 
self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute aggregate = compute_client.find_aggregate( - parsed_args.aggregate, ignore_missing=False) + parsed_args.aggregate, ignore_missing=False + ) properties = {key: None for key in parsed_args.properties} @@ -381,23 +412,25 @@ class CacheImageForAggregate(command.Command): # not be anything to return. def get_parser(self, prog_name): - parser = super(CacheImageForAggregate, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'aggregate', metavar='', - help=_("Aggregate (name or ID)") + help=_("Aggregate (name or ID)"), ) parser.add_argument( 'image', metavar='', nargs='+', - help=_("Image ID to request caching for aggregate (name or ID). " - "May be specified multiple times.") + help=_( + "Image ID to request caching for aggregate (name or ID). " + "May be specified multiple times." + ), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute if not sdk_utils.supports_microversion(compute_client, '2.81'): msg = _( @@ -406,13 +439,15 @@ def take_action(self, parsed_args): ) raise exceptions.CommandError(msg) + image_client = self.app.client_manager.sdk_connection.image + aggregate = compute_client.find_aggregate( - parsed_args.aggregate, ignore_missing=False) + parsed_args.aggregate, ignore_missing=False + ) images = [] for img in parsed_args.image: - image = self.app.client_manager.sdk_connection.image.find_image( - img, ignore_missing=False) + image = image_client.find_image(img, ignore_missing=False) images.append(image.id) compute_client.aggregate_precache_images(aggregate.id, images) diff --git a/openstackclient/compute/v2/console.py b/openstackclient/compute/v2/console.py index 0ab5c8a2a3..cbcce4b09f 100644 --- a/openstackclient/compute/v2/console.py +++ b/openstackclient/compute/v2/console.py @@ -16,30 +16,30 @@ """Compute v2 Console action implementations""" from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ def _get_console_columns(item): # To maintain backwards compatibility we need to rename sdk props to # whatever OSC was using before - column_map = {} hidden_columns = ['id', 'links', 'location', 'name'] return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, {}, hidden_columns + ) class ShowConsoleLog(command.Command): _description = _("Show server's console output") def get_parser(self, prog_name): - parser = super(ShowConsoleLog, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', - help=_("Server to show console log (name or ID)") + help=_("Server to show console log (name or ID)"), ) parser.add_argument( '--lines', @@ -47,39 +47,43 @@ def get_parser(self, prog_name): type=int, default=None, action=parseractions.NonNegativeAction, - help=_("Number of lines to display from the end of the log " - "(default=all)") + help=_( + "Number of lines to display from the end of the log " + "(default=all)" + ), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute server = compute_client.find_server( - name_or_id=parsed_args.server, - ignore_missing=False + name_or_id=parsed_args.server, ignore_missing=False ) output = 
compute_client.get_server_console_output( - server.id, length=parsed_args.lines) - data = None + server.id, length=parsed_args.lines + ) + data: str | None = None if output: data = output.get('output', None) if data and data[-1] != '\n': data += '\n' - self.app.stdout.write(data) + + if data: + self.app.stdout.write(data) class ShowConsoleURL(command.ShowOne): _description = _("Show server's remote console URL") def get_parser(self, prog_name): - parser = super(ShowConsoleURL, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', - help=_("Server to show URL (name or ID)") + help=_("Server to show URL (name or ID)"), ) type_group = parser.add_mutually_exclusive_group() type_group.add_argument( @@ -88,21 +92,28 @@ def get_parser(self, prog_name): action='store_const', const='novnc', default='novnc', - help=_("Show noVNC console URL (default)") + help=_("Show noVNC console URL (default)"), ) type_group.add_argument( '--xvpvnc', dest='url_type', action='store_const', const='xvpvnc', - help=_("Show xvpvnc console URL") + help=_("Show xvpvnc console URL"), ) type_group.add_argument( '--spice', dest='url_type', action='store_const', const='spice-html5', - help=_("Show SPICE console URL") + help=_("Show SPICE console URL"), + ) + type_group.add_argument( + '--spice-direct', + dest='url_type', + action='store_const', + const='spice-direct', + help=_("Show SPICE direct protocol native console URL"), ) type_group.add_argument( '--rdp', @@ -128,13 +139,14 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute server = compute_client.find_server( - parsed_args.server, - ignore_missing=False) + parsed_args.server, ignore_missing=False + ) - data = compute_client.create_console(server.id, - console_type=parsed_args.url_type) + data = compute_client.create_console( + server.id, console_type=parsed_args.url_type + ) display_columns, columns = _get_console_columns(data) data = utils.get_dict_properties(data, columns) diff --git a/openstackclient/compute/v2/console_connection.py b/openstackclient/compute/v2/console_connection.py new file mode 100644 index 0000000000..97eb1a80e1 --- /dev/null +++ b/openstackclient/compute/v2/console_connection.py @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
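For reference, the console-log and console-URL paths reworked in console.py above boil down to two SDK calls. A minimal sketch with the server name and cloud assumed; the 'spice-direct' type added in this change needs a deployment new enough to expose it.

    import openstack

    conn = openstack.connect(cloud='devstack')  # assumed cloud name
    server = conn.compute.find_server('my-server', ignore_missing=False)

    # Remote console URL; the type must be one the deployment supports,
    # e.g. 'novnc', 'spice-html5' or the newly added 'spice-direct'.
    console = conn.compute.create_console(server.id, console_type='novnc')
    print(console['url'])

    # Tail of the console log (last 20 lines).
    output = conn.compute.get_server_console_output(server.id, length=20)
    print(output.get('output', ''))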
+# + +"""Compute v2 Console auth token implementations.""" + +from osc_lib import utils + +from openstackclient import command +from openstackclient.i18n import _ + + +def _get_console_connection_columns(item): + column_map: dict[str, str] = {} + hidden_columns = ['id', 'location', 'name'] + return utils.get_osc_show_columns_for_sdk_resource( + item, column_map, hidden_columns + ) + + +class ShowConsoleConnectionInformation(command.ShowOne): + _description = _("Show server's remote console connection information") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'token', + metavar='', + help=_("Nova console token to lookup"), + ) + return parser + + def take_action(self, parsed_args): + compute_client = self.app.client_manager.compute + data = compute_client.validate_console_auth_token(parsed_args.token) + display_columns, columns = _get_console_connection_columns(data) + data = utils.get_dict_properties(data, columns) + + return (display_columns, data) diff --git a/openstackclient/compute/v2/flavor.py b/openstackclient/compute/v2/flavor.py index bc8f758bb1..de3a710298 100644 --- a/openstackclient/compute/v2/flavor.py +++ b/openstackclient/compute/v2/flavor.py @@ -21,10 +21,11 @@ from openstack import utils as sdk_utils from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -34,7 +35,7 @@ _formatters = { 'extra_specs': format_columns.DictColumn, - 'properties': format_columns.DictColumn + 'properties': format_columns.DictColumn, } @@ -45,70 +46,64 @@ def _get_flavor_columns(item): 'extra_specs': 'properties', 'ephemeral': 'OS-FLV-EXT-DATA:ephemeral', 'is_disabled': 'OS-FLV-DISABLED:disabled', - 'is_public': 'os-flavor-access:is_public' - + 'is_public': 'os-flavor-access:is_public', } hidden_columns = ['links', 'location', 'original_name'] return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, column_map, hidden_columns + ) class CreateFlavor(command.ShowOne): _description = _("Create new flavor") def get_parser(self, prog_name): - parser = super(CreateFlavor, self).get_parser(prog_name) - parser.add_argument( - "name", - metavar="", - help=_("New flavor name") - ) + parser = super().get_parser(prog_name) parser.add_argument( - "--id", - metavar="", - help=_("Unique flavor ID") + "name", metavar="", help=_("New flavor name") ) + parser.add_argument("--id", metavar="", help=_("Unique flavor ID")) parser.add_argument( "--ram", type=int, metavar="", default=256, - help=_("Memory size in MB (default 256M)") + help=_("Memory size in MB (default 256M)"), ) parser.add_argument( "--disk", type=int, metavar="", default=0, - help=_("Disk size in GB (default 0G)") + help=_("Disk size in GB (default 0G)"), ) parser.add_argument( "--ephemeral", type=int, metavar="", default=0, - help=_("Ephemeral disk size in GB (default 0G)") + help=_("Ephemeral disk size in GB (default 0G)"), ) parser.add_argument( "--swap", type=int, metavar="", default=0, - help=_("Additional swap space size in MB (default 0M)") + help=_("Additional swap space size in MB (default 0M)"), ) parser.add_argument( "--vcpus", type=int, metavar="", default=1, - help=_("Number of vcpus (default 1)") + help=_("Number of vcpus (default 1)"), ) 
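The new console_connection module above is a thin wrapper over a single SDK call. A short sketch; the cloud name and token value are placeholders, and the shape of the response is an assumption beyond what the new command prints.

    import openstack

    conn = openstack.connect(cloud='devstack-admin')  # assumed cloud name
    token = 'PLACEHOLDER_CONSOLE_TOKEN'               # hypothetical token value

    # The result describes where the console proxy should connect
    # (instance UUID, host, port, and similar fields).
    info = conn.compute.validate_console_auth_token(token)
    print(info)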
parser.add_argument( "--rxtx-factor", type=float, metavar="", default=1.0, - help=_("RX/TX factor (default 1.0)") + help=_("RX/TX factor (default 1.0)"), ) public_group = parser.add_mutually_exclusive_group() public_group.add_argument( @@ -116,51 +111,70 @@ def get_parser(self, prog_name): dest="public", action="store_true", default=True, - help=_("Flavor is available to other projects (default)") + help=_("Flavor is available to other projects (default)"), ) public_group.add_argument( "--private", dest="public", action="store_false", - help=_("Flavor is not available to other projects") + help=_("Flavor is not available to other projects"), ) parser.add_argument( "--property", metavar="", action=parseractions.KeyValueAction, dest="properties", - help=_("Property to add for this flavor " - "(repeat option to set multiple properties)") + help=_( + "Property to add for this flavor " + "(repeat option to set multiple properties)" + ), ) parser.add_argument( '--project', metavar='', - help=_("Allow to access private flavor (name or ID) " - "(Must be used with --private option)"), + help=_( + "Allow to access private flavor (name or ID) " + "(Must be used with --private option)" + ), ) parser.add_argument( '--description', metavar='', - help=_("Description for the flavor.(Supported by API versions " - "'2.55' - '2.latest'") + help=_( + "Description for the flavor.(Supported by API versions " + "'2.55' - '2.latest'" + ), ) identity_common.add_project_domain_option_to_parser(parser) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute identity_client = self.app.client_manager.identity if parsed_args.project and parsed_args.public: msg = _("--project is only allowed with --private") raise exceptions.CommandError(msg) + flavor_id = parsed_args.id + if parsed_args.id == 'auto': + # novaclient aliased 'auto' to mean "generate a UUID for me": we + # do the same to avoid breaking existing users + flavor_id = None + + msg = _( + "Passing '--id auto' is deprecated. Nova will automatically " + "assign a UUID-like ID if no ID is provided. Omit the '--id' " + "parameter instead." 
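Seen end to end, the CreateFlavor flow this hunk is reshaping (it continues just below) amounts to three SDK calls: create the flavor, optionally attach extra specs, and grant access for private flavors. A hedged sketch with illustrative names and values; the project ID is a placeholder.

    import openstack

    conn = openstack.connect(cloud='devstack-admin')  # assumed cloud name

    flavor = conn.compute.create_flavor(
        name='m1.example',   # illustrative name
        ram=256,
        vcpus=1,
        disk=1,
        id=None,             # let Nova generate a UUID-style ID
        is_public=False,
    )

    # Properties (extra specs) are applied with a follow-up call.
    flavor = conn.compute.create_flavor_extra_specs(
        flavor, {'hw:cpu_policy': 'shared'}
    )

    # Private flavors must be shared with projects explicitly.
    conn.compute.flavor_add_tenant_access(flavor.id, 'PLACEHOLDER_PROJECT_ID')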
+ ) + self.log.warning(msg) + args = { 'name': parsed_args.name, 'ram': parsed_args.ram, 'vcpus': parsed_args.vcpus, 'disk': parsed_args.disk, - 'id': parsed_args.id, + 'id': flavor_id, 'ephemeral': parsed_args.ephemeral, 'swap': parsed_args.swap, 'rxtx_factor': parsed_args.rxtx_factor, @@ -186,22 +200,24 @@ def take_action(self, parsed_args): parsed_args.project, parsed_args.project_domain, ).id - compute_client.flavor_add_tenant_access( - flavor.id, project_id) + compute_client.flavor_add_tenant_access(flavor.id, project_id) except Exception as e: - msg = _("Failed to add project %(project)s access to " - "flavor: %(e)s") + msg = _( + "Failed to add project %(project)s access to flavor: %(e)s" + ) LOG.error(msg, {'project': parsed_args.project, 'e': e}) if parsed_args.properties: try: flavor = compute_client.create_flavor_extra_specs( - flavor, parsed_args.properties) + flavor, parsed_args.properties + ) except Exception as e: LOG.error(_("Failed to set flavor properties: %s"), e) display_columns, columns = _get_flavor_columns(flavor) - data = utils.get_dict_properties(flavor, columns, - formatters=_formatters) + data = utils.get_dict_properties( + flavor, columns, formatters=_formatters + ) return (display_columns, data) @@ -210,17 +226,17 @@ class DeleteFlavor(command.Command): _description = _("Delete flavor(s)") def get_parser(self, prog_name): - parser = super(DeleteFlavor, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "flavor", metavar="", nargs='+', - help=_("Flavor(s) to delete (name or ID)") + help=_("Flavor(s) to delete (name or ID)"), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute result = 0 for f in parsed_args.flavor: try: @@ -228,13 +244,20 @@ def take_action(self, parsed_args): compute_client.delete_flavor(flavor.id) except Exception as e: result += 1 - LOG.error(_("Failed to delete flavor with name or " - "ID '%(flavor)s': %(e)s"), {'flavor': f, 'e': e}) + LOG.error( + _( + "Failed to delete flavor with name or " + "ID '%(flavor)s': %(e)s" + ), + {'flavor': f, 'e': e}, + ) if result > 0: total = len(parsed_args.flavor) - msg = (_("%(result)s of %(total)s flavors failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s flavors failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -242,27 +265,27 @@ class ListFlavor(command.Lister): _description = _("List flavors") def get_parser(self, prog_name): - parser = super(ListFlavor, self).get_parser(prog_name) + parser = super().get_parser(prog_name) public_group = parser.add_mutually_exclusive_group() public_group.add_argument( "--public", dest="public", action="store_true", default=True, - help=_("List only public flavors (default)") + help=_("List only public flavors (default)"), ) public_group.add_argument( "--private", dest="public", action="store_false", - help=_("List only private flavors") + help=_("List only private flavors"), ) public_group.add_argument( "--all", dest="all", action="store_true", default=False, - help=_("List all flavors, whether public or private") + help=_("List all flavors, whether public or private"), ) parser.add_argument( '--min-disk', @@ -280,37 +303,20 @@ def get_parser(self, prog_name): '--long', action='store_true', default=False, - help=_("List additional fields in output") - ) - parser.add_argument( - '--marker', - metavar="", - help=_("The 
last flavor ID of the previous page") - ) - parser.add_argument( - '--limit', - type=int, - metavar='', - help=_( - 'Maximum number of flavors to display. This is also ' - 'configurable on the server. The actual limit used will be ' - 'the lower of the user-supplied value and the server ' - 'configuration-derived value' - ), + help=_("List additional fields in output"), ) + pagination.add_marker_pagination_option_to_parser(parser) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute # is_public is ternary - None means give all flavors, # True is public only and False is private only # By default Nova assumes True and gives admins public flavors # and flavors from their own projects only. is_public = None if parsed_args.all else parsed_args.public - query_attrs = { - 'is_public': is_public - } + query_attrs = {'is_public': is_public} if parsed_args.marker: query_attrs['marker'] = parsed_args.marker @@ -336,14 +342,14 @@ def take_action(self, parsed_args): if parsed_args.long and not f.extra_specs: compute_client.fetch_flavor_extra_specs(f) - columns = ( + columns: tuple[str, ...] = ( "id", "name", "ram", "disk", "ephemeral", "vcpus", - "is_public" + "is_public", ) if parsed_args.long: columns += ( @@ -352,14 +358,14 @@ def take_action(self, parsed_args): "extra_specs", ) - column_headers = ( + column_headers: tuple[str, ...] = ( "ID", "Name", "RAM", "Disk", "Ephemeral", "VCPUs", - "Is Public" + "Is Public", ) if parsed_args.long: column_headers += ( @@ -381,53 +387,57 @@ class SetFlavor(command.Command): _description = _("Set flavor properties") def get_parser(self, prog_name): - parser = super(SetFlavor, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "flavor", metavar="", - help=_("Flavor to modify (name or ID)") + help=_("Flavor to modify (name or ID)"), ) parser.add_argument( "--no-property", action="store_true", - help=_("Remove all properties from this flavor " - "(specify both --no-property and --property" - " to remove the current properties before setting" - " new properties.)"), + help=_( + "Remove all properties from this flavor " + "(specify both --no-property and --property" + " to remove the current properties before setting" + " new properties.)" + ), ) parser.add_argument( "--property", metavar="", action=parseractions.KeyValueAction, dest="properties", - help=_("Property to add or modify for this flavor " - "(repeat option to set multiple properties)") + help=_( + "Property to add or modify for this flavor " + "(repeat option to set multiple properties)" + ), ) parser.add_argument( '--project', metavar='', - help=_('Set flavor access to project (name or ID) ' - '(admin only)'), + help=_('Set flavor access to project (name or ID) (admin only)'), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--description', metavar='', - help=_("Set description for the flavor.(Supported by API " - "versions '2.55' - '2.latest'") + help=_( + "Set description for the flavor.(Supported by API " + "versions '2.55' - '2.latest'" + ), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute identity_client = self.app.client_manager.identity try: flavor = compute_client.find_flavor( - parsed_args.flavor, - get_extra_specs=True, - ignore_missing=False) + parsed_args.flavor, get_extra_specs=True, 
ignore_missing=False + ) except sdk_exceptions.ResourceNotFound as e: raise exceptions.CommandError(e.message) @@ -440,14 +450,16 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) compute_client.update_flavor( - flavor=flavor.id, description=parsed_args.description) + flavor=flavor.id, description=parsed_args.description + ) result = 0 if parsed_args.no_property: try: for key in flavor.extra_specs.keys(): compute_client.delete_flavor_extra_specs_property( - flavor.id, key) + flavor.id, key + ) except Exception as e: LOG.error(_("Failed to clear flavor properties: %s"), e) result += 1 @@ -455,7 +467,8 @@ def take_action(self, parsed_args): if parsed_args.properties: try: compute_client.create_flavor_extra_specs( - flavor.id, parsed_args.properties) + flavor.id, parsed_args.properties + ) except Exception as e: LOG.error(_("Failed to set flavor properties: %s"), e) result += 1 @@ -472,45 +485,52 @@ def take_action(self, parsed_args): parsed_args.project_domain, ).id compute_client.flavor_add_tenant_access( - flavor.id, project_id) + flavor.id, project_id + ) except Exception as e: LOG.error(_("Failed to set flavor access to project: %s"), e) result += 1 if result > 0: - raise exceptions.CommandError(_("Command Failed: One or more of" - " the operations failed")) + raise exceptions.CommandError( + _("Command Failed: One or more of the operations failed") + ) class ShowFlavor(command.ShowOne): _description = _("Display flavor details") def get_parser(self, prog_name): - parser = super(ShowFlavor, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "flavor", metavar="", - help=_("Flavor to display (name or ID)") + help=_("Flavor to display (name or ID)"), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute flavor = compute_client.find_flavor( - parsed_args.flavor, get_extra_specs=True, ignore_missing=False) + parsed_args.flavor, get_extra_specs=True, ignore_missing=False + ) access_projects = None # get access projects list of this flavor if not flavor.is_public: try: flavor_access = compute_client.get_flavor_access( - flavor=flavor.id) + flavor=flavor.id + ) access_projects = [ utils.get_field(access, 'tenant_id') - for access in flavor_access] + for access in flavor_access + ] except Exception as e: - msg = _("Failed to get access projects list " - "for flavor '%(flavor)s': %(e)s") + msg = _( + "Failed to get access projects list " + "for flavor '%(flavor)s': %(e)s" + ) LOG.error(msg, {'flavor': parsed_args.flavor, 'e': e}) # Since we need to inject "access_project_id" into resource - convert @@ -520,7 +540,8 @@ def take_action(self, parsed_args): display_columns, columns = _get_flavor_columns(flavor) data = utils.get_dict_properties( - flavor, columns, formatters=_formatters) + flavor, columns, formatters=_formatters + ) return (display_columns, data) @@ -529,48 +550,51 @@ class UnsetFlavor(command.Command): _description = _("Unset flavor properties") def get_parser(self, prog_name): - parser = super(UnsetFlavor, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "flavor", metavar="", - help=_("Flavor to modify (name or ID)") + help=_("Flavor to modify (name or ID)"), ) parser.add_argument( "--property", metavar="", action='append', dest="properties", - help=_("Property to remove from flavor " - "(repeat option to unset multiple properties)") + help=_( + "Property to remove 
from flavor " + "(repeat option to unset multiple properties)" + ), ) parser.add_argument( '--project', metavar='', - help=_('Remove flavor access from project (name or ID) ' - '(admin only)'), + help=_( + 'Remove flavor access from project (name or ID) (admin only)' + ), ) identity_common.add_project_domain_option_to_parser(parser) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute identity_client = self.app.client_manager.identity try: flavor = compute_client.find_flavor( - parsed_args.flavor, - get_extra_specs=True, - ignore_missing=False) + parsed_args.flavor, get_extra_specs=True, ignore_missing=False + ) except sdk_exceptions.ResourceNotFound as e: - raise exceptions.CommandError(_(e.message)) + raise exceptions.CommandError(e.message) result = 0 if parsed_args.properties: for key in parsed_args.properties: try: compute_client.delete_flavor_extra_specs_property( - flavor.id, key) + flavor.id, key + ) except sdk_exceptions.SDKException as e: LOG.error(_("Failed to unset flavor property: %s"), e) result += 1 @@ -587,12 +611,15 @@ def take_action(self, parsed_args): parsed_args.project_domain, ).id compute_client.flavor_remove_tenant_access( - flavor.id, project_id) + flavor.id, project_id + ) except Exception as e: - LOG.error(_("Failed to remove flavor access from project: %s"), - e) + LOG.error( + _("Failed to remove flavor access from project: %s"), e + ) result += 1 if result > 0: - raise exceptions.CommandError(_("Command Failed: One or more of" - " the operations failed")) + raise exceptions.CommandError( + _("Command Failed: One or more of the operations failed") + ) diff --git a/openstackclient/compute/v2/host.py b/openstackclient/compute/v2/host.py index e6dd3a6f82..58023676d4 100644 --- a/openstackclient/compute/v2/host.py +++ b/openstackclient/compute/v2/host.py @@ -15,9 +15,10 @@ """Host action implementations""" -from osc_lib.command import command +from openstack import exceptions as sdk_exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -29,92 +30,86 @@ def get_parser(self, prog_name): parser.add_argument( "--zone", metavar="", - help=_("Only return hosts in the availability zone") + help=_("Only return hosts in the availability zone"), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute - columns = ( - "Host Name", - "Service", - "Zone" - ) + compute_client = self.app.client_manager.compute self.log.warning( - "API has been deprecated. " - "Please consider using 'hypervisor list' instead." + "API has been deprecated; " + "consider using 'hypervisor list' instead." 
) # doing this since openstacksdk has decided not to support this # deprecated command - hosts = compute_client.get( - '/os-hosts', microversion='2.1' - ).json().get('hosts') - + response = compute_client.get('/os-hosts', microversion='2.1') + sdk_exceptions.raise_from_response(response) + hosts = response.json().get('hosts') if parsed_args.zone is not None: - filtered_hosts = [] - for host in hosts: - if host['zone'] == parsed_args.zone: - filtered_hosts.append(host) - - hosts = filtered_hosts + hosts = [h for h in hosts if h['zone'] == parsed_args.zone] + columns = ("Host Name", "Service", "Zone") return columns, (utils.get_dict_properties(s, columns) for s in hosts) class SetHost(command.Command): - _description = _("Set host properties") + _description = _("DEPRECATED: Set host properties") def get_parser(self, prog_name): - parser = super(SetHost, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - "host", - metavar="", - help=_("Host to modify (name only)") + "host", metavar="", help=_("Host to modify (name only)") ) status = parser.add_mutually_exclusive_group() status.add_argument( - '--enable', - action='store_true', - help=_("Enable the host") + '--enable', action='store_true', help=_("Enable the host") ) status.add_argument( - '--disable', - action='store_true', - help=_("Disable the host") + '--disable', action='store_true', help=_("Disable the host") ) maintenance = parser.add_mutually_exclusive_group() maintenance.add_argument( '--enable-maintenance', action='store_true', - help=_("Enable maintenance mode for the host") + help=_("Enable maintenance mode for the host"), ) maintenance.add_argument( '--disable-maintenance', action='store_true', - help=_("Disable maintenance mode for the host") + help=_("Disable maintenance mode for the host"), ) return parser def take_action(self, parsed_args): - kwargs = {} + compute_client = self.app.client_manager.compute + + self.log.warning( + "API has been deprecated; " + "consider using 'compute service set' instead." + ) + data = {} if parsed_args.enable: - kwargs['status'] = 'enable' + data['status'] = 'enable' if parsed_args.disable: - kwargs['status'] = 'disable' + data['status'] = 'disable' if parsed_args.enable_maintenance: - kwargs['maintenance_mode'] = 'enable' + data['maintenance_mode'] = 'enable' if parsed_args.disable_maintenance: - kwargs['maintenance_mode'] = 'disable' + data['maintenance_mode'] = 'disable' - compute_client = self.app.client_manager.compute + if not data: + # don't bother calling if nothing given + return - compute_client.api.host_set( - parsed_args.host, - **kwargs + # doing this since openstacksdk has decided not to support this + # deprecated command + response = compute_client.put( + f'/os-hosts/{parsed_args.host}', json=data, microversion='2.1' ) + sdk_exceptions.raise_from_response(response) class ShowHost(command.Lister): @@ -122,38 +117,30 @@ class ShowHost(command.Lister): def get_parser(self, prog_name): parser = super().get_parser(prog_name) - parser.add_argument( - "host", - metavar="", - help=_("Name of host") - ) + parser.add_argument("host", metavar="", help=_("Name of host")) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute - columns = ( - "Host", - "Project", - "CPU", - "Memory MB", - "Disk GB" - ) + compute_client = self.app.client_manager.compute self.log.warning( - "API has been deprecated. " - "Please consider using 'hypervisor show' instead." 
+ "API has been deprecated; " + "consider using 'hypervisor show' instead." ) # doing this since openstacksdk has decided not to support this # deprecated command - resources = compute_client.get( - '/os-hosts/' + parsed_args.host, - microversion='2.1' - ).json().get('host') + response = compute_client.get( + f'/os-hosts/{parsed_args.host}', microversion='2.1' + ) + sdk_exceptions.raise_from_response(response) + resources = response.json().get('host') data = [] if resources is not None: for resource in resources: data.append(resource['resource']) + columns = ("Host", "Project", "CPU", "Memory MB", "Disk GB") + return columns, (utils.get_dict_properties(s, columns) for s in data) diff --git a/openstackclient/compute/v2/hypervisor.py b/openstackclient/compute/v2/hypervisor.py index d4b4003bf9..9e1b265b19 100644 --- a/openstackclient/compute/v2/hypervisor.py +++ b/openstackclient/compute/v2/hypervisor.py @@ -18,13 +18,14 @@ import json import re -from novaclient import exceptions as nova_exceptions +from openstack import exceptions as sdk_exceptions from openstack import utils as sdk_utils from osc_lib.cli import format_columns -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ @@ -33,32 +34,37 @@ def _get_hypervisor_columns(item, client): hidden_columns = ['location', 'servers'] if sdk_utils.supports_microversion(client, '2.88'): - hidden_columns.extend([ - 'current_workload', - 'disk_available', - 'local_disk_free', - 'local_disk_size', - 'local_disk_used', - 'memory_free', - 'memory_size', - 'memory_used', - 'running_vms', - 'vcpus_used', - 'vcpus', - ]) + hidden_columns.extend( + [ + 'current_workload', + 'disk_available', + 'local_disk_free', + 'local_disk_size', + 'local_disk_used', + 'memory_free', + 'memory_size', + 'memory_used', + 'running_vms', + 'vcpus_used', + 'vcpus', + ] + ) else: - column_map.update({ - 'disk_available': 'disk_available_least', - 'local_disk_free': 'free_disk_gb', - 'local_disk_size': 'local_gb', - 'local_disk_used': 'local_gb_used', - 'memory_free': 'free_ram_mb', - 'memory_used': 'memory_mb_used', - 'memory_size': 'memory_mb', - }) + column_map.update( + { + 'disk_available': 'disk_available_least', + 'local_disk_free': 'free_disk_gb', + 'local_disk_size': 'local_gb', + 'local_disk_used': 'local_gb_used', + 'memory_free': 'free_ram_mb', + 'memory_used': 'memory_mb_used', + 'memory_size': 'memory_mb', + } + ) return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, column_map, hidden_columns + ) class ListHypervisor(command.Lister): @@ -69,45 +75,27 @@ def get_parser(self, prog_name): parser.add_argument( '--matching', metavar='', - help=_("Filter hypervisors using substring") - ) - parser.add_argument( - '--marker', - metavar='', help=_( - "The UUID of the last hypervisor of the previous page; " - "displays list of hypervisors after 'marker'. " - "(supported with --os-compute-api-version 2.33 or above)" - ), - ) - parser.add_argument( - '--limit', - metavar='', - type=int, - help=_( - "Maximum number of hypervisors to display. Note that there " - "is a configurable max limit on the server, and the limit " - "that is used will be the minimum of what is requested " - "here and what is configured in the server. 
" - "(supported with --os-compute-api-version 2.33 or above)" + "Filter hypervisors using substring" + "Hypervisor Type and Host IP are not returned " + "when using microversion 2.52 or lower" ), ) + pagination.add_marker_pagination_option_to_parser(parser) parser.add_argument( '--long', action='store_true', - help=_("List additional fields in output") + help=_("List additional fields in output"), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute list_opts = {} if parsed_args.matching and (parsed_args.marker or parsed_args.limit): - msg = _( - '--matching is not compatible with --marker or --limit' - ) + msg = _('--matching is not compatible with --marker or --limit') raise exceptions.CommandError(msg) if parsed_args.marker: @@ -128,40 +116,40 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) list_opts['limit'] = parsed_args.limit - column_headers = ( + if parsed_args.matching: + list_opts['hypervisor_hostname_pattern'] = parsed_args.matching + + column_headers: tuple[str, ...] = ( "ID", "Hypervisor Hostname", "Hypervisor Type", "Host IP", - "State" + "State", ) - columns = ( + columns: tuple[str, ...] = ( 'id', 'name', 'hypervisor_type', 'host_ip', - 'state' + 'state', ) + if parsed_args.long: if not sdk_utils.supports_microversion(compute_client, '2.88'): column_headers += ( 'vCPUs Used', 'vCPUs', 'Memory MB Used', - 'Memory MB' + 'Memory MB', ) columns += ( 'vcpus_used', 'vcpus', 'memory_used', - 'memory_size' + 'memory_size', ) - if parsed_args.matching: - data = compute_client.find_hypervisor( - parsed_args.matching, ignore_missing=False) - else: - data = compute_client.hypervisors(**list_opts, details=True) + data = compute_client.hypervisors(**list_opts, details=True) return ( column_headers, @@ -177,14 +165,17 @@ def get_parser(self, prog_name): parser.add_argument( "hypervisor", metavar="", - help=_("Hypervisor to display (name or ID)") + help=_("Hypervisor to display (name or ID)"), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute - hypervisor = compute_client.find_hypervisor( - parsed_args.hypervisor, ignore_missing=False).copy() + compute_client = self.app.client_manager.compute + + hypervisor_id = compute_client.find_hypervisor( + parsed_args.hypervisor, ignore_missing=False, details=False + ).id + hypervisor = compute_client.get_hypervisor(hypervisor_id).copy() # Some of the properties in the hypervisor object need to be processed # before they get reported to the user. 
We spend this section @@ -204,14 +195,18 @@ def take_action(self, parsed_args): if cell: # The host aggregates are also prefixed by "@" - member_of = [aggregate.name - for aggregate in aggregates - if cell in aggregate.name and - service_host in aggregate.hosts] + member_of = [ + aggregate.name + for aggregate in aggregates + if cell in aggregate.name + and service_host in aggregate.hosts + ] else: - member_of = [aggregate.name - for aggregate in aggregates - if service_host in aggregate.hosts] + member_of = [ + aggregate.name + for aggregate in aggregates + if service_host in aggregate.hosts + ] hypervisor['aggregates'] = member_of try: @@ -221,21 +216,24 @@ def take_action(self, parsed_args): else: del hypervisor['uptime'] uptime = compute_client.get_hypervisor_uptime( - hypervisor['id'])['uptime'] + hypervisor['id'] + )['uptime'] # Extract data from uptime value # format: 0 up 0, 0 users, load average: 0, 0, 0 # example: 17:37:14 up 2:33, 3 users, # load average: 0.33, 0.36, 0.34 m = re.match( r"\s*(.+)\sup\s+(.+),\s+(.+)\susers?,\s+load average:\s(.+)", - uptime) + uptime, + ) if m: hypervisor['host_time'] = m.group(1) hypervisor['uptime'] = m.group(2) hypervisor['users'] = m.group(3) hypervisor['load_average'] = m.group(4) - except nova_exceptions.HTTPNotImplemented: - pass + except sdk_exceptions.HttpException as exc: + if exc.status_code != 501: + raise hypervisor['service_id'] = service_details['id'] hypervisor['service_host'] = service_details['host'] @@ -246,11 +244,14 @@ def take_action(self, parsed_args): # string; on earlier fields, do this manually hypervisor['cpu_info'] = json.loads(hypervisor['cpu_info'] or '{}') display_columns, columns = _get_hypervisor_columns( - hypervisor, compute_client) + hypervisor, compute_client + ) data = utils.get_dict_properties( - hypervisor, columns, + hypervisor, + columns, formatters={ 'cpu_info': format_columns.DictColumn, - }) + }, + ) return display_columns, data diff --git a/openstackclient/compute/v2/hypervisor_stats.py b/openstackclient/compute/v2/hypervisor_stats.py index cb63a8000c..d151b41614 100644 --- a/openstackclient/compute/v2/hypervisor_stats.py +++ b/openstackclient/compute/v2/hypervisor_stats.py @@ -13,9 +13,9 @@ """Hypervisor Stats action implementations""" -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -29,11 +29,11 @@ def _get_hypervisor_stat_columns(item): 'memory_free': 'free_ram_mb', 'memory_size': 'memory_mb', 'memory_used': 'memory_mb_used', - } hidden_columns = ['id', 'links', 'location', 'name'] return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, column_map, hidden_columns + ) class ShowHypervisorStats(command.ShowOne): @@ -41,19 +41,17 @@ class ShowHypervisorStats(command.ShowOne): def take_action(self, parsed_args): # The command is deprecated since it is being dropped in Nova. - self.log.warning( - _("This command is deprecated.") - ) - compute_client = self.app.client_manager.sdk_connection.compute + self.log.warning(_("This command is deprecated.")) + compute_client = self.app.client_manager.compute # We do API request directly cause this deprecated method is not and # will not be supported by OpenStackSDK. 
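# The uptime parsing in ShowHypervisor above relies on the documented shape
# "<time> up <uptime>, <N> users, load average: <l1>, <l2>, <l3>".  A
# standalone check of that regex against the example quoted in the code
# comment (illustrative only):
import re

UPTIME_RE = r"\s*(.+)\sup\s+(.+),\s+(.+)\susers?,\s+load average:\s(.+)"
m = re.match(
    UPTIME_RE, " 17:37:14 up 2:33, 3 users, load average: 0.33, 0.36, 0.34"
)
assert m is not None
host_time, uptime, users, load_average = m.groups()
# host_time == '17:37:14', uptime == '2:33', users == '3',
# load_average == '0.33, 0.36, 0.34'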
response = compute_client.get( - '/os-hypervisors/statistics', - microversion='2.1') + '/os-hypervisors/statistics', microversion='2.1' + ) hypervisor_stats = response.json().get('hypervisor_statistics') display_columns, columns = _get_hypervisor_stat_columns( - hypervisor_stats) - data = utils.get_dict_properties( - hypervisor_stats, columns) + hypervisor_stats + ) + data = utils.get_dict_properties(hypervisor_stats, columns) return (display_columns, data) diff --git a/openstackclient/compute/v2/keypair.py b/openstackclient/compute/v2/keypair.py index 7dabf78d9c..b7744698b4 100644 --- a/openstackclient/compute/v2/keypair.py +++ b/openstackclient/compute/v2/keypair.py @@ -15,100 +15,154 @@ """Keypair action implementations""" -import io +import collections import logging import os -import sys +from cryptography.hazmat.primitives.asymmetric import ed25519 +from cryptography.hazmat.primitives import serialization from openstack import utils as sdk_utils -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ from openstackclient.identity import common as identity_common LOG = logging.getLogger(__name__) +Keypair = collections.namedtuple('Keypair', 'private_key public_key') + + +def _generate_keypair(): + """Generate a Ed25519 keypair in OpenSSH format. + + :returns: A `Keypair` named tuple with the generated private and public + keys. + """ + key = ed25519.Ed25519PrivateKey.generate() + private_key = key.private_bytes( + serialization.Encoding.PEM, + serialization.PrivateFormat.OpenSSH, + serialization.NoEncryption(), + ).decode() + public_key = ( + key.public_key() + .public_bytes( + serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH + ) + .decode() + ) + + return Keypair(private_key, public_key) def _get_keypair_columns(item, hide_pub_key=False, hide_priv_key=False): # To maintain backwards compatibility we need to rename sdk props to # whatever OSC was using before - column_map = {} hidden_columns = ['links', 'location'] if hide_pub_key: hidden_columns.append('public_key') if hide_priv_key: hidden_columns.append('private_key') return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, {}, hidden_columns + ) class CreateKeypair(command.ShowOne): _description = _("Create new public or private key for server ssh access") def get_parser(self, prog_name): - parser = super(CreateKeypair, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar='', - help=_("New public or private key name") + 'name', metavar='', help=_("New public or private key name") ) key_group = parser.add_mutually_exclusive_group() key_group.add_argument( '--public-key', metavar='', - help=_("Filename for public key to add. If not used, " - "creates a private key.") + help=_( + "Filename for public key to add. " + "If not used, generates a private key in ssh-ed25519 format. " + "To generate keys in other formats, including the legacy " + "ssh-rsa format, you must use an external tool such as " + "ssh-keygen and specify this argument." + ), ) key_group.add_argument( '--private-key', metavar='', - help=_("Filename for private key to save. If not used, " - "print private key in console.") + help=_( + "Filename for private key to save. " + "If not used, print private key in console." 
+ ), ) parser.add_argument( '--type', metavar='', choices=['ssh', 'x509'], help=_( - "Keypair type. Can be ssh or x509. " - "(Supported by API versions '2.2' - '2.latest')" + 'Keypair type ' + '(supported by --os-compute-api-version 2.2 or above)' ), ) parser.add_argument( '--user', metavar='', help=_( - 'The owner of the keypair. (admin only) (name or ID). ' - 'Requires ``--os-compute-api-version`` 2.10 or greater.' + 'The owner of the keypair (admin only) (name or ID) ' + '(supported by --os-compute-api-version 2.10 or above)' ), ) identity_common.add_user_domain_option_to_parser(parser) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute identity_client = self.app.client_manager.identity - kwargs = { - 'name': parsed_args.name - } + kwargs = {'name': parsed_args.name} - public_key = parsed_args.public_key - if public_key: + if parsed_args.public_key: try: - with io.open(os.path.expanduser(parsed_args.public_key)) as p: + with open(os.path.expanduser(parsed_args.public_key)) as p: public_key = p.read() - except IOError as e: + except OSError as e: msg = _("Key file %(public_key)s not found: %(exception)s") raise exceptions.CommandError( - msg % {"public_key": parsed_args.public_key, - "exception": e} + msg + % { + "public_key": parsed_args.public_key, + "exception": e, + } ) kwargs['public_key'] = public_key + else: + generated_keypair = _generate_keypair() + kwargs['public_key'] = generated_keypair.public_key + + # If user have us a file, save private key into specified file + if parsed_args.private_key: + try: + with open( + os.path.expanduser(parsed_args.private_key), 'w+' + ) as p: + p.write(generated_keypair.private_key) + except OSError as e: + msg = _( + "Key file %(private_key)s can not be saved: " + "%(exception)s" + ) + raise exceptions.CommandError( + msg + % { + "private_key": parsed_args.private_key, + "exception": e, + } + ) if parsed_args.type: if not sdk_utils.supports_microversion(compute_client, '2.2'): @@ -136,32 +190,18 @@ def take_action(self, parsed_args): keypair = compute_client.create_keypair(**kwargs) - private_key = parsed_args.private_key - # Save private key into specified file - if private_key: - try: - with io.open( - os.path.expanduser(parsed_args.private_key), 'w+' - ) as p: - p.write(keypair.private_key) - except IOError as e: - msg = _("Key file %(private_key)s can not be saved: " - "%(exception)s") - raise exceptions.CommandError( - msg % {"private_key": parsed_args.private_key, - "exception": e} - ) # NOTE(dtroyer): how do we want to handle the display of the private # key when it needs to be communicated back to the user # For now, duplicate nova keypair-add command output - if public_key or private_key: + if parsed_args.public_key or parsed_args.private_key: display_columns, columns = _get_keypair_columns( - keypair, hide_pub_key=True, hide_priv_key=True) + keypair, hide_pub_key=True, hide_priv_key=True + ) data = utils.get_item_properties(keypair, columns) return (display_columns, data) else: - sys.stdout.write(keypair.private_key) + self.app.stdout.write(generated_keypair.private_key) return ({}, {}) @@ -169,12 +209,12 @@ class DeleteKeypair(command.Command): _description = _("Delete public or private key(s)") def get_parser(self, prog_name): - parser = super(DeleteKeypair, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', nargs='+', - help=_("Name of key(s) to delete (name 
only)") + help=_("Name of key(s) to delete (name only)"), ) parser.add_argument( '--user', @@ -188,7 +228,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute identity_client = self.app.client_manager.identity kwargs = {} @@ -211,16 +251,21 @@ def take_action(self, parsed_args): for n in parsed_args.name: try: compute_client.delete_keypair( - n, **kwargs, ignore_missing=False) + n, **kwargs, ignore_missing=False + ) except Exception as e: result += 1 - LOG.error(_("Failed to delete key with name " - "'%(name)s': %(e)s"), {'name': n, 'e': e}) + LOG.error( + _("Failed to delete key with name '%(name)s': %(e)s"), + {'name': n, 'e': e}, + ) if result > 0: total = len(parsed_args.name) - msg = (_("%(result)s of %(total)s keys failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s keys failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -249,20 +294,13 @@ def get_parser(self, prog_name): ), ) identity_common.add_project_domain_option_to_parser(parser) - parser.add_argument( - '--marker', - help=_('The last keypair ID of the previous page'), - ) - parser.add_argument( - '--limit', - type=int, - help=_('Maximum number of keypairs to display'), - ) + pagination.add_marker_pagination_option_to_parser(parser) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute identity_client = self.app.client_manager.identity + identity_sdk_client = self.app.client_manager.sdk_connection.identity kwargs = {} @@ -298,9 +336,7 @@ def take_action(self, parsed_args): # NOTE(stephenfin): Because we're doing this client-side, we # can't really rely on the marker, because we don't know what # user the marker is associated with - msg = _( - '--project is not compatible with --marker' - ) + msg = _('--project is not compatible with --marker') # NOTE(stephenfin): This is done client side because nova doesn't # currently support doing so server-side. If this is slow, we can @@ -310,11 +346,17 @@ def take_action(self, parsed_args): parsed_args.project, parsed_args.project_domain, ).id - users = identity_client.users.list(tenant_id=project) + assignments = identity_sdk_client.role_assignments( + scope_project_id=project + ) + user_ids = set() + for assignment in assignments: + if assignment.user: + user_ids.add(assignment.user['id']) data = [] - for user in users: - kwargs['user_id'] = user.id + for user_id in user_ids: + kwargs['user_id'] = user_id data.extend(compute_client.keypairs(**kwargs)) elif parsed_args.user: if not sdk_utils.supports_microversion(compute_client, '2.10'): @@ -335,13 +377,10 @@ def take_action(self, parsed_args): else: data = compute_client.keypairs(**kwargs) - columns = ( - "Name", - "Fingerprint" - ) + columns: tuple[str, ...] 
= ("Name", "Fingerprint") if sdk_utils.supports_microversion(compute_client, '2.2'): - columns += ("Type", ) + columns += ("Type",) return ( columns, @@ -353,17 +392,17 @@ class ShowKeypair(command.ShowOne): _description = _("Display key details") def get_parser(self, prog_name): - parser = super(ShowKeypair, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', - help=_("Public or private key to display (name only)") + help=_("Public or private key to display (name only)"), ) parser.add_argument( '--public-key', action='store_true', default=False, - help=_("Show only bare public key paired with the generated key") + help=_("Show only bare public key paired with the generated key"), ) parser.add_argument( '--user', @@ -377,7 +416,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute identity_client = self.app.client_manager.identity kwargs = {} @@ -397,13 +436,15 @@ def take_action(self, parsed_args): ).id keypair = compute_client.find_keypair( - parsed_args.name, **kwargs, ignore_missing=False) + parsed_args.name, **kwargs, ignore_missing=False + ) if not parsed_args.public_key: display_columns, columns = _get_keypair_columns( - keypair, hide_pub_key=True) + keypair, hide_pub_key=True + ) data = utils.get_item_properties(keypair, columns) return (display_columns, data) else: - sys.stdout.write(keypair.public_key) + self.app.stdout.write(keypair.public_key) return ({}, {}) diff --git a/openstackclient/compute/v2/server.py b/openstackclient/compute/v2/server.py index 23bd5e6f50..1eee828d9b 100644 --- a/openstackclient/compute/v2/server.py +++ b/openstackclient/compute/v2/server.py @@ -16,47 +16,47 @@ """Compute v2 Server action implementations""" import argparse +import base64 import getpass -import io import json import logging import os +import typing as ty from cliff import columns as cliff_columns import iso8601 -from novaclient import api_versions -from novaclient.v2 import servers from openstack import exceptions as sdk_exceptions from openstack import utils as sdk_utils from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils -from oslo_utils import strutils +from openstackclient.api import compute_v2 +from openstackclient import command +from openstackclient.common import envvars +from openstackclient.common import pagination from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common as network_common - LOG = logging.getLogger(__name__) IMAGE_STRING_FOR_BFV = 'N/A (booted from volume)' -class PowerStateColumn(cliff_columns.FormattableColumn): +class PowerStateColumn(cliff_columns.FormattableColumn[int]): """Generate a formatted string of a server's power state.""" power_states = [ - 'NOSTATE', # 0x00 - 'Running', # 0x01 - '', # 0x02 - 'Paused', # 0x03 - 'Shutdown', # 0x04 - '', # 0x05 - 'Crashed', # 0x06 - 'Suspended' # 0x07 + 'NOSTATE', # 0x00 + 'Running', # 0x01 + '', # 0x02 + 'Paused', # 0x03 + 'Shutdown', # 0x04 + '', # 0x05 + 'Crashed', # 0x06 + 'Suspended', # 0x07 ] def human_readable(self): @@ -66,23 +66,28 @@ def human_readable(self): return 'N/A' -class AddressesColumn(cliff_columns.FormattableColumn): +class AddressesColumn(cliff_columns.FormattableColumn[ty.Any]): """Generate 
a formatted string of a server's addresses.""" def human_readable(self): try: - return utils.format_dict_of_list({ - k: [i['addr'] for i in v if 'addr' in i] - for k, v in self._value.items()}) + return utils.format_dict_of_list( + { + k: [i['addr'] for i in v if 'addr' in i] + for k, v in self._value.items() + } + ) except Exception: return 'N/A' def machine_readable(self): - return {k: [i['addr'] for i in v if 'addr' in i] - for k, v in self._value.items()} + return { + k: [i['addr'] for i in v if 'addr' in i] + for k, v in (self._value.items() if self._value else []) + } -class HostColumn(cliff_columns.FormattableColumn): +class HostColumn(cliff_columns.FormattableColumn[str | None]): """Generate a formatted string of a hostname.""" def human_readable(self): @@ -121,52 +126,49 @@ def _get_ip_address(addresses, address_type, ip_address_family): return addy['addr'] msg = _("ERROR: No %(type)s IP version %(family)s address found") raise exceptions.CommandError( - msg % {"type": address_type, - "family": ip_address_family} + msg % {"type": address_type, "family": ip_address_family} ) -def _prep_server_detail(compute_client, image_client, server, refresh=True): +def _prep_server_detail(compute_client, image_client, server, *, refresh=True): """Prepare the detailed server dict for printing :param compute_client: a compute client instance :param image_client: an image client instance :param server: a Server resource :param refresh: Flag indicating if ``server`` is already the latest version - or if it needs to be refreshed, for example when showing - the latest details of a server after creating it. + or if it needs to be refreshed, for example when showing the latest + details of a server after creating it. :rtype: a dict of server details """ - # Note: Some callers of this routine pass a novaclient server, and others - # pass an SDK server. Column names may be different across those cases. info = server.to_dict() + if refresh: - server = utils.find_resource(compute_client.servers, info['id']) - info.update(server.to_dict()) + server = compute_client.get_server(info['id']) + # we only update if the field is not empty, to avoid overwriting + # existing values + info.update( + **{x: y for x, y in server.to_dict().items() if x not in info or y} + ) # Some commands using this routine were originally implemented with the # nova python wrappers, and were later migrated to use the SDK. Map the # SDK's property names to the original property names to maintain backward - # compatibility for existing users. Data is duplicated under both the old - # and new name so users can consume the data by either name. + # compatibility for existing users. 
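# The refresh above overlays the freshly fetched server on top of the
# original dict, but keeps any existing value that the new response left
# empty.  The same merge rule in isolation (sample dicts are made up):
stale = {'id': 'abc123', 'admin_password': 's3cret', 'status': 'BUILD'}
fresh = {'id': 'abc123', 'admin_password': None, 'status': 'ACTIVE'}
merged = dict(stale)
merged.update({k: v for k, v in fresh.items() if k not in merged or v})
# merged == {'id': 'abc123', 'admin_password': 's3cret', 'status': 'ACTIVE'}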
column_map = { 'access_ipv4': 'accessIPv4', 'access_ipv6': 'accessIPv6', 'admin_password': 'adminPass', - 'admin_password': 'adminPass', - 'volumes': 'os-extended-volumes:volumes_attached', + 'attached_volumes': 'volumes_attached', 'availability_zone': 'OS-EXT-AZ:availability_zone', - 'block_device_mapping': 'block_device_mapping_v2', 'compute_host': 'OS-EXT-SRV-ATTR:host', 'created_at': 'created', 'disk_config': 'OS-DCF:diskConfig', - 'flavor_id': 'flavorRef', 'has_config_drive': 'config_drive', 'host_id': 'hostId', 'fault': 'fault', 'hostname': 'OS-EXT-SRV-ATTR:hostname', 'hypervisor_hostname': 'OS-EXT-SRV-ATTR:hypervisor_hostname', - 'image_id': 'imageRef', 'instance_name': 'OS-EXT-SRV-ATTR:instance_name', 'is_locked': 'locked', 'kernel_id': 'OS-EXT-SRV-ATTR:kernel_id', @@ -177,17 +179,80 @@ def _prep_server_detail(compute_client, image_client, server, refresh=True): 'ramdisk_id': 'OS-EXT-SRV-ATTR:ramdisk_id', 'reservation_id': 'OS-EXT-SRV-ATTR:reservation_id', 'root_device_name': 'OS-EXT-SRV-ATTR:root_device_name', - 'scheduler_hints': 'OS-SCH-HNT:scheduler_hints', 'task_state': 'OS-EXT-STS:task_state', 'terminated_at': 'OS-SRV-USG:terminated_at', 'updated_at': 'updated', 'user_data': 'OS-EXT-SRV-ATTR:user_data', 'vm_state': 'OS-EXT-STS:vm_state', } + # NOTE(ratailor): microversion 2.96 introduces + # pinned_availability_zone support + if sdk_utils.supports_microversion(compute_client, '2.96'): + column_map['pinned_availability_zone'] = 'pinned_availability_zone' + + # NOTE(ratailor): microversion 2.100 introduces + # scheduler_hints support + if sdk_utils.supports_microversion(compute_client, '2.100'): + column_map['scheduler_hints'] = 'scheduler_hints' + + # Some columns returned by openstacksdk should not be shown because they're + # either irrelevant or duplicates + ignored_columns = { + # computed columns + 'interface_ip', + 'location', + 'private_v4', + 'private_v6', + 'public_v4', + 'public_v6', + # create-only columns + 'block_device_mapping', + 'flavor_id', + 'host', + 'image_id', + 'max_count', + 'min_count', + 'networks', + 'personality', + # aliases + 'volumes', + # unnecessary + 'links', + } + # Some columns are only present in certain responses and should not be + # shown otherwise. 
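# Several of the commands in this patch gate fields on the negotiated compute
# microversion, as with pinned_availability_zone (2.96) and scheduler_hints
# (2.100) above.  A minimal sketch of that pattern; the column names here are
# only examples:
from openstack import utils as sdk_utils

def _server_columns(compute_client):
    columns = ['id', 'name', 'status']
    if sdk_utils.supports_microversion(compute_client, '2.96'):
        # only present in responses from new enough APIs
        columns.append('pinned_availability_zone')
    if sdk_utils.supports_microversion(compute_client, '2.100'):
        columns.append('scheduler_hints')
    return tuple(columns)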
+ optional_columns = { + # only in create responses if '[api] enable_instance_password' is set + 'admin_password', + # only present in errored servers + 'fault', + # only present in create, detail responses + 'security_groups', + } + + data = {} + for key, value in info.items(): + if key in ignored_columns: + continue + + if key in optional_columns: + if info[key] is None: + continue - info.update({ - column_map[column]: data for column, data in info.items() - if column in column_map}) + alias = column_map.get(key) + data[alias or key] = value + + info = data + + # NOTE(dviroel): microversion 2.100 is now retrieving scheduler_hints + # content from request_spec on detailed responses + if not sdk_utils.supports_microversion(compute_client, '2.100'): + info.pop('scheduler_hints', None) + + # NOTE(ratailor): microversion 2.96 introduces + # pinned_availability_zone support + if not sdk_utils.supports_microversion(compute_client, '2.96'): + info.pop('pinned_availability_zone', None) # Convert the image blob to a name image_info = info.get('image', {}) @@ -195,7 +260,7 @@ def _prep_server_detail(compute_client, image_client, server, refresh=True): image_id = image_info.get('id', '') try: image = image_client.get_image(image_id) - info['image'] = "%s (%s)" % (image.name, image_id) + info['image'] = f"{image.name} ({image_id})" except Exception: info['image'] = image_id else: @@ -208,42 +273,61 @@ def _prep_server_detail(compute_client, image_client, server, refresh=True): # Convert the flavor blob to a name flavor_info = info.get('flavor', {}) # Microversion 2.47 puts the embedded flavor into the server response - # body but omits the id, so if not present we just expose the flavor - # dict in the server output. - if 'id' in flavor_info: + # body. The presence of the 'original_name' attribute indicates this. + if flavor_info.get('original_name') is None: # microversion < 2.47 flavor_id = flavor_info.get('id', '') try: - flavor = utils.find_resource(compute_client.flavors, flavor_id) - info['flavor'] = "%s (%s)" % (flavor.name, flavor_id) + flavor = compute_client.find_flavor( + flavor_id, ignore_missing=False + ) + info['flavor'] = f"{flavor.name} ({flavor_id})" except Exception: info['flavor'] = flavor_id - else: + else: # microversion >= 2.47 info['flavor'] = format_columns.DictColumn(flavor_info) - if 'os-extended-volumes:volumes_attached' in info: + # there's a lot of redundant information in BDMs - strip it + if 'volumes_attached' in info: info.update( { 'volumes_attached': format_columns.ListDictColumn( - info.pop('os-extended-volumes:volumes_attached')) + [ + { + k: v + for k, v in volume.items() + if v is not None and k != 'location' + } + for volume in info.pop('volumes_attached') or [] + ] + ) } ) + if 'security_groups' in info: info.update( { 'security_groups': format_columns.ListDictColumn( - info.pop('security_groups')) + info.pop('security_groups'), + ) } ) + if 'tags' in info: - info.update({'tags': format_columns.ListColumn(info.pop('tags'))}) + info.update( + {'tags': format_columns.ListColumn(info.pop('tags') or [])} + ) - # NOTE(dtroyer): novaclient splits these into separate entries... - # Format addresses in a useful way - info['addresses'] = ( - AddressesColumn(info['addresses']) if 'addresses' in info - else format_columns.DictListColumn(info.get('networks'))) + # Map 'networks' to 'addresses', if present. Note that the 'networks' key + # is used for create responses, otherwise it's 'addresses'. We know it'll + # be set because this is one of our optional columns. 
+ if 'networks' in info: + info['addresses'] = format_columns.DictListColumn( + info.pop('networks', {}), + ) + else: + info['addresses'] = AddressesColumn(info.get('addresses', {})) - # Map 'metadata' field to 'properties' + # Map 'metadata' field to 'properties' and format info['properties'] = format_columns.DictColumn(info.pop('metadata')) # Migrate tenant_id to project_id naming @@ -253,37 +337,23 @@ def _prep_server_detail(compute_client, image_client, server, refresh=True): # Map power state num to meaningful string if 'OS-EXT-STS:power_state' in info: info['OS-EXT-STS:power_state'] = PowerStateColumn( - info['OS-EXT-STS:power_state']) + info['OS-EXT-STS:power_state'] + ) - # Remove values that are long and not too useful - info.pop('links', None) + if sdk_utils.supports_microversion(compute_client, '2.100'): + if 'scheduler_hints' in info: + info['scheduler_hints'] = format_columns.DictListColumn( + info.pop('scheduler_hints', {}), + ) return info -def boolenv(*vars, default=False): - """Search for the first defined of possibly many bool-like env vars. - - Returns the first environment variable defined in vars, or returns the - default. - - :param vars: Arbitrary strings to search for. Case sensitive. - :param default: The default to return if no value found. - :returns: A boolean corresponding to the value found, else the default if - no value found. - """ - for v in vars: - value = os.environ.get(v, None) - if value: - return strutils.bool_from_string(value) - return default - - class AddFixedIP(command.ShowOne): _description = _("Add fixed IP address to server") def get_parser(self, prog_name): - parser = super(AddFixedIP, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "server", metavar="", @@ -307,15 +377,14 @@ def get_parser(self, prog_name): help=_( 'Tag for the attached interface. ' '(supported by --os-compute-api-version 2.49 or above)' - ) + ), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute server = compute_client.find_server( - parsed_args.server, - ignore_missing=False + parsed_args.server, ignore_missing=False ) if parsed_args.tag: @@ -329,41 +398,42 @@ def take_action(self, parsed_args): if self.app.client_manager.is_network_endpoint_enabled(): network_client = self.app.client_manager.network net_id = network_client.find_network( - parsed_args.network, - ignore_missing=False + parsed_args.network, ignore_missing=False ).id else: net_id = parsed_args.network - if not sdk_utils.supports_microversion(compute_client, '2.44'): - compute_client.add_fixed_ip_to_server( - server.id, - net_id - ) - return ((), ()) - - kwargs = { - 'net_id': net_id - } + kwargs = {'net_id': net_id} if parsed_args.fixed_ip_address: kwargs['fixed_ips'] = [ - {"ip_address": parsed_args.fixed_ip_address}] + {"ip_address": parsed_args.fixed_ip_address} + ] if parsed_args.tag: kwargs['tag'] = parsed_args.tag interface = compute_client.create_server_interface(server.id, **kwargs) - columns = ( - 'port_id', 'server_id', 'net_id', 'mac_addr', 'port_state', + columns: tuple[str, ...] = ( + 'port_id', + 'server_id', + 'net_id', + 'mac_addr', + 'port_state', 'fixed_ips', ) - column_headers = ( - 'Port ID', 'Server ID', 'Network ID', 'MAC Address', 'Port State', + column_headers: tuple[str, ...] 
= ( + 'Port ID', + 'Server ID', + 'Network ID', + 'MAC Address', + 'Port State', 'Fixed IPs', ) - if sdk_utils.supports_microversion(compute_client, '2.49'): - columns += ('tag',) - column_headers += ('Tag',) + + if parsed_args.tag: + if sdk_utils.supports_microversion(compute_client, '2.49'): + columns += ('tag',) + column_headers += ('Tag',) return ( column_headers, @@ -389,8 +459,10 @@ def update_parser_common(self, parser): parser.add_argument( "ip_address", metavar="", - help=_("Floating IP address to assign to the first available " - "server port (IP only)"), + help=_( + "Floating IP address to assign to the first available " + "server port (IP only)" + ), ) parser.add_argument( "--fixed-ip-address", @@ -411,9 +483,9 @@ def take_action_network(self, client, parsed_args): parsed_args.ip_address, ignore_missing=False, ) - server = utils.find_resource( - compute_client.servers, - parsed_args.server, + + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) ports = list(client.ports(device_id=server.id)) if not ports: @@ -449,8 +521,11 @@ def take_action_network(self, client, parsed_args): client.update_ip(obj, **attrs) except sdk_exceptions.NotFoundException as exp: # 404 ExternalGatewayForFloatingIPNotFound from neutron - LOG.info('Skipped port %s because it is not attached to ' - 'an external gateway', port.id) + LOG.info( + 'Skipped port %s because it is not attached to ' + 'an external gateway', + port.id, + ) error = exp continue else: @@ -460,8 +535,9 @@ def take_action_network(self, client, parsed_args): raise error def take_action_compute(self, client, parsed_args): - client.api.floating_ip_add( - parsed_args.server, + server = client.find_server(parsed_args.server, ignore_missing=False) + client.add_floating_ip_to_server( + server, parsed_args.ip_address, fixed_address=parsed_args.fixed_ip_address, ) @@ -471,7 +547,7 @@ class AddPort(command.Command): _description = _("Add port to server") def get_parser(self, prog_name): - parser = super(AddPort, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "server", metavar="", @@ -488,26 +564,26 @@ def get_parser(self, prog_name): help=_( 'Tag for the attached interface ' '(supported by --os-compute-api-version 2.49 or later)' - ) + ), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute server = compute_client.find_server( - parsed_args.server, ignore_missing=False) + parsed_args.server, ignore_missing=False + ) if self.app.client_manager.is_network_endpoint_enabled(): network_client = self.app.client_manager.network port_id = network_client.find_port( - parsed_args.port, ignore_missing=False).id + parsed_args.port, ignore_missing=False + ).id else: port_id = parsed_args.port - kwargs = { - 'port_id': port_id - } + kwargs = {'port_id': port_id} if parsed_args.tag: if not sdk_utils.supports_microversion(compute_client, '2.49'): @@ -525,7 +601,7 @@ class AddNetwork(command.Command): _description = _("Add network to server") def get_parser(self, prog_name): - parser = super(AddNetwork, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "server", metavar="", @@ -547,21 +623,21 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute server = compute_client.find_server( - 
parsed_args.server, ignore_missing=False) + parsed_args.server, ignore_missing=False + ) if self.app.client_manager.is_network_endpoint_enabled(): network_client = self.app.client_manager.network net_id = network_client.find_network( - parsed_args.network, ignore_missing=False).id + parsed_args.network, ignore_missing=False + ).id else: net_id = parsed_args.network - kwargs = { - 'net_id': net_id - } + kwargs = {'net_id': net_id} if parsed_args.tag: if not sdk_utils.supports_microversion(compute_client, '2.49'): @@ -577,44 +653,84 @@ def take_action(self, parsed_args): class AddServerSecurityGroup(command.Command): - _description = _("Add security group to server") + _description = _("Add security group(s) to server") def get_parser(self, prog_name): - parser = super(AddServerSecurityGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', help=_('Server (name or ID)'), ) parser.add_argument( - 'group', - metavar='', - help=_('Security group to add (name or ID)'), + 'security_groups', + metavar='', + nargs='+', + help=_( + 'Security group(s) to add to the server (name or ID) ' + '(repeat option to add multiple groups)' + ), ) return parser def take_action(self, parsed_args): compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, - parsed_args.server, - ) - security_group = compute_client.api.security_group_find( - parsed_args.group, + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) + if self.app.client_manager.is_network_endpoint_enabled(): + # the server handles both names and IDs for neutron SGs, so just + # pass things through if using neutron + security_groups = parsed_args.security_groups + else: + # however, if using nova-network then it needs names, not IDs + security_groups = [] + for security_group in parsed_args.security_groups: + security_groups.append( + compute_v2.find_security_group( + compute_client, security_group + )['name'] + ) + + errors = 0 + for security_group in security_groups: + try: + compute_client.add_security_group_to_server( + server, + {'name': security_group}, + ) + except sdk_exceptions.HttpException as e: + errors += 1 + LOG.error( + _( + "Failed to add security group with name or ID " + "'%(security_group)s' to server '%(server)s': %(e)s" + ), + { + 'security_group': security_group, + 'server': server.id, + 'e': e, + }, + ) - server.add_security_group(security_group['id']) + if errors > 0: + msg = _( + "%(errors)d of %(total)d security groups were not added." + ) % {'errors': errors, 'total': len(security_groups)} + raise exceptions.CommandError(msg) class AddServerVolume(command.ShowOne): - _description = _("""Add volume to server. + _description = _( + """Add volume to server. 
Specify ``--os-compute-api-version 2.20`` or higher to add a volume to a server -with status ``SHELVED`` or ``SHELVED_OFFLOADED``.""") +with status ``SHELVED`` or ``SHELVED_OFFLOADED``.""" + ) def get_parser(self, prog_name): - parser = super(AddServerVolume, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -660,7 +776,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute volume_client = self.app.client_manager.sdk_connection.volume server = compute_client.find_server( @@ -672,10 +788,7 @@ def take_action(self, parsed_args): ignore_missing=False, ) - kwargs = { - "volumeId": volume.id, - "device": parsed_args.device - } + kwargs = {"volumeId": volume.id, "device": parsed_args.device} if parsed_args.tag: if not sdk_utils.supports_microversion(compute_client, '2.49'): @@ -712,8 +825,13 @@ def take_action(self, parsed_args): **kwargs, ) - columns = ('id', 'server id', 'volume id', 'device') - column_headers = ('ID', 'Server ID', 'Volume ID', 'Device') + columns: tuple[str, ...] = ('id', 'server id', 'volume id', 'device') + column_headers: tuple[str, ...] = ( + 'ID', + 'Server ID', + 'Volume ID', + 'Device', + ) if sdk_utils.supports_microversion(compute_client, '2.49'): columns += ('tag',) column_headers += ('Tag',) @@ -723,12 +841,14 @@ def take_action(self, parsed_args): return ( column_headers, - utils.get_item_properties(volume_attachment, columns,) + utils.get_item_properties( + volume_attachment, + columns, + ), ) class NoneNICAction(argparse.Action): - def __init__(self, option_strings, dest, help=None): super().__init__( option_strings=option_strings, @@ -748,7 +868,6 @@ def __call__(self, parser, namespace, values, option_string=None): class AutoNICAction(argparse.Action): - def __init__(self, option_strings, dest, help=None): super().__init__( option_strings=option_strings, @@ -768,7 +887,6 @@ def __call__(self, parser, namespace, values, option_string=None): class NICAction(argparse.Action): - def __init__( self, option_strings, @@ -802,7 +920,7 @@ def __call__(self, parser, namespace, values, option_string=None): "Invalid argument %s; characters ',' and '=' are not " "allowed" ) - raise argparse.ArgumentTypeError(msg % values) + raise argparse.ArgumentError(self, msg % values) values = '='.join([self.key, values]) else: @@ -830,22 +948,28 @@ def __call__(self, parser, namespace, values, option_string=None): "'net-id=net-uuid,port-id=port-uuid,v4-fixed-ip=ip-addr," "v6-fixed-ip=ip-addr,tag=tag'" ) - raise argparse.ArgumentTypeError(msg % values) + raise argparse.ArgumentError(self, msg % values) info[k] = v if info['net-id'] and info['port-id']: msg = _( - 'Invalid argument %s; either network or port should be ' - 'specified but not both' + "Invalid argument %s; either 'network' or 'port' should be " + "specified but not both" + ) + raise argparse.ArgumentError(self, msg % values) + + if info['v4-fixed-ip'] and info['v6-fixed-ip']: + msg = _( + "Invalid argument %s; either 'v4-fixed-ip' or 'v6-fixed-ip' " + "should be specified but not both" ) - raise argparse.ArgumentTypeError(msg % values) + raise argparse.ArgumentError(self, msg % values) getattr(namespace, self.dest).append(info) class BDMLegacyAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): # Make sure we have an empty list rather than None if getattr(namespace, 
self.dest, None) is None: @@ -858,7 +982,7 @@ def __call__(self, parser, namespace, values, option_string=None): "Invalid argument %s; argument must be of form " "'dev-name=id[:type[:size[:delete-on-terminate]]]'" ) - raise argparse.ArgumentTypeError(msg % values) + raise argparse.ArgumentError(self, msg % values) mapping = { 'device_name': dev_name, @@ -875,7 +999,7 @@ def __call__(self, parser, namespace, values, option_string=None): "Invalid argument %s; 'type' must be one of: volume, " "snapshot, image" ) - raise argparse.ArgumentTypeError(msg % values) + raise argparse.ArgumentError(self, msg % values) mapping['source_type'] = dev_map[1] @@ -890,18 +1014,27 @@ def __call__(self, parser, namespace, values, option_string=None): class BDMAction(parseractions.MultiKeyValueAction): - def __init__(self, option_strings, dest, **kwargs): - required_keys = [] optional_keys = [ - 'uuid', 'source_type', 'destination_type', - 'disk_bus', 'device_type', 'device_name', 'volume_size', - 'guest_format', 'boot_index', 'delete_on_termination', 'tag', + 'uuid', + 'source_type', + 'destination_type', + 'disk_bus', + 'device_type', + 'device_name', + 'volume_size', + 'guest_format', + 'boot_index', + 'delete_on_termination', + 'tag', 'volume_type', ] super().__init__( - option_strings, dest, required_keys=required_keys, - optional_keys=optional_keys, **kwargs, + option_strings, + dest, + required_keys=[], + optional_keys=optional_keys, + **kwargs, ) # TODO(stephenfin): Remove once I549d0897ef3704b7f47000f867d6731ad15d3f2b @@ -918,10 +1051,14 @@ def validate_keys(self, keys): "Invalid keys %(invalid_keys)s specified.\n" "Valid keys are: %(valid_keys)s" ) - raise argparse.ArgumentTypeError(msg % { - 'invalid_keys': ', '.join(invalid_keys), - 'valid_keys': ', '.join(valid_keys), - }) + raise argparse.ArgumentError( + self, + msg + % { + 'invalid_keys': ', '.join(invalid_keys), + 'valid_keys': ', '.join(valid_keys), + }, + ) missing_keys = [k for k in self.required_keys if k not in keys] if missing_keys: @@ -929,10 +1066,14 @@ def validate_keys(self, keys): "Missing required keys %(missing_keys)s.\n" "Required keys are: %(required_keys)s" ) - raise argparse.ArgumentTypeError(msg % { - 'missing_keys': ', '.join(missing_keys), - 'required_keys': ', '.join(self.required_keys), - }) + raise argparse.ArgumentError( + self, + msg + % { + 'missing_keys': ', '.join(missing_keys), + 'required_keys': ', '.join(self.required_keys), + }, + ) def __call__(self, parser, namespace, values, option_string=None): if getattr(namespace, self.dest, None) is None: @@ -954,7 +1095,7 @@ class CreateServer(command.ShowOne): _description = _("Create a new server") def get_parser(self, prog_name): - parser = super(CreateServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server_name', metavar='', @@ -1024,7 +1165,7 @@ def get_parser(self, prog_name): 'be deleted when the server is deleted. This option is ' 'mutually exclusive with the ``--volume`` and ``--snapshot`` ' 'options.' 
- ) + ), ) # TODO(stephenfin): Remove this in the v7.0 parser.add_argument( @@ -1053,7 +1194,7 @@ def get_parser(self, prog_name): ) parser.add_argument( '--block-device', - metavar='', + metavar='', action=BDMAction, dest='block_devices', default=[], @@ -1169,7 +1310,7 @@ def get_parser(self, prog_name): parser.add_argument( '--nic', metavar="", + "v6-fixed-ip=ip-addr,tag=tag,auto,none>", dest='nics', action=NICAction, # NOTE(RuiChen): Add '\n' to the end of line to improve formatting; @@ -1203,13 +1344,26 @@ def get_parser(self, prog_name): 'This option requires cloud support.' ), ) - parser.add_argument( + secgroups = parser.add_mutually_exclusive_group() + secgroups.add_argument( + '--no-security-group', + dest='security_groups', + action='store_const', + const=[], + help=_( + 'Do not associate a security group with ports attached to ' + 'this server. This does not affect the security groups ' + 'associated with pre-existing ports.' + ), + ) + secgroups.add_argument( '--security-group', metavar='', action='append', - default=[], + dest='security_groups', help=_( - 'Security group to assign to this server (name or ID) ' + 'Security group to associate with ports attached to this ' + 'server (name or ID) ' '(repeat option to set multiple groups)' ), ) @@ -1235,7 +1389,7 @@ def get_parser(self, prog_name): default=[], help=_( 'File(s) to inject into image before boot ' - '(repeat option to set multiple files)' + '(repeat option to set multiple files) ' '(supported by --os-compute-api-version 2.57 or below)' ), ) @@ -1282,10 +1436,19 @@ def get_parser(self, prog_name): '(supported by --os-compute-api-version 2.74 or above)' ), ) + parser.add_argument( + '--server-group', + metavar='', + help=_( + "Server group to create the server within " + "(this is an alias for '--hint group=')" + ), + ) parser.add_argument( '--hint', metavar='', action=parseractions.KeyValueAppendAction, + dest='hints', default={}, help=_('Hints for the scheduler'), ) @@ -1373,10 +1536,9 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() compute_client = self.app.client_manager.compute @@ -1387,18 +1549,10 @@ def _show_progress(progress): image = None if parsed_args.image: image = image_client.find_image( - parsed_args.image, ignore_missing=False) + parsed_args.image, ignore_missing=False + ) if not image and parsed_args.image_properties: - def emit_duplicated_warning(img): - img_uuid_list = [str(image.id) for image in img] - LOG.warning( - 'Multiple matching images: %(img_uuid_list)s\n' - 'Using image: %(chosen_one)s', - { - 'img_uuid_list': img_uuid_list, - 'chosen_one': img_uuid_list[0], - }) def _match_image(image_api, wanted_properties): image_list = image_api.images() @@ -1412,14 +1566,16 @@ def _match_image(image_api, wanted_properties): img_dict_items.extend(list(img.properties.items())) for key, value in img_dict_items: try: - set([key, value]) + {key, value} except TypeError: if key != 'properties': LOG.debug( 'Skipped the \'%s\' attribute. ' 'That cannot be compared. 
' '(image: %s, value: %s)', - key, img.id, value, + key, + img.id, + value, ) pass else: @@ -1435,13 +1591,20 @@ def _match_image(image_api, wanted_properties): images = _match_image(image_client, parsed_args.image_properties) if len(images) > 1: - emit_duplicated_warning(images, parsed_args.image_properties) + img_uuid_list = [str(image.id) for image in images] + LOG.warning( + 'Multiple matching images: %(img_uuid_list)s\n' + 'Using image: %(chosen_one)s', + { + 'img_uuid_list': img_uuid_list, + 'chosen_one': img_uuid_list[0], + }, + ) if images: image = images[0] else: msg = _( - 'No images match the property expected by ' - '--image-property' + 'No images match the property expected by --image-property' ) raise exceptions.CommandError(msg) @@ -1469,11 +1632,12 @@ def _match_image(image_api, wanted_properties): parsed_args.snapshot, ).id - flavor = utils.find_resource( - compute_client.flavors, parsed_args.flavor) + flavor = compute_client.find_flavor( + parsed_args.flavor, ignore_missing=False + ) if parsed_args.file: - if compute_client.api_version >= api_versions.APIVersion('2.57'): + if sdk_utils.supports_microversion(compute_client, '2.57'): msg = _( 'Personality files are deprecated and are not supported ' 'for --os-compute-api-version greater than 2.56; use ' @@ -1485,8 +1649,8 @@ def _match_image(image_api, wanted_properties): for f in parsed_args.file: dst, src = f.split('=', 1) try: - files[dst] = io.open(src, 'rb') - except IOError as e: + files[dst] = open(src, 'rb') + except OSError as e: msg = _("Can't open '%(source)s': %(exception)s") raise exceptions.CommandError( msg % {'source': src, 'exception': e} @@ -1504,59 +1668,87 @@ def _match_image(image_api, wanted_properties): msg = _("max instances should be > 0") raise exceptions.CommandError(msg) - userdata = None + user_data = None if parsed_args.user_data: try: - userdata = io.open(parsed_args.user_data) - except IOError as e: + with open(parsed_args.user_data, 'rb') as fh: + # TODO(stephenfin): SDK should do this for us + user_data = base64.b64encode(fh.read()).decode('utf-8') + except OSError as e: msg = _("Can't open '%(data)s': %(exception)s") raise exceptions.CommandError( msg % {'data': parsed_args.user_data, 'exception': e} ) if parsed_args.description: - if compute_client.api_version < api_versions.APIVersion("2.19"): - msg = _("Description is not supported for " - "--os-compute-api-version less than 2.19") + if not sdk_utils.supports_microversion(compute_client, '2.19'): + msg = _( + '--os-compute-api-version 2.19 or greater is ' + 'required to support the --description option' + ) raise exceptions.CommandError(msg) block_device_mapping_v2 = [] - if volume: - block_device_mapping_v2 = [{ - 'uuid': volume, - 'boot_index': 0, - 'source_type': 'volume', - 'destination_type': 'volume' - }] - elif snapshot: - block_device_mapping_v2 = [{ - 'uuid': snapshot, - 'boot_index': 0, - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'delete_on_termination': False - }] - elif parsed_args.boot_from_volume: + if parsed_args.boot_from_volume: # Tell nova to create a root volume from the image provided. 
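(Aside, not part of the patch: the root-disk branches reworked in the hunk below all build plain dicts in Nova's block_device_mapping_v2 format. A minimal illustrative sketch of the two shapes involved, with a hypothetical image ID and helper name:)

    # Illustrative only -- mirrors the dict shapes constructed in this hunk.
    def _root_bdm_from_image(image_id: str, boot_from_volume_size: int | None = None) -> dict:
        """Build a root block_device_mapping_v2 entry for an image boot."""
        if boot_from_volume_size is not None:
            # boot-from-volume: the root disk becomes a new volume of the given size
            return {
                'uuid': image_id,
                'boot_index': 0,
                'source_type': 'image',
                'destination_type': 'volume',
                'volume_size': boot_from_volume_size,
            }
        # plain image boot: root disk is local and removed with the server
        return {
            'uuid': image_id,
            'boot_index': 0,
            'source_type': 'image',
            'destination_type': 'local',
            'delete_on_termination': True,
        }

    print(_root_bdm_from_image('hypothetical-image-id', boot_from_volume_size=20))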
- block_device_mapping_v2 = [{ - 'uuid': image.id, - 'boot_index': 0, - 'source_type': 'image', - 'destination_type': 'volume', - 'volume_size': parsed_args.boot_from_volume - }] + if not image: + msg = _( + "An image (--image or --image-property) is required " + "to support --boot-from-volume option" + ) + raise exceptions.CommandError(msg) + block_device_mapping_v2 = [ + { + 'uuid': image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'volume', + 'volume_size': parsed_args.boot_from_volume, + } + ] # If booting from volume we do not pass an image to compute. image = None + elif image: + block_device_mapping_v2 = [ + { + 'uuid': image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + } + ] + elif volume: + block_device_mapping_v2 = [ + { + 'uuid': volume, + 'boot_index': 0, + 'source_type': 'volume', + 'destination_type': 'volume', + } + ] + elif snapshot: + block_device_mapping_v2 = [ + { + 'uuid': snapshot, + 'boot_index': 0, + 'source_type': 'snapshot', + 'destination_type': 'volume', + 'delete_on_termination': False, + } + ] if parsed_args.swap: - block_device_mapping_v2.append({ - 'boot_index': -1, - 'source_type': 'blank', - 'destination_type': 'local', - 'guest_format': 'swap', - 'volume_size': parsed_args.swap, - 'delete_on_termination': True, - }) + block_device_mapping_v2.append( + { + 'boot_index': -1, + 'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': 'swap', + 'volume_size': parsed_args.swap, + 'delete_on_termination': True, + } + ) for mapping in parsed_args.ephemerals: block_device_mapping_dict = { @@ -1578,12 +1770,14 @@ def _match_image(image_api, wanted_properties): # just in case if mapping['source_type'] == 'volume': volume_id = utils.find_resource( - volume_client.volumes, mapping['uuid'], + volume_client.volumes, + mapping['uuid'], ).id mapping['uuid'] = volume_id elif mapping['source_type'] == 'snapshot': snapshot_id = utils.find_resource( - volume_client.volume_snapshots, mapping['uuid'], + volume_client.volume_snapshots, + mapping['uuid'], ).id mapping['uuid'] = snapshot_id elif mapping['source_type'] == 'image': @@ -1599,7 +1793,8 @@ def _match_image(image_api, wanted_properties): # create a volume from the image and attach it to the # server as a non-root volume. 
image_id = image_client.find_image( - mapping['uuid'], ignore_missing=False, + mapping['uuid'], + ignore_missing=False, ).id mapping['uuid'] = image_id @@ -1617,7 +1812,7 @@ def _match_image(image_api, wanted_properties): raise exceptions.CommandError(msg) if 'tag' in mapping and ( - compute_client.api_version < api_versions.APIVersion('2.42') + not sdk_utils.supports_microversion(compute_client, '2.42') ): msg = _( '--os-compute-api-version 2.42 or greater is ' @@ -1626,7 +1821,7 @@ def _match_image(image_api, wanted_properties): raise exceptions.CommandError(msg) if 'volume_type' in mapping and ( - compute_client.api_version < api_versions.APIVersion('2.67') + not sdk_utils.supports_microversion(compute_client, '2.67') ): msg = _( '--os-compute-api-version 2.67 or greater is ' @@ -1636,7 +1831,10 @@ def _match_image(image_api, wanted_properties): if 'source_type' in mapping: if mapping['source_type'] not in ( - 'volume', 'image', 'snapshot', 'blank', + 'volume', + 'image', + 'snapshot', + 'blank', ): msg = _( 'The source_type key of --block-device should be one ' @@ -1661,8 +1859,10 @@ def _match_image(image_api, wanted_properties): if 'delete_on_termination' in mapping: try: - value = strutils.bool_from_string( - mapping['delete_on_termination'], strict=True) + value = envvars.bool_from_str( + mapping['delete_on_termination'], + strict=True, + ) except ValueError: msg = _( 'The delete_on_termination key of --block-device ' @@ -1677,7 +1877,7 @@ def _match_image(image_api, wanted_properties): block_device_mapping_v2.append(mapping) - if not image and not any( + if not any( [bdm.get('boot_index') == 0 for bdm in block_device_mapping_v2] ): msg = _( @@ -1686,10 +1886,12 @@ def _match_image(image_api, wanted_properties): ) raise exceptions.CommandError(msg) - nics = parsed_args.nics + # Default to empty list if nothing was specified and let nova + # decide the default behavior. 
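(Aside, not part of the patch: the loop added below translates each novaclient-style --nic dict into an entry of the SDK's networks request by renaming keys. A standalone sketch of that mapping, assuming the nic dict carries the same keys that NICAction produces:)

    # Illustrative only -- mirrors the key renaming performed in the hunk below.
    def _nic_to_network(nic: dict[str, str]) -> dict[str, str]:
        """Map a parsed --nic dict onto the SDK 'networks' request entry."""
        network: dict[str, str] = {}
        if nic.get('net-id'):
            network['uuid'] = nic['net-id']
        if nic.get('port-id'):
            network['port'] = nic['port-id']
        if nic.get('v4-fixed-ip'):
            network['fixed_ip'] = nic['v4-fixed-ip']
        elif nic.get('v6-fixed-ip'):
            network['fixed_ip'] = nic['v6-fixed-ip']
        if nic.get('tag'):  # tags are optional
            network['tag'] = nic['tag']
        return network

    print(_nic_to_network({'net-id': 'hypothetical-net-id', 'v4-fixed-ip': '10.0.0.5'}))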
+ networks: str | list[dict[str, str]] | None = [] - if 'auto' in nics or 'none' in nics: - if len(nics) > 1: + if 'auto' in parsed_args.nics or 'none' in parsed_args.nics: + if len(parsed_args.nics) > 1: msg = _( 'Specifying a --nic of auto or none cannot ' 'be used with any other --nic, --network ' @@ -1697,7 +1899,7 @@ def _match_image(image_api, wanted_properties): ) raise exceptions.CommandError(msg) - if compute_client.api_version < api_versions.APIVersion('2.37'): + if not sdk_utils.supports_microversion(compute_client, '2.37'): msg = _( '--os-compute-api-version 2.37 or greater is ' 'required to support explicit auto-allocation of a ' @@ -1705,13 +1907,12 @@ def _match_image(image_api, wanted_properties): ) raise exceptions.CommandError(msg) - nics = nics[0] + networks = parsed_args.nics[0] else: - for nic in nics: + for nic in parsed_args.nics: if 'tag' in nic: - if ( - compute_client.api_version < - api_versions.APIVersion('2.43') + if not sdk_utils.supports_microversion( + compute_client, '2.43' ): msg = _( '--os-compute-api-version 2.43 or greater is ' @@ -1724,20 +1925,24 @@ def _match_image(image_api, wanted_properties): if nic['net-id']: net = network_client.find_network( - nic['net-id'], ignore_missing=False, + nic['net-id'], + ignore_missing=False, ) nic['net-id'] = net.id if nic['port-id']: port = network_client.find_port( - nic['port-id'], ignore_missing=False, + nic['port-id'], + ignore_missing=False, ) nic['port-id'] = port.id else: if nic['net-id']: - nic['net-id'] = compute_client.api.network_find( + net = compute_v2.find_network( + compute_client, nic['net-id'], - )['id'] + ) + nic['net-id'] = net['id'] if nic['port-id']: msg = _( @@ -1746,40 +1951,67 @@ def _match_image(image_api, wanted_properties): ) raise exceptions.CommandError(msg) - if not nics: + # convert from the novaclient-derived "NIC" view to the actual + # "network" view + network: dict[str, str] = {} + + if nic['net-id']: + network['uuid'] = nic['net-id'] + + if nic['port-id']: + network['port'] = nic['port-id'] + + if nic['v4-fixed-ip']: + network['fixed_ip'] = nic['v4-fixed-ip'] + elif nic['v6-fixed-ip']: + network['fixed_ip'] = nic['v6-fixed-ip'] + + if nic.get('tag'): # tags are optional + network['tag'] = nic['tag'] + + networks.append(network) # type: ignore[union-attr] + + if not parsed_args.nics and sdk_utils.supports_microversion( + compute_client, '2.37' + ): # Compute API version >= 2.37 requires a value, so default to # 'auto' to maintain legacy behavior if a nic wasn't specified. - if compute_client.api_version >= api_versions.APIVersion('2.37'): - nics = 'auto' - else: - # Default to empty list if nothing was specified and let nova - # decide the default behavior. 
- nics = [] - - # Check security group exist and convert ID to name - security_group_names = [] - if self.app.client_manager.is_network_endpoint_enabled(): - network_client = self.app.client_manager.network - for each_sg in parsed_args.security_group: - sg = network_client.find_security_group(each_sg, - ignore_missing=False) - # Use security group ID to avoid multiple security group have - # same name in neutron networking backend - security_group_names.append(sg.id) - else: - # Handle nova-network case - for each_sg in parsed_args.security_group: - sg = compute_client.api.security_group_find(each_sg) - security_group_names.append(sg['name']) + networks = 'auto' + + # Check security group(s) exist and convert ID to name + security_groups = None + if parsed_args.security_groups is not None: + security_groups = [] + if self.app.client_manager.is_network_endpoint_enabled(): + network_client = self.app.client_manager.network + for security_group in parsed_args.security_groups: + sg = network_client.find_security_group( + security_group, ignore_missing=False + ) + # Use security group ID to avoid multiple security group + # have same name in neutron networking backend + security_groups.append({'name': sg.id}) + else: # nova-network + for security_group in parsed_args.security_groups: + sg = compute_v2.find_security_group( + compute_client, security_group + ) + security_groups.append({'name': sg['name']}) hints = {} - for key, values in parsed_args.hint.items(): + for key, values in parsed_args.hints.items(): # only items with multiple values will result in a list if len(values) == 1: hints[key] = values[0] else: hints[key] = values + if parsed_args.server_group: + server_group_obj = compute_client.find_server_group( + parsed_args.server_group, ignore_missing=False + ) + hints['group'] = server_group_obj.id + if isinstance(parsed_args.config_drive, bool): # NOTE(stephenfin): The API doesn't accept False as a value :'( config_drive = parsed_args.config_drive or None @@ -1788,73 +2020,99 @@ def _match_image(image_api, wanted_properties): # '--config-drive' if str(parsed_args.config_drive).lower() in ("true", "1"): config_drive = True - elif str(parsed_args.config_drive).lower() in ("false", "0", - "", "none"): + elif str(parsed_args.config_drive).lower() in ( + "false", + "0", + "", + "none", + ): config_drive = None else: config_drive = parsed_args.config_drive - boot_args = [parsed_args.server_name, image, flavor] - - boot_kwargs = dict( - meta=parsed_args.properties, - files=files, - reservation_id=None, - min_count=parsed_args.min, - max_count=parsed_args.max, - security_groups=security_group_names, - userdata=userdata, - key_name=parsed_args.key_name, - availability_zone=parsed_args.availability_zone, - admin_pass=parsed_args.password, - block_device_mapping_v2=block_device_mapping_v2, - nics=nics, - scheduler_hints=hints, - config_drive=config_drive) + kwargs = { + 'name': parsed_args.server_name, + 'image_id': image.id if image else '', + 'flavor_id': flavor.id, + 'min_count': parsed_args.min, + 'max_count': parsed_args.max, + } if parsed_args.description: - boot_kwargs['description'] = parsed_args.description + kwargs['description'] = parsed_args.description + + if parsed_args.availability_zone: + kwargs['availability_zone'] = parsed_args.availability_zone + + if parsed_args.password: + kwargs['admin_password'] = parsed_args.password + + if parsed_args.properties: + kwargs['metadata'] = parsed_args.properties + + if parsed_args.key_name: + kwargs['key_name'] = parsed_args.key_name + + if 
user_data: + kwargs['user_data'] = user_data + + if files: + kwargs['personality'] = files + + if security_groups is not None: + kwargs['security_groups'] = security_groups + + if block_device_mapping_v2: + kwargs['block_device_mapping'] = block_device_mapping_v2 + + if hints: + kwargs['scheduler_hints'] = hints + + if networks is not None: + kwargs['networks'] = networks + + if config_drive is not None: + kwargs['config_drive'] = config_drive if parsed_args.tags: - if compute_client.api_version < api_versions.APIVersion('2.52'): + if not sdk_utils.supports_microversion(compute_client, '2.52'): msg = _( '--os-compute-api-version 2.52 or greater is required to ' 'support the --tag option' ) raise exceptions.CommandError(msg) - boot_kwargs['tags'] = parsed_args.tags + kwargs['tags'] = parsed_args.tags if parsed_args.host: - if compute_client.api_version < api_versions.APIVersion("2.74"): + if not sdk_utils.supports_microversion(compute_client, '2.74'): msg = _( '--os-compute-api-version 2.74 or greater is required to ' 'support the --host option' ) raise exceptions.CommandError(msg) - boot_kwargs['host'] = parsed_args.host + kwargs['host'] = parsed_args.host if parsed_args.hypervisor_hostname: - if compute_client.api_version < api_versions.APIVersion("2.74"): + if not sdk_utils.supports_microversion(compute_client, '2.74'): msg = _( '--os-compute-api-version 2.74 or greater is required to ' 'support the --hypervisor-hostname option' ) raise exceptions.CommandError(msg) - boot_kwargs['hypervisor_hostname'] = ( - parsed_args.hypervisor_hostname) + kwargs['hypervisor_hostname'] = parsed_args.hypervisor_hostname if parsed_args.hostname: - if compute_client.api_version < api_versions.APIVersion("2.90"): + if not sdk_utils.supports_microversion(compute_client, '2.90'): msg = _( '--os-compute-api-version 2.90 or greater is required to ' 'support the --hostname option' ) raise exceptions.CommandError(msg) - boot_kwargs['hostname'] = parsed_args.hostname + kwargs['hostname'] = parsed_args.hostname # TODO(stephenfin): Handle OS_TRUSTED_IMAGE_CERTIFICATE_IDS if parsed_args.trusted_image_certs: @@ -1864,7 +2122,7 @@ def _match_image(image_api, wanted_properties): 'servers booted directly from images' ) raise exceptions.CommandError(msg) - if compute_client.api_version < api_versions.APIVersion('2.63'): + if not sdk_utils.supports_microversion(compute_client, '2.63'): msg = _( '--os-compute-api-version 2.63 or greater is required to ' 'support the --trusted-image-cert option' @@ -1872,36 +2130,30 @@ def _match_image(image_api, wanted_properties): raise exceptions.CommandError(msg) certs = parsed_args.trusted_image_certs - boot_kwargs['trusted_image_certificates'] = certs + kwargs['trusted_image_certificates'] = certs - LOG.debug('boot_args: %s', boot_args) - LOG.debug('boot_kwargs: %s', boot_kwargs) + LOG.debug('boot_kwargs: %s', kwargs) # Wrap the call to catch exceptions in order to close files try: - server = compute_client.servers.create(*boot_args, **boot_kwargs) + server = compute_client.create_server(**kwargs) finally: # Clean up open files - make sure they are not strings for f in files: if hasattr(f, 'close'): f.close() - if hasattr(userdata, 'close'): - userdata.close() if parsed_args.wait: - if utils.wait_for_status( - compute_client.servers.get, + if not utils.wait_for_status( + compute_client.get_server, server.id, callback=_show_progress, ): - self.app.stdout.write('\n') - else: - LOG.error('Error creating server: %s', parsed_args.server_name) - self.app.stdout.write(_('Error creating 
server\n')) - raise SystemExit + msg = _('Error creating server: %s') % parsed_args.server_name + raise exceptions.CommandError(msg) - details = _prep_server_detail(compute_client, image_client, server) - return zip(*sorted(details.items())) + data = _prep_server_detail(compute_client, image_client, server) + return zip(*sorted(data.items())) class CreateServerDump(command.Command): @@ -1917,7 +2169,7 @@ class CreateServerDump(command.Command): """ def get_parser(self, prog_name): - parser = super(CreateServerDump, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -1927,9 +2179,11 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute for name_or_id in parsed_args.server: - server = compute_client.find_server(name_or_id) + server = compute_client.find_server( + name_or_id, ignore_missing=False + ) server.trigger_crash_dump(compute_client) @@ -1937,7 +2191,7 @@ class DeleteServer(command.Command): _description = _("Delete server(s)") def get_parser(self, prog_name): - parser = super(DeleteServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -1952,7 +2206,7 @@ def get_parser(self, prog_name): parser.add_argument( '--all-projects', action='store_true', - default=boolenv('ALL_PROJECTS'), + default=envvars.boolenv('ALL_PROJECTS'), help=_( 'Delete server(s) in another project by name (admin only)' '(can be specified using the ALL_PROJECTS envvar)' @@ -1966,47 +2220,105 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() compute_client = self.app.client_manager.compute + + deleted_servers = [] for server in parsed_args.server: - server_obj = utils.find_resource( - compute_client.servers, server, - all_tenants=parsed_args.all_projects) + try: + server_obj = compute_client.find_server( + server, + ignore_missing=False, + all_projects=parsed_args.all_projects, + ) - if parsed_args.force: - compute_client.servers.force_delete(server_obj.id) - else: - compute_client.servers.delete(server_obj.id) + compute_client.delete_server( + server_obj, force=parsed_args.force + ) + deleted_servers.append(server_obj) + except Exception as e: + LOG.error( + _( + "Failed to delete server with " + "name or ID '%(server)s': %(e)s" + ), + {'server': server, 'e': e}, + ) - if parsed_args.wait: - if not utils.wait_for_delete( - compute_client.servers, - server_obj.id, - callback=_show_progress, - ): - msg = _('Error deleting server: %s') - LOG.error(msg, server_obj.id) - self.app.stdout.write(_('Error deleting server\n')) - raise SystemExit + if parsed_args.wait: + for server_obj in deleted_servers: + try: + compute_client.wait_for_delete( + server_obj, callback=_show_progress + ) + except sdk_exceptions.ResourceTimeout: + msg = _('Error deleting server: %s') % server_obj.id + deleted_servers.remove(server_obj) + raise exceptions.CommandError(msg) + + fails = len(parsed_args.server) - len(deleted_servers) + if fails > 0: + total = len(parsed_args.server) + msg = _("%(fails)s of %(total)s servers failed to delete.") % { + 'fails': fails, + 'total': total, + } + raise exceptions.CommandError(msg) + + +class PercentAction(argparse.Action): + 
def __init__( + self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None, + ): + if nargs == 0: + raise ValueError( + 'nargs for store actions must be != 0; if you ' + 'have nothing to store, actions such as store ' + 'true or store const may be more appropriate' + ) + if const is not None: + raise ValueError('const does not make sense for PercentAction') + + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=nargs, + const=const, + default=default, + type=type, + choices=choices, + required=required, + help=help, + metavar=metavar, + ) -def percent_type(x): - x = int(x) - if not 0 < x <= 100: - raise argparse.ArgumentTypeError("Must be between 0 and 100") - return x + def __call__(self, parser, namespace, values, option_string=None): + x = int(values) + if not 0 < x <= 100: + raise argparse.ArgumentError(self, "Must be between 0 and 100") + setattr(namespace, self.dest, x) class ListServer(command.Lister): _description = _("List servers") def get_parser(self, prog_name): - parser = super(ListServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--reservation-id', metavar='', @@ -2063,7 +2375,7 @@ def get_parser(self, prog_name): 'SHUTOFF', 'SOFT_DELETED', 'SUSPENDED', - 'VERIFY_RESIZE' + 'VERIFY_RESIZE', ), help=_('Search by server status'), ) @@ -2085,7 +2397,7 @@ def get_parser(self, prog_name): parser.add_argument( '--all-projects', action='store_true', - default=boolenv('ALL_PROJECTS'), + default=envvars.boolenv('ALL_PROJECTS'), help=_( 'Include all projects (admin only) ' '(can be specified using the ALL_PROJECTS envvar)' @@ -2094,7 +2406,7 @@ def get_parser(self, prog_name): parser.add_argument( '--project', metavar='', - help=_("Search by project (admin only) (name or ID)") + help=_("Search by project (admin only) (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( @@ -2123,8 +2435,7 @@ def get_parser(self, prog_name): parser.add_argument( '--key-name', help=_( - 'Search by keypair name ' - '(admin only before microversion 2.83)' + 'Search by keypair name (admin only before microversion 2.83)' ), ) config_drive_group = parser.add_mutually_exclusive_group() @@ -2152,7 +2463,7 @@ def get_parser(self, prog_name): ) parser.add_argument( '--progress', - type=percent_type, + action=PercentAction, default=None, help=_( 'Search by progress value (%%) ' @@ -2260,7 +2571,8 @@ def get_parser(self, prog_name): ) name_lookup_group = parser.add_mutually_exclusive_group() name_lookup_group.add_argument( - '-n', '--no-name-lookup', + '-n', + '--no-name-lookup', action='store_true', default=False, help=_( @@ -2273,34 +2585,12 @@ def get_parser(self, prog_name): action='store_true', default=False, help=_( - 'When looking up flavor and image names, look them up' + 'When looking up flavor and image names, look them up ' 'one by one as needed instead of all together (default). ' 'Mutually exclusive with "--no-name-lookup|-n" option.' ), ) - parser.add_argument( - '--marker', - metavar='', - default=None, - help=_( - 'The last server of the previous page. Display ' - 'list of servers after marker. Display all servers if not ' - 'specified. When used with ``--deleted``, the marker must ' - 'be an ID, otherwise a name or ID can be used.' - ), - ) - parser.add_argument( - '--limit', - metavar='', - type=int, - default=None, - help=_( - "Maximum number of servers to display. 
If limit equals -1, " - "all servers will be displayed. If limit is greater than " - "'osapi_max_limit' option of Nova API, " - "'osapi_max_limit' will be used instead." - ), - ) + pagination.add_marker_pagination_option_to_parser(parser) parser.add_argument( '--changes-before', metavar='', @@ -2368,7 +2658,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute identity_client = self.app.client_manager.identity image_client = self.app.client_manager.image @@ -2393,10 +2683,9 @@ def take_action(self, parsed_args): # flavor name is given, map it to ID. flavor_id = None if parsed_args.flavor: - flavor = compute_client.find_flavor(parsed_args.flavor) - if flavor is None: - msg = _('Unable to find flavor: %s') % parsed_args.flavor - raise exceptions.CommandError(msg) + flavor = compute_client.find_flavor( + parsed_args.flavor, ignore_missing=False + ) flavor_id = flavor.id # Nova only supports list servers searching by image ID. So if a @@ -2416,7 +2705,7 @@ def take_action(self, parsed_args): 'status': parsed_args.status, 'flavor': flavor_id, 'image': image_id, - 'host': parsed_args.host, + 'compute_host': parsed_args.host, 'project_id': project_id, 'all_projects': parsed_args.all_projects, 'user_id': user_id, @@ -2512,8 +2801,8 @@ def take_action(self, parsed_args): iso8601.parse_date(search_opts['changes-before']) except (TypeError, iso8601.ParseError): raise exceptions.CommandError( - _('Invalid changes-before value: %s') % - search_opts['changes-before'] + _('Invalid changes-before value: %s') + % search_opts['changes-before'] ) if search_opts['changes-since']: @@ -2525,12 +2814,12 @@ def take_action(self, parsed_args): msg % search_opts['changes-since'] ) - columns = ( + columns: tuple[str, ...] = ( 'id', 'name', 'status', ) - column_headers = ( + column_headers: tuple[str, ...] 
= ( 'ID', 'Name', 'Status', @@ -2599,6 +2888,17 @@ def take_action(self, parsed_args): 'Host', 'Properties', ) + if sdk_utils.supports_microversion(compute_client, '2.96'): + columns += ('pinned_availability_zone',) + column_headers += ('Pinned Availability Zone',) + + if sdk_utils.supports_microversion(compute_client, '2.100'): + columns += ('scheduler_hints',) + column_headers += ('Scheduler Hints',) + + if parsed_args.all_projects: + columns += ('project_id',) + column_headers += ('Project ID',) # support for additional columns if parsed_args.columns: @@ -2630,12 +2930,28 @@ def take_action(self, parsed_args): if c in ('Availability Zone', "availability_zone"): columns += ('availability_zone',) column_headers += ('Availability Zone',) + if c in ( + 'pinned_availability_zone', + 'Pinned Availability Zone', + ): + if sdk_utils.supports_microversion(compute_client, '2.96'): + columns += ('pinned_availability_zone',) + column_headers += ('Pinned Availability Zone',) if c in ('Host', "host"): columns += ('hypervisor_hostname',) column_headers += ('Host',) if c in ('Properties', "properties"): columns += ('Metadata',) column_headers += ('Properties',) + if c in ( + 'scheduler_hints', + "Scheduler Hints", + ): + if sdk_utils.supports_microversion( + compute_client, '2.100' + ): + columns += ('scheduler_hints',) + column_headers += ('Scheduler Hints',) # remove duplicates column_headers = tuple(dict.fromkeys(column_headers)) @@ -2649,7 +2965,9 @@ def take_action(self, parsed_args): if parsed_args.deleted: marker_id = parsed_args.marker else: - marker_id = compute_client.find_server(parsed_args.marker).id + marker_id = compute_client.find_server( + parsed_args.marker, ignore_missing=False + ).id search_opts['marker'] = marker_id data = list(compute_client.servers(**search_opts)) @@ -2660,7 +2978,8 @@ def take_action(self, parsed_args): # partial responses from down cells will not have an image # attribute so we use getattr image_ids = { - s.image['id'] for s in data + s.image['id'] + for s in data if getattr(s, 'image', None) and s.image.get('id') } @@ -2670,16 +2989,17 @@ def take_action(self, parsed_args): # there are infra failures if parsed_args.name_lookup_one_by_one or image_id: for image_id in image_ids: - # "Image Name" is not crucial, so we swallow any exceptions try: images[image_id] = image_client.get_image(image_id) - except Exception: + except Exception: # noqa: S110 + # retrieving image names is not crucial, so we swallow + # any exceptions pass else: try: # some deployments can have *loads* of images so we only # want to list the ones we care about. It would be better - # to only retrun the *fields* we care about (name) but + # to only return the *fields* we care about (name) but # glance doesn't support that # NOTE(stephenfin): This could result in super long URLs # but it seems unlikely to cause issues. 
Apache supports @@ -2692,7 +3012,9 @@ def take_action(self, parsed_args): ) for i in images_list: images[i.id] = i - except Exception: + except Exception: # noqa: S110 + # retrieving image names is not crucial, so we swallow any + # exceptions pass # create a dict that maps flavor_id to flavor object, which is used @@ -2700,22 +3022,27 @@ def take_action(self, parsed_args): # present on microversion 2.47 or later and 'flavor' won't be # present if there are infra failures if parsed_args.name_lookup_one_by_one or flavor_id: - for f_id in set( - s.flavor['id'] for s in data + for f_id in { + s.flavor['id'] + for s in data if s.flavor and s.flavor.get('id') - ): - # "Flavor Name" is not crucial, so we swallow any - # exceptions + }: try: - flavors[f_id] = compute_client.find_flavor(f_id) - except Exception: + flavors[f_id] = compute_client.find_flavor( + f_id, ignore_missing=False + ) + except Exception: # noqa: S110 + # retrieving flavor names is not crucial, so we swallow + # any exceptions pass else: try: flavors_list = compute_client.flavors(is_public=None) for i in flavors_list: flavors[i.id] = i - except Exception: + except Exception: # noqa: S110 + # retrieving flavor names is not crucial, so we swallow any + # exceptions pass # Populate image_name, image_id, flavor_name and flavor_id attributes @@ -2727,7 +3054,7 @@ def take_action(self, parsed_args): # infrastructure failure situations. # For those servers with partial constructs we just skip the # processing of the image and flavor information. - if not hasattr(s, 'image') or not hasattr(s, 'flavor'): + if getattr(s, 'status') == 'UNKNOWN': continue if 'id' in s.image and s.image.id is not None: @@ -2766,8 +3093,8 @@ def take_action(self, parsed_args): # it's on, providing useful information to a user in this # situation. if ( - sdk_utils.supports_microversion(compute_client, '2.16') and - parsed_args.long + sdk_utils.supports_microversion(compute_client, '2.16') + and parsed_args.long ): if any([s.host_status is not None for s in data]): columns += ('Host Status',) @@ -2777,7 +3104,8 @@ def take_action(self, parsed_args): column_headers, ( utils.get_item_properties( - s, columns, + s, + columns, mixed_case_fields=( 'task_state', 'power_state', @@ -2790,21 +3118,24 @@ def take_action(self, parsed_args): 'metadata': format_columns.DictColumn, 'security_groups_name': format_columns.ListColumn, 'hypervisor_hostname': HostColumn, + 'scheduler_hints': format_columns.DictListColumn, }, - ) for s in data + ) + for s in data ), ) return table class LockServer(command.Command): + _description = _( + """Lock server(s) - _description = _("""Lock server(s) - -A non-admin user will not be able to execute actions.""") +A non-admin user will not be able to execute actions.""" + ) def get_parser(self, prog_name): - parser = super(LockServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -2815,24 +3146,32 @@ def get_parser(self, prog_name): '--reason', metavar='', default=None, - help=_("Reason for locking the server(s). 
Requires " - "``--os-compute-api-version`` 2.73 or greater.") + help=_( + 'Reason for locking the server(s) ' + '(supported by --os-compute-api-version 2.73 or above)' + ), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.compute - support_reason = compute_client.api_version >= api_versions.APIVersion( - '2.73') - if not support_reason and parsed_args.reason: - msg = _('--os-compute-api-version 2.73 or greater is required to ' - 'use the --reason option.') - raise exceptions.CommandError(msg) + + kwargs = {} + if parsed_args.reason: + if not sdk_utils.supports_microversion(compute_client, '2.73'): + msg = _( + '--os-compute-api-version 2.73 or greater is required to ' + 'use the --reason option' + ) + raise exceptions.CommandError(msg) + + kwargs['locked_reason'] = parsed_args.reason + for server in parsed_args.server: - serv = utils.find_resource(compute_client.servers, server) - (serv.lock(reason=parsed_args.reason) if support_reason - else serv.lock()) + server_id = compute_client.find_server( + server, ignore_missing=False + ).id + compute_client.lock_server(server_id, **kwargs) # FIXME(dtroyer): Here is what I want, how with argparse/cliff? @@ -2845,8 +3184,10 @@ def take_action(self, parsed_args): # live_parser = parser.add_argument_group(title='Live migration options') # then adding the groups doesn't seem to work + class MigrateServer(command.Command): - _description = _("""Migrate server to different host. + _description = _( + """Migrate server to different host. A migrate operation is implemented as a resize operation using the same flavor as the old server. This means that, like resize, migrate works by creating a @@ -2854,10 +3195,11 @@ class MigrateServer(command.Command): into a new one. As with resize, the migrate operation is a two-step process for the user: the first step is to perform the migrate, and the second step is to either confirm (verify) success and release the old server, or to declare a -revert to release the new server and restart the old one.""") +revert to release the new server and restart the old one.""" + ) def get_parser(self, prog_name): - parser = super(MigrateServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -2910,7 +3252,7 @@ def get_parser(self, prog_name): action='store_true', default=None, help=_( - 'Allow disk over-commit on the destination host' + 'Allow disk over-commit on the destination host ' '(supported with --os-compute-api-version 2.24 or below)' ), ) @@ -2919,7 +3261,7 @@ def get_parser(self, prog_name): dest='disk_overcommit', action='store_false', help=_( - 'Do not over-commit disk on the destination host (default)' + 'Do not over-commit disk on the destination host (default) ' '(supported with --os-compute-api-version 2.24 or below)' ), ) @@ -2931,17 +3273,15 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, - parsed_args.server, + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) if parsed_args.live_migration: @@ -2949,10 +3289,7 @@ def _show_progress(progress): block_migration = parsed_args.block_migration if block_migration is None: - if ( - compute_client.api_version < - 
api_versions.APIVersion('2.25') - ): + if not sdk_utils.supports_microversion(compute_client, '2.25'): block_migration = False else: block_migration = 'auto' @@ -2965,9 +3302,8 @@ def _show_progress(progress): # want to support, so if the user is using --live-migration # and --host, we want to enforce that they are using version # 2.30 or greater. - if ( - parsed_args.host and - compute_client.api_version < api_versions.APIVersion('2.30') + if parsed_args.host and not sdk_utils.supports_microversion( + compute_client, '2.30' ): raise exceptions.CommandError( '--os-compute-api-version 2.30 or greater is required ' @@ -2977,13 +3313,13 @@ def _show_progress(progress): # The host parameter is required in the API even if None. kwargs['host'] = parsed_args.host - if compute_client.api_version < api_versions.APIVersion('2.25'): - kwargs['disk_over_commit'] = parsed_args.disk_overcommit + if not sdk_utils.supports_microversion(compute_client, '2.25'): + kwargs['disk_overcommit'] = parsed_args.disk_overcommit # We can't use an argparse default value because then we can't # distinguish between explicit 'False' and unset for the below # case (microversion >= 2.25) - if kwargs['disk_over_commit'] is None: - kwargs['disk_over_commit'] = False + if kwargs['disk_overcommit'] is None: + kwargs['disk_overcommit'] = False elif parsed_args.disk_overcommit is not None: # TODO(stephenfin): Raise an error here in OSC 7.0 msg = _( @@ -2994,16 +3330,16 @@ def _show_progress(progress): ) self.log.warning(msg) - server.live_migrate(**kwargs) + compute_client.live_migrate_server(server, **kwargs) else: # cold migration if parsed_args.block_migration or parsed_args.disk_overcommit: raise exceptions.CommandError( "--live-migration must be specified if " "--block-migration or --disk-overcommit is " - "specified") + "specified" + ) if parsed_args.host: - if (compute_client.api_version < - api_versions.APIVersion('2.56')): + if not sdk_utils.supports_microversion(compute_client, '2.56'): msg = _( '--os-compute-api-version 2.56 or greater is ' 'required to use --host without --live-migration.' 
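(Aside, not part of the patch: the recurring change in this file swaps novaclient's api_versions comparisons for openstacksdk's microversion check and raises CommandError when an option needs a newer compute API. A minimal sketch of that guard, assuming sdk_utils here is openstack.utils as the calls in the patch suggest; the version and option name are placeholder examples:)

    # Illustrative only -- the guard pattern used throughout this patch.
    from openstack import utils as sdk_utils
    from osc_lib import exceptions

    def require_compute_microversion(compute_client, version: str, option: str) -> None:
        """Fail with a CommandError unless the compute API supports `version`."""
        if not sdk_utils.supports_microversion(compute_client, version):
            msg = (
                f'--os-compute-api-version {version} or greater is required '
                f'to support the {option} option'
            )
            raise exceptions.CommandError(msg)

    # e.g. require_compute_microversion(compute_client, '2.56', '--host')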
@@ -3011,28 +3347,31 @@ def _show_progress(progress): raise exceptions.CommandError(msg) kwargs = {'host': parsed_args.host} if parsed_args.host else {} - server.migrate(**kwargs) + compute_client.migrate_server(server, **kwargs) if parsed_args.wait: if utils.wait_for_status( - compute_client.servers.get, + compute_client.get_server, server.id, - success_status=['active', 'verify_resize'], + success_status=('active', 'verify_resize'), callback=_show_progress, ): - self.app.stdout.write(_('Complete\n')) + self.app.stdout.write( + _( + 'Complete, check success/failure by ' + 'openstack server migration/event list/show\n' + ) + ) else: - LOG.error(_('Error migrating server: %s'), - server.id) - self.app.stdout.write(_('Error migrating server\n')) - raise SystemExit + msg = _('Error migrating server: %s') % server.id + raise exceptions.CommandError(msg) class PauseServer(command.Command): _description = _("Pause server(s)") def get_parser(self, prog_name): - parser = super(PauseServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -3042,7 +3381,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute for server in parsed_args.server: server_id = compute_client.find_server( server, @@ -3055,7 +3394,7 @@ class RebootServer(command.Command): _description = _("Perform a hard or soft server reboot") def get_parser(self, prog_name): - parser = super(RebootServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -3066,16 +3405,16 @@ def get_parser(self, prog_name): '--hard', dest='reboot_type', action='store_const', - const=servers.REBOOT_HARD, - default=servers.REBOOT_SOFT, + const='HARD', + default='SOFT', help=_('Perform a hard reboot'), ) group.add_argument( '--soft', dest='reboot_type', action='store_const', - const=servers.REBOOT_SOFT, - default=servers.REBOOT_SOFT, + const='SOFT', + default='SOFT', help=_('Perform a soft reboot'), ) parser.add_argument( @@ -3086,36 +3425,36 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, parsed_args.server) - server.reboot(parsed_args.reboot_type) + server_id = compute_client.find_server( + parsed_args.server, + ignore_missing=False, + ).id + compute_client.reboot_server(server_id, parsed_args.reboot_type) if parsed_args.wait: + # We use osc-lib's wait_for_status since that allows for a callback if utils.wait_for_status( - compute_client.servers.get, - server.id, + compute_client.get_server, + server_id, callback=_show_progress, ): self.app.stdout.write(_('Complete\n')) else: - LOG.error(_('Error rebooting server: %s'), - server.id) - self.app.stdout.write(_('Error rebooting server\n')) - raise SystemExit + msg = _('Error rebooting server: %s') % server_id + raise exceptions.CommandError(msg) class RebuildServer(command.ShowOne): _description = _("Rebuild server") def get_parser(self, prog_name): - parser = super(RebuildServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -3125,7 +3464,7 @@ def 
get_parser(self, prog_name): '--image', metavar='', help=_( - 'Recreate server from the specified image (name or ID).' + 'Recreate server from the specified image (name or ID). ' 'Defaults to the currently used one.' ), ) @@ -3291,17 +3630,17 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() compute_client = self.app.client_manager.compute image_client = self.app.client_manager.image - server = utils.find_resource( - compute_client.servers, parsed_args.server) + server = compute_client.find_server( + parsed_args.server, ignore_missing=False + ) # If parsed_args.image is not set and if the instance is image backed, # default to the currently used one. If the instance is volume backed, @@ -3309,9 +3648,10 @@ def _show_progress(progress): # to error out in this case and ask user to supply the image. if parsed_args.image: image = image_client.find_image( - parsed_args.image, ignore_missing=False) + parsed_args.image, ignore_missing=False + ) else: - if not server.image: + if not server.image or not server.image.id: msg = _( 'The --image option is required when rebuilding a ' 'volume-backed server' @@ -3324,14 +3664,17 @@ def _show_progress(progress): if parsed_args.name is not None: kwargs['name'] = parsed_args.name + if parsed_args.password is not None: + kwargs['admin_password'] = parsed_args.password + if parsed_args.preserve_ephemeral is not None: kwargs['preserve_ephemeral'] = parsed_args.preserve_ephemeral if parsed_args.properties: - kwargs['meta'] = parsed_args.properties + kwargs['metadata'] = parsed_args.properties if parsed_args.description: - if compute_client.api_version < api_versions.APIVersion('2.19'): + if not sdk_utils.supports_microversion(compute_client, '2.19'): msg = _( '--os-compute-api-version 2.19 or greater is required to ' 'support the --description option' @@ -3341,7 +3684,7 @@ def _show_progress(progress): kwargs['description'] = parsed_args.description if parsed_args.key_name: - if compute_client.api_version < api_versions.APIVersion('2.54'): + if not sdk_utils.supports_microversion(compute_client, '2.54'): msg = _( '--os-compute-api-version 2.54 or greater is required to ' 'support the --key-name option' @@ -3350,7 +3693,7 @@ def _show_progress(progress): kwargs['key_name'] = parsed_args.key_name elif parsed_args.no_key_name: - if compute_client.api_version < api_versions.APIVersion('2.54'): + if not sdk_utils.supports_microversion(compute_client, '2.54'): msg = _( '--os-compute-api-version 2.54 or greater is required to ' 'support the --no-key-name option' @@ -3359,9 +3702,8 @@ def _show_progress(progress): kwargs['key_name'] = None - userdata = None if parsed_args.user_data: - if compute_client.api_version < api_versions.APIVersion('2.54'): + if not sdk_utils.supports_microversion(compute_client, '2.54'): msg = _( '--os-compute-api-version 2.54 or greater is required to ' 'support the --user-data option' @@ -3369,27 +3711,29 @@ def _show_progress(progress): raise exceptions.CommandError(msg) try: - userdata = io.open(parsed_args.user_data) - except IOError as e: + with open(parsed_args.user_data, 'rb') as fh: + # TODO(stephenfin): SDK should do this for us + user_data = base64.b64encode(fh.read()).decode('utf-8') + except OSError as e: msg = _("Can't open '%(data)s': %(exception)s") raise exceptions.CommandError( msg % {'data': parsed_args.user_data, 
'exception': e} ) - kwargs['userdata'] = userdata + kwargs['user_data'] = user_data elif parsed_args.no_user_data: - if compute_client.api_version < api_versions.APIVersion('2.54'): + if not sdk_utils.supports_microversion(compute_client, '2.54'): msg = _( '--os-compute-api-version 2.54 or greater is required to ' 'support the --no-user-data option' ) raise exceptions.CommandError(msg) - kwargs['userdata'] = None + kwargs['user_data'] = None # TODO(stephenfin): Handle OS_TRUSTED_IMAGE_CERTIFICATE_IDS if parsed_args.trusted_image_certs: - if compute_client.api_version < api_versions.APIVersion('2.63'): + if not sdk_utils.supports_microversion(compute_client, '2.63'): msg = _( '--os-compute-api-version 2.63 or greater is required to ' 'support the --trusted-certs option' @@ -3399,7 +3743,7 @@ def _show_progress(progress): certs = parsed_args.trusted_image_certs kwargs['trusted_image_certificates'] = certs elif parsed_args.no_trusted_image_certs: - if compute_client.api_version < api_versions.APIVersion('2.63'): + if not sdk_utils.supports_microversion(compute_client, '2.63'): msg = _( '--os-compute-api-version 2.63 or greater is required to ' 'support the --no-trusted-certs option' @@ -3409,7 +3753,7 @@ def _show_progress(progress): kwargs['trusted_image_certificates'] = None if parsed_args.hostname: - if compute_client.api_version < api_versions.APIVersion('2.90'): + if not sdk_utils.supports_microversion(compute_client, '2.90'): msg = _( '--os-compute-api-version 2.90 or greater is required to ' 'support the --hostname option' @@ -3418,9 +3762,8 @@ def _show_progress(progress): kwargs['hostname'] = parsed_args.hostname - v2_93 = api_versions.APIVersion('2.93') if parsed_args.reimage_boot_volume: - if compute_client.api_version < v2_93: + if not sdk_utils.supports_microversion(compute_client, '2.93'): msg = _( '--os-compute-api-version 2.93 or greater is required to ' 'support the --reimage-boot-volume option' @@ -3429,8 +3772,8 @@ def _show_progress(progress): else: # force user to explicitly request reimaging of volume-backed # server - if not server.image: - if compute_client.api_version >= v2_93: + if not server.image or not server.image.id: + if sdk_utils.supports_microversion(compute_client, '2.93'): msg = ( '--reimage-boot-volume is required to rebuild a ' 'volume-backed server' @@ -3453,31 +3796,38 @@ def _show_progress(progress): 'future release.' 
) - try: - server = server.rebuild(image, parsed_args.password, **kwargs) - finally: - if userdata and hasattr(userdata, 'close'): - userdata.close() + status = getattr(server, 'status', '').lower() + if status == 'shutoff': + success_status = ['shutoff'] + elif status in ('error', 'active'): + success_status = ['active'] + else: + msg = _("The server status is not ACTIVE, SHUTOFF or ERROR.") + raise exceptions.CommandError(msg) + + server = compute_client.rebuild_server(server, image, **kwargs) if parsed_args.wait: if utils.wait_for_status( - compute_client.servers.get, + compute_client.get_server, server.id, callback=_show_progress, + success_status=success_status, ): self.app.stdout.write(_('Complete\n')) else: - LOG.error(_('Error rebuilding server: %s'), server.id) - self.app.stdout.write(_('Error rebuilding server\n')) - raise SystemExit + msg = _('Error rebuilding server: %s') % server.id + raise exceptions.CommandError(msg) - details = _prep_server_detail( - compute_client, image_client, server, refresh=False) - return zip(*sorted(details.items())) + data = _prep_server_detail( + compute_client, image_client, server, refresh=False + ) + return zip(*sorted(data.items())) class EvacuateServer(command.ShowOne): - _description = _("""Evacuate a server to a different host. + _description = _( + """Evacuate a server to a different host. This command is used to recreate a server after the host it was on has failed. It can only be used if the compute service that manages the server is down. @@ -3490,22 +3840,25 @@ class EvacuateServer(command.ShowOne): If the server uses boot for volume or has its root disk on shared storage the root disk will be preserved and reused for the evacuated instance on the new -host.""") +host.""" + ) def get_parser(self, prog_name): - parser = super(EvacuateServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', help=_('Server (name or ID)'), ) - parser.add_argument( - '--wait', action='store_true', + '--wait', + action='store_true', help=_('Wait for evacuation to complete'), ) parser.add_argument( - '--host', metavar='', default=None, + '--host', + metavar='', + default=None, help=_( 'Set the preferred host on which to rebuild the evacuated ' 'server. The host will be validated by the scheduler. ' @@ -3514,7 +3867,9 @@ def get_parser(self, prog_name): ) shared_storage_group = parser.add_mutually_exclusive_group() shared_storage_group.add_argument( - '--password', metavar='', default=None, + '--password', + metavar='', + default=None, help=_( 'Set the password on the evacuated instance. This option is ' 'mutually exclusive with the --shared-storage option. ' @@ -3522,7 +3877,9 @@ def get_parser(self, prog_name): ), ) shared_storage_group.add_argument( - '--shared-storage', action='store_true', dest='shared_storage', + '--shared-storage', + action='store_true', + dest='shared_storage', help=_( 'Indicate that the instance is on shared storage. 
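# --- Illustrative sketch, not part of the patch above ---
# The converted rebuild/evacuate commands poll for completion with osc-lib's
# wait_for_status(), passing the SDK getter plus a progress callback instead
# of the old novaclient manager. A minimal standalone version of that polling
# pattern (function name and success states here are illustrative):
import sys

from osc_lib import utils


def wait_for_server(compute_client, server_id, success_status=('active',)):
    def _show_progress(progress):
        if progress:
            sys.stdout.write(f'\rProgress: {progress}')
            sys.stdout.flush()

    return utils.wait_for_status(
        compute_client.get_server,
        server_id,
        success_status=list(success_status),
        callback=_show_progress,
    )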
' 'This will be auto-calculated with ' @@ -3534,17 +3891,16 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() compute_client = self.app.client_manager.compute image_client = self.app.client_manager.image if parsed_args.host: - if compute_client.api_version < api_versions.APIVersion('2.29'): + if not sdk_utils.supports_microversion(compute_client, '2.29'): msg = _( '--os-compute-api-version 2.29 or later is required ' 'to specify a preferred host.' @@ -3552,7 +3908,7 @@ def _show_progress(progress): raise exceptions.CommandError(msg) if parsed_args.shared_storage: - if compute_client.api_version > api_versions.APIVersion('2.13'): + if sdk_utils.supports_microversion(compute_client, '2.14'): msg = _( '--os-compute-api-version 2.13 or earlier is required ' 'to specify shared-storage.' @@ -3561,39 +3917,43 @@ def _show_progress(progress): kwargs = { 'host': parsed_args.host, - 'password': parsed_args.password, + 'admin_pass': parsed_args.password, } - if compute_client.api_version <= api_versions.APIVersion('2.13'): + if not sdk_utils.supports_microversion(compute_client, '2.14'): kwargs['on_shared_storage'] = parsed_args.shared_storage - server = utils.find_resource( - compute_client.servers, parsed_args.server) - - server.evacuate(**kwargs) + server = compute_client.find_server( + parsed_args.server, ignore_missing=False + ) + compute_client.evacuate_server(server, **kwargs) if parsed_args.wait: + orig_status = server.status + success = ['ACTIVE'] + if orig_status == 'SHUTOFF': + success.append('SHUTOFF') + if utils.wait_for_status( - compute_client.servers.get, + compute_client.get_server, server.id, + success_status=success, callback=_show_progress, ): self.app.stdout.write(_('Complete\n')) else: - LOG.error(_('Error evacuating server: %s'), server.id) - self.app.stdout.write(_('Error evacuating server\n')) - raise SystemExit + msg = _('Error evacuating server: %s') % server.id + raise exceptions.CommandError(msg) - details = _prep_server_detail( - compute_client, image_client, server, refresh=True) - return zip(*sorted(details.items())) + data = _prep_server_detail(compute_client, image_client, server) + return zip(*sorted(data.items())) class RemoveFixedIP(command.Command): _description = _("Remove fixed IP address from server") def get_parser(self, prog_name): - parser = super(RemoveFixedIP, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "server", metavar="", @@ -3609,10 +3969,12 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, parsed_args.server) - - server.remove_fixed_ip(parsed_args.ip_address) + server = compute_client.find_server( + parsed_args.server, ignore_missing=False + ) + compute_client.remove_fixed_ip_from_server( + server, parsed_args.ip_address + ) class RemoveFloatingIP(network_common.NetworkAndComputeCommand): @@ -3634,27 +3996,23 @@ def update_parser_common(self, parser): return parser def take_action_network(self, client, parsed_args): - attrs = {} obj = client.find_ip( parsed_args.ip_address, ignore_missing=False, ) - attrs['port_id'] = None - client.update_ip(obj, **attrs) + client.update_ip(obj, port_id=None) def take_action_compute(self, client, parsed_args): - 
client.api.floating_ip_remove( - parsed_args.server, - parsed_args.ip_address, - ) + server = client.find_server(parsed_args.server, ignore_missing=False) + client.remove_floating_ip_from_server(server, parsed_args.ip_address) class RemovePort(command.Command): _description = _("Remove port from server") def get_parser(self, prog_name): - parser = super(RemovePort, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "server", metavar="", @@ -3668,15 +4026,17 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute server = compute_client.find_server( - parsed_args.server, ignore_missing=False) + parsed_args.server, ignore_missing=False + ) if self.app.client_manager.is_network_endpoint_enabled(): network_client = self.app.client_manager.network port_id = network_client.find_port( - parsed_args.port, ignore_missing=False).id + parsed_args.port, ignore_missing=False + ).id else: port_id = parsed_args.port @@ -3691,7 +4051,7 @@ class RemoveNetwork(command.Command): _description = _("Remove all ports of a network from server") def get_parser(self, prog_name): - parser = super(RemoveNetwork, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "server", metavar="", @@ -3705,15 +4065,17 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute server = compute_client.find_server( - parsed_args.server, ignore_missing=False) + parsed_args.server, ignore_missing=False + ) if self.app.client_manager.is_network_endpoint_enabled(): network_client = self.app.client_manager.network net_id = network_client.find_network( - parsed_args.network, ignore_missing=False).id + parsed_args.network, ignore_missing=False + ).id else: net_id = parsed_args.network @@ -3729,41 +4091,81 @@ class RemoveServerSecurityGroup(command.Command): _description = _("Remove security group from server") def get_parser(self, prog_name): - parser = super(RemoveServerSecurityGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', - help=_('Name or ID of server to use'), + help=_('Server (name or ID)'), ) parser.add_argument( - 'group', - metavar='', - help=_('Name or ID of security group to remove from server'), + 'security_groups', + metavar='', + nargs='+', + help=_( + 'Security group(s) to remove from server (name or ID) ' + '(repeat option to remove multiple groups)' + ), ) return parser def take_action(self, parsed_args): compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, - parsed_args.server, - ) - security_group = compute_client.api.security_group_find( - parsed_args.group, + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) + if self.app.client_manager.is_network_endpoint_enabled(): + # the server handles both names and IDs for neutron SGs, so just + # pass things through + security_groups = parsed_args.security_groups + else: + # however, if using nova-network then it needs names, not IDs + security_groups = [] + for security_group in parsed_args.security_groups: + security_groups.append( + compute_v2.find_security_group( + compute_client, security_group + )['name'] + ) + + errors = 0 + for security_group in 
security_groups: + try: + compute_client.remove_security_group_from_server( + server, + {'name': security_group}, + ) + except sdk_exceptions.HttpException as e: + errors += 1 + LOG.error( + _( + "Failed to remove security group with name or ID " + "'%(security_group)s' from server '%(server)s': %(e)s" + ), + { + 'security_group': security_group, + 'server': server.id, + 'e': e, + }, + ) - server.remove_security_group(security_group['id']) + if errors > 0: + msg = _( + "%(errors)d of %(total)d security groups were not removed." + ) % {'errors': errors, 'total': len(security_groups)} + raise exceptions.CommandError(msg) class RemoveServerVolume(command.Command): - _description = _("""Remove volume from server. + _description = _( + """Remove volume from server. Specify ``--os-compute-api-version 2.20`` or higher to remove a -volume from a server with status ``SHELVED`` or ``SHELVED_OFFLOADED``.""") +volume from a server with status ``SHELVED`` or ``SHELVED_OFFLOADED``.""" + ) def get_parser(self, prog_name): - parser = super(RemoveServerVolume, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -3777,7 +4179,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute volume_client = self.app.client_manager.sdk_connection.volume server = compute_client.find_server( @@ -3797,13 +4199,15 @@ def take_action(self, parsed_args): class RescueServer(command.Command): - _description = _("""Put server in rescue mode. + _description = _( + """Put server in rescue mode. Specify ``--os-compute-api-version 2.87`` or higher to rescue a -server booted from a volume.""") +server booted from a volume.""" + ) def get_parser(self, prog_name): - parser = super(RescueServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -3812,15 +4216,17 @@ def get_parser(self, prog_name): parser.add_argument( '--image', metavar='', - help=_('Image (name or ID) to use for the rescue mode.' - ' Defaults to the currently used one.'), + help=_( + 'Image (name or ID) to use for the rescue mode ' + '(defaults to the currently used one)' + ), ) parser.add_argument( '--password', metavar='', help=_( - 'Set the password on the rescued instance. ' - 'This option requires cloud support.' + 'Set the password on the rescued instance ' + '(requires cloud support)' ), ) return parser @@ -3829,34 +4235,39 @@ def take_action(self, parsed_args): compute_client = self.app.client_manager.compute image_client = self.app.client_manager.image - image = None + image_ref = None if parsed_args.image: - image = image_client.find_image(parsed_args.image) + image_ref = image_client.find_image( + parsed_args.image, ignore_missing=False + ).id - utils.find_resource( - compute_client.servers, - parsed_args.server, - ).rescue(image=image, - password=parsed_args.password) + server = compute_client.find_server( + parsed_args.server, ignore_missing=False + ) + compute_client.rescue_server( + server, admin_pass=parsed_args.password, image_ref=image_ref + ) class ResizeServer(command.Command): - _description = _("""Scale server to a new flavor. + _description = _( + """Scale server to a new flavor. A resize operation is implemented by creating a new server and copying the contents of the original disk into a new one. 
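# --- Illustrative sketch, not part of the patch above ---
# The security-group removal hunk switches to a "log, count, raise once"
# pattern so that one failing group no longer aborts the remaining removals.
# A generic standalone version of that pattern (names are illustrative):
import logging

from osc_lib import exceptions

LOG = logging.getLogger(__name__)


def remove_each(items, remove_one):
    """Call remove_one() for every item, raising a single error at the end."""
    errors = 0
    for item in items:
        try:
            remove_one(item)
        except Exception as e:  # the patch catches the SDK's HttpException
            errors += 1
            LOG.error("Failed to remove '%s': %s", item, e)
    if errors:
        msg = f"{errors} of {len(items)} items were not removed."
        raise exceptions.CommandError(msg)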
It is a two-step process for the user: the first step is to perform the resize, and the second step is to either confirm (verify) success and release the old server or to declare a revert to -release the new server and restart the old one.""") +release the new server and restart the old one.""" + ) def get_parser(self, prog_name): - parser = super(ResizeServer, self).get_parser(prog_name) - phase_group = parser.add_mutually_exclusive_group() + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', help=_('Server (name or ID)'), ) + phase_group = parser.add_mutually_exclusive_group() phase_group.add_argument( '--flavor', metavar='', @@ -3875,7 +4286,7 @@ def get_parser(self, prog_name): '--revert', action="store_true", help=_( - '**Deprecated** Restore server state before resize' + '**Deprecated** Restore server state before resize. ' "Replaced by the 'openstack server resize revert' and " "'openstack server migration revert' commands" ), @@ -3888,55 +4299,65 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, - parsed_args.server, + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) if parsed_args.flavor: - flavor = utils.find_resource( - compute_client.flavors, - parsed_args.flavor, + if not server.image: + self.log.warning( + _( + "The root disk size in flavor will not be applied " + "while booting from a persistent volume." + ) + ) + flavor = compute_client.find_flavor( + parsed_args.flavor, ignore_missing=False ) - compute_client.servers.resize(server, flavor) + compute_client.resize_server(server, flavor) if parsed_args.wait: - if utils.wait_for_status( - compute_client.servers.get, + if not utils.wait_for_status( + compute_client.get_server, server.id, - success_status=['active', 'verify_resize'], + success_status=('active', 'verify_resize'), callback=_show_progress, ): - self.app.stdout.write(_('Complete\n')) - else: - LOG.error(_('Error resizing server: %s'), - server.id) - self.app.stdout.write(_('Error resizing server\n')) - raise SystemExit + msg = _('Error resizing server: %s') % server.id + raise exceptions.CommandError(msg) + + self.app.stdout.write(_('Complete\n')) elif parsed_args.confirm: - self.log.warning(_( - "The --confirm option has been deprecated. Please use the " - "'openstack server resize confirm' command instead.")) - compute_client.servers.confirm_resize(server) + self.log.warning( + _( + "The --confirm option has been deprecated. Please use the " + "'openstack server resize confirm' command instead." + ) + ) + compute_client.confirm_server_resize(server) elif parsed_args.revert: - self.log.warning(_( - "The --revert option has been deprecated. Please use the " - "'openstack server resize revert' command instead.")) - compute_client.servers.revert_resize(server) + self.log.warning( + _( + "The --revert option has been deprecated. Please use the " + "'openstack server resize revert' command instead." + ) + ) + compute_client.revert_server_resize(server) class ResizeConfirm(command.Command): - _description = _("""Confirm server resize. + _description = _( + """Confirm server resize. 
-Confirm (verify) success of resize operation and release the old server.""") +Confirm (verify) success of resize operation and release the old server.""" + ) def get_parser(self, prog_name): - parser = super(ResizeConfirm, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -3945,13 +4366,11 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, - parsed_args.server, + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) - server.confirm_resize() + compute_client.confirm_server_resize(server) # TODO(stephenfin): Remove in OSC 7.0 @@ -3969,20 +4388,24 @@ def take_action(self, parsed_args): class ConfirmMigration(ResizeConfirm): - _description = _("""Confirm server migration. + _description = _( + """Confirm server migration. Confirm (verify) success of the migration operation and release the old -server.""") +server.""" + ) class ResizeRevert(command.Command): - _description = _("""Revert server resize. + _description = _( + """Revert server resize. Revert the resize operation. Release the new server and restart the old -one.""") +one.""" + ) def get_parser(self, prog_name): - parser = super(ResizeRevert, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -3991,13 +4414,11 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, - parsed_args.server, + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) - server.revert_resize() + compute_client.revert_server_resize(server) # TODO(stephenfin): Remove in OSC 7.0 @@ -4015,17 +4436,19 @@ def take_action(self, parsed_args): class RevertMigration(ResizeRevert): - _description = _("""Revert server migration. + _description = _( + """Revert server migration. Revert the migration operation. 
Release the new server and restart the old -one.""") +one.""" + ) class RestoreServer(command.Command): _description = _("Restore server(s)") def get_parser(self, prog_name): - parser = super(RestoreServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4037,17 +4460,18 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): compute_client = self.app.client_manager.compute for server in parsed_args.server: - utils.find_resource( - compute_client.servers, - server - ).restore() + server_id = compute_client.find_server( + server, + ignore_missing=False, + ).id + compute_client.restore_server(server_id) class ResumeServer(command.Command): _description = _("Resume server(s)") def get_parser(self, prog_name): - parser = super(ResumeServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4057,7 +4481,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute for server in parsed_args.server: server_id = compute_client.find_server( server, @@ -4070,7 +4494,7 @@ class SetServer(command.Command): _description = _("Set server properties") def get_parser(self, prog_name): - parser = super(SetServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4085,8 +4509,7 @@ def get_parser(self, prog_name): password_group.add_argument( '--password', help=_( - 'Set the server password. ' - 'This option requires cloud support.' + 'Set the server password. This option requires cloud support.' ), ) password_group.add_argument( @@ -4114,15 +4537,27 @@ def get_parser(self, prog_name): '(repeat option to set multiple properties)' ), ) + parser.add_argument( + '--auto-approve', + action='store_true', + help=_( + "Allow server state override without asking for confirmation" + ), + ) parser.add_argument( '--state', metavar='', choices=['active', 'error'], help=_( - 'New server state ' - '**WARNING** This can result in instances that are no longer ' - 'usable and should be used with caution ' - '(admin only)' + 'New server state.' + '**WARNING** Resetting the state is intended to work around ' + 'servers stuck in an intermediate state, such as deleting. ' + 'If the server is in an error state then this is almost ' + 'never the correct command to run and you should prefer hard ' + 'reboot where possible. In particular, if the server is in ' + 'an error state due to a move operation, setting the state ' + 'can result in instances that are no longer usable. Proceed ' + 'with caution. 
(admin only)' ), ) parser.add_argument( @@ -4157,16 +4592,28 @@ def get_parser(self, prog_name): ) return parser - def take_action(self, parsed_args): + @staticmethod + def ask_user_yesno(msg): + """Ask user Y/N question + :param str msg: question text + :return bool: User choice + """ + while True: + answer = getpass.getpass('{} [{}]: '.format(msg, 'y/n')) + if answer in ('y', 'Y', 'yes'): + return True + elif answer in ('n', 'N', 'no'): + return False + + def take_action(self, parsed_args): compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, - parsed_args.server, + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) if parsed_args.description: - if server.api_version < api_versions.APIVersion("2.19"): + if not sdk_utils.supports_microversion(compute_client, '2.19'): msg = _( '--os-compute-api-version 2.19 or greater is required to ' 'support the --description option' @@ -4174,7 +4621,7 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) if parsed_args.tags: - if server.api_version < api_versions.APIVersion('2.26'): + if not sdk_utils.supports_microversion(compute_client, '2.26'): msg = _( '--os-compute-api-version 2.26 or greater is required to ' 'support the --tag option' @@ -4182,7 +4629,7 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) if parsed_args.hostname: - if server.api_version < api_versions.APIVersion('2.90'): + if not sdk_utils.supports_microversion(compute_client, '2.90'): msg = _( '--os-compute-api-version 2.90 or greater is required to ' 'support the --hostname option' @@ -4201,30 +4648,43 @@ def take_action(self, parsed_args): update_kwargs['hostname'] = parsed_args.hostname if update_kwargs: - server.update(**update_kwargs) + compute_client.update_server(server, **update_kwargs) if parsed_args.properties: - compute_client.servers.set_meta(server, parsed_args.properties) + compute_client.set_server_metadata( + server, **parsed_args.properties + ) if parsed_args.state: - server.reset_state(state=parsed_args.state) + if not parsed_args.auto_approve: + if not self.ask_user_yesno( + _( + "Resetting the server state can make it much harder " + "to recover a server from an error state. If the " + "server is in error status due to a failed move " + "operation then this is likely not the correct " + "approach to fix the problem. Do you wish to continue?" 
+ ) + ): + return + compute_client.reset_server_state(server, state=parsed_args.state) if parsed_args.root_password: p1 = getpass.getpass(_('New password: ')) p2 = getpass.getpass(_('Retype new password: ')) if p1 == p2: - server.change_password(p1) + compute_client.change_server_password(server, p1) else: msg = _("Passwords do not match, password unchanged") raise exceptions.CommandError(msg) elif parsed_args.password: - server.change_password(parsed_args.password) + compute_client.change_server_password(server, parsed_args.password) elif parsed_args.no_password: - server.clear_password() + compute_client.clear_server_password(server) if parsed_args.tags: for tag in parsed_args.tags: - server.add_tag(tag=tag) + compute_client.add_tag_to_server(server, tag=tag) class ShelveServer(command.Command): @@ -4245,7 +4705,7 @@ class ShelveServer(command.Command): """ def get_parser(self, prog_name): - parser = super(ShelveServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'servers', metavar='', @@ -4271,82 +4731,81 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() compute_client = self.app.client_manager.compute + server_ids = [] for server in parsed_args.servers: - server_obj = utils.find_resource( - compute_client.servers, + server_obj = compute_client.find_server( server, + ignore_missing=False, ) if server_obj.status.lower() in ('shelved', 'shelved_offloaded'): continue - server_obj.shelve() + server_ids.append(server_obj.id) + + compute_client.shelve_server(server_obj.id) # if we don't have to wait, either because it was requested explicitly # or is required implicitly, then our job is done if not parsed_args.wait and not parsed_args.offload: return - for server in parsed_args.servers: + for server_id in server_ids: + # We use osc-lib's wait_for_status since that allows for a callback # TODO(stephenfin): We should wait for these in parallel using e.g. # https://review.opendev.org/c/openstack/osc-lib/+/762503/ if not utils.wait_for_status( - compute_client.servers.get, server_obj.id, + compute_client.get_server, + server_id, success_status=('shelved', 'shelved_offloaded'), callback=_show_progress, ): - LOG.error(_('Error shelving server: %s'), server_obj.id) - self.app.stdout.write( - _('Error shelving server: %s\n') % server_obj.id) - raise SystemExit + msg = _('Error shelving server: %s') % server_id + raise exceptions.CommandError(msg) if not parsed_args.offload: return - for server in parsed_args.servers: - server_obj = utils.find_resource( - compute_client.servers, - server, - ) + for server_id in server_ids: + server_obj = compute_client.get_server(server_id) if server_obj.status.lower() == 'shelved_offloaded': continue - server_obj.shelve_offload() + compute_client.shelve_offload_server(server_id) if not parsed_args.wait: return - for server in parsed_args.servers: + for server_id in server_ids: + # We use osc-lib's wait_for_status since that allows for a callback # TODO(stephenfin): We should wait for these in parallel using e.g. 
# https://review.opendev.org/c/openstack/osc-lib/+/762503/ if not utils.wait_for_status( - compute_client.servers.get, server_obj.id, + compute_client.get_server, + server_id, success_status=('shelved_offloaded',), callback=_show_progress, ): - LOG.error( - _('Error offloading shelved server %s'), server_obj.id) - self.app.stdout.write( - _('Error offloading shelved server: %s\n') % ( - server_obj.id)) - raise SystemExit + msg = _('Error offloading shelved server: %s') % server_id + raise exceptions.CommandError(msg) class ShowServer(command.ShowOne): - _description = _("""Show server details. + _description = _( + """Show server details. Specify ``--os-compute-api-version 2.47`` or higher to see the embedded flavor -information for the server.""") +information for the server.""" + ) def get_parser(self, prog_name): - parser = super(ShowServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4372,12 +4831,14 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute + image_client = self.app.client_manager.image - # Find by name or ID, then get the full details of the server server = compute_client.find_server( - parsed_args.server, ignore_missing=False) - server = compute_client.get_server(server) + parsed_args.server, + ignore_missing=False, + details=True, + ) if parsed_args.diagnostics: data = compute_client.get_server_diagnostics(server) @@ -4395,16 +4856,10 @@ def take_action(self, parsed_args): topology = server.fetch_topology(compute_client) data = _prep_server_detail( - # TODO(dannosliwcd): Replace these clients with SDK clients after - # all callers of _prep_server_detail() are using the SDK. 
- self.app.client_manager.compute, - self.app.client_manager.image, - server, - refresh=False) - + compute_client, image_client, server, refresh=False + ) if topology: data['topology'] = format_columns.DictColumn(topology) - return zip(*sorted(data.items())) @@ -4412,7 +4867,7 @@ class SshServer(command.Command): _description = _("SSH to server") def get_parser(self, prog_name): - parser = super(SshServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4420,26 +4875,30 @@ def get_parser(self, prog_name): ) # Deprecated during the Yoga cycle parser.add_argument( - '--login', '-l', + '--login', + '-l', metavar='', help=argparse.SUPPRESS, ) # Deprecated during the Yoga cycle parser.add_argument( - '--port', '-p', + '--port', + '-p', metavar='', type=int, help=argparse.SUPPRESS, ) # Deprecated during the Yoga cycle parser.add_argument( - '--identity', '-i', + '--identity', + '-i', metavar='', help=argparse.SUPPRESS, ) # Deprecated during the Yoga cycle parser.add_argument( - '--option', '-o', + '--option', + '-o', metavar='', help=argparse.SUPPRESS, ) @@ -4502,22 +4961,22 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, - parsed_args.server, + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) # first, handle the deprecated options - if any(( - parsed_args.port, - parsed_args.identity, - parsed_args.option, - parsed_args.login, - parsed_args.verbose, - )): + if any( + ( + parsed_args.port, + parsed_args.identity, + parsed_args.option, + parsed_args.login, + parsed_args.verbose, + ) + ): msg = _( 'The ssh options have been deprecated. 
The ssh equivalent ' 'options can be used instead as arguments after "--" on ' @@ -4559,15 +5018,17 @@ def take_action(self, parsed_args): ) cmd = ' '.join(['ssh', ip_address] + args) - LOG.debug("ssh command: {cmd}".format(cmd=cmd)) - os.system(cmd) + LOG.debug(f"ssh command: {cmd}") + # we intentionally pass through user-provided arguments and run this in + # the user's shell + os.system(cmd) # noqa: S605 class StartServer(command.Command): _description = _("Start server(s)") def get_parser(self, prog_name): - parser = super(StartServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4577,9 +5038,9 @@ def get_parser(self, prog_name): parser.add_argument( '--all-projects', action='store_true', - default=boolenv('ALL_PROJECTS'), + default=envvars.boolenv('ALL_PROJECTS'), help=_( - 'Start server(s) in another project by name (admin only)' + 'Start server(s) in another project by name (admin only) ' '(can be specified using the ALL_PROJECTS envvar)' ), ) @@ -4588,18 +5049,21 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): compute_client = self.app.client_manager.compute for server in parsed_args.server: - utils.find_resource( - compute_client.servers, + server_id = compute_client.find_server( server, - all_tenants=parsed_args.all_projects, - ).start() + ignore_missing=False, + details=False, + all_projects=parsed_args.all_projects, + ).id + + compute_client.start_server(server_id) class StopServer(command.Command): _description = _("Stop server(s)") def get_parser(self, prog_name): - parser = super(StopServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4609,9 +5073,9 @@ def get_parser(self, prog_name): parser.add_argument( '--all-projects', action='store_true', - default=boolenv('ALL_PROJECTS'), + default=envvars.boolenv('ALL_PROJECTS'), help=_( - 'Stop server(s) in another project by name (admin only)' + 'Stop server(s) in another project by name (admin only) ' '(can be specified using the ALL_PROJECTS envvar)' ), ) @@ -4620,18 +5084,20 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): compute_client = self.app.client_manager.compute for server in parsed_args.server: - utils.find_resource( - compute_client.servers, + server_id = compute_client.find_server( server, - all_tenants=parsed_args.all_projects, - ).stop() + ignore_missing=False, + details=False, + all_projects=parsed_args.all_projects, + ).id + compute_client.stop_server(server_id) class SuspendServer(command.Command): _description = _("Suspend server(s)") def get_parser(self, prog_name): - parser = super(SuspendServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4641,7 +5107,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute for server in parsed_args.server: server_id = compute_client.find_server( server, @@ -4654,7 +5120,7 @@ class UnlockServer(command.Command): _description = _("Unlock server(s)") def get_parser(self, prog_name): - parser = super(UnlockServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4666,17 +5132,18 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): compute_client = self.app.client_manager.compute for server in 
parsed_args.server: - utils.find_resource( - compute_client.servers, + server_id = compute_client.find_server( server, - ).unlock() + ignore_missing=False, + ).id + compute_client.unlock_server(server_id) class UnpauseServer(command.Command): _description = _("Unpause server(s)") def get_parser(self, prog_name): - parser = super(UnpauseServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4686,7 +5153,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute for server in parsed_args.server: server_id = compute_client.find_server( server, @@ -4699,7 +5166,7 @@ class UnrescueServer(command.Command): _description = _("Restore server from rescue mode") def get_parser(self, prog_name): - parser = super(UnrescueServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4708,41 +5175,51 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.compute - utils.find_resource( - compute_client.servers, - parsed_args.server, - ).unrescue() + server = compute_client.find_server( + parsed_args.server, ignore_missing=False + ) + compute_client.unrescue_server(server) class UnsetServer(command.Command): _description = _("Unset server properties and tags") def get_parser(self, prog_name): - parser = super(UnsetServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', help=_('Server (name or ID)'), ) - parser.add_argument( + property_group = parser.add_mutually_exclusive_group() + property_group.add_argument( '--property', metavar='', action='append', default=[], dest='properties', - help=_('Property key to remove from server ' - '(repeat option to remove multiple values)'), + help=_( + 'Property key to remove from server ' + '(repeat option to remove multiple values)' + ), + ) + property_group.add_argument( + '--all-properties', + action='store_true', + help=_('Remove all properties'), ) parser.add_argument( '--description', dest='description', action='store_true', - help=_('Unset server description (supported by ' - '--os-compute-api-version 2.19 or above)'), + help=_( + 'Unset server description ' + '(supported by --os-compute-api-version 2.19 or above)' + ), ) - parser.add_argument( + tag_group = parser.add_mutually_exclusive_group() + tag_group.add_argument( '--tag', metavar='', action='append', @@ -4754,30 +5231,40 @@ def get_parser(self, prog_name): '(supported by --os-compute-api-version 2.26 or above)' ), ) + tag_group.add_argument( + '--all-tags', + action='store_true', + help=_( + 'Remove all tags ' + '(supported by --os-compute-api-version 2.26 or above)' + ), + ) return parser def take_action(self, parsed_args): compute_client = self.app.client_manager.compute - server = utils.find_resource( - compute_client.servers, - parsed_args.server, + + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) - if parsed_args.properties: - compute_client.servers.delete_meta(server, parsed_args.properties) + if parsed_args.properties or parsed_args.all_properties: + compute_client.delete_server_metadata( + server, parsed_args.properties or None + ) if parsed_args.description: - if compute_client.api_version < api_versions.APIVersion("2.19"): - msg = _("Description is not 
supported for " - "--os-compute-api-version less than 2.19") + if not sdk_utils.supports_microversion(compute_client, '2.19'): + msg = _( + '--os-compute-api-version 2.19 or greater is required to ' + 'support the --description option' + ) raise exceptions.CommandError(msg) - compute_client.servers.update( - server, - description="", - ) - if parsed_args.tags: - if compute_client.api_version < api_versions.APIVersion('2.26'): + compute_client.update_server(server, description="") + + if parsed_args.tags or parsed_args.all_tags: + if not sdk_utils.supports_microversion(compute_client, '2.26'): msg = _( '--os-compute-api-version 2.26 or greater is required to ' 'support the --tag option' @@ -4785,14 +5272,17 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) for tag in parsed_args.tags: - compute_client.servers.delete_tag(server, tag=tag) + compute_client.remove_tag_from_server(server, tag) + + if parsed_args.all_tags: + compute_client.remove_tags_from_server(server) class UnshelveServer(command.Command): _description = _("Unshelve server(s)") def get_parser(self, prog_name): - parser = super(UnshelveServer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -4803,25 +5293,31 @@ def get_parser(self, prog_name): group.add_argument( '--availability-zone', default=None, - help=_('Name of the availability zone in which to unshelve a ' - 'SHELVED_OFFLOADED server (supported by ' - '--os-compute-api-version 2.77 or above)'), + help=_( + 'Name of the availability zone in which to unshelve a ' + 'SHELVED_OFFLOADED server ' + '(supported by --os-compute-api-version 2.77 or above)' + ), ) group.add_argument( '--no-availability-zone', action='store_true', default=False, - help=_('Unpin the availability zone of a SHELVED_OFFLOADED ' - 'server. Server will be unshelved on a host without ' - 'availability zone constraint (supported by ' - '--os-compute-api-version 2.91 or above)'), + help=_( + 'Unpin the availability zone of a SHELVED_OFFLOADED ' + 'server. 
Server will be unshelved on a host without ' + 'availability zone constraint ' + '(supported by --os-compute-api-version 2.91 or above)' + ), ) parser.add_argument( '--host', default=None, - help=_('Name of the destination host in which to unshelve a ' - 'SHELVED_OFFLOADED server (supported by ' - '--os-compute-api-version 2.91 or above)'), + help=_( + 'Name of the destination host in which to unshelve a ' + 'SHELVED_OFFLOADED server ' + '(supported by --os-compute-api-version 2.91 or above)' + ), ) parser.add_argument( '--wait', @@ -4832,17 +5328,16 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() compute_client = self.app.client_manager.compute kwargs = {} if parsed_args.availability_zone: - if compute_client.api_version < api_versions.APIVersion('2.77'): + if not sdk_utils.supports_microversion(compute_client, '2.77'): msg = _( '--os-compute-api-version 2.77 or greater is required ' 'to support the --availability-zone option' @@ -4852,7 +5347,7 @@ def _show_progress(progress): kwargs['availability_zone'] = parsed_args.availability_zone if parsed_args.host: - if compute_client.api_version < api_versions.APIVersion('2.91'): + if not sdk_utils.supports_microversion(compute_client, '2.91'): msg = _( '--os-compute-api-version 2.91 or greater is required ' 'to support the --host option' @@ -4862,7 +5357,7 @@ def _show_progress(progress): kwargs['host'] = parsed_args.host if parsed_args.no_availability_zone: - if compute_client.api_version < api_versions.APIVersion('2.91'): + if not sdk_utils.supports_microversion(compute_client, '2.91'): msg = _( '--os-compute-api-version 2.91 or greater is required ' 'to support the --no-availability-zone option' @@ -4872,25 +5367,25 @@ def _show_progress(progress): kwargs['availability_zone'] = None for server in parsed_args.server: - server_obj = utils.find_resource( - compute_client.servers, + server_obj = compute_client.find_server( server, + ignore_missing=False, ) if server_obj.status.lower() not in ( - 'shelved', 'shelved_offloaded', + 'shelved', + 'shelved_offloaded', ): continue - server_obj.unshelve(**kwargs) + compute_client.unshelve_server(server_obj.id, **kwargs) if parsed_args.wait: if not utils.wait_for_status( - compute_client.servers.get, server_obj.id, + compute_client.get_server, + server_obj.id, success_status=('active', 'shutoff'), callback=_show_progress, ): - LOG.error(_('Error unshelving server %s'), server_obj.id) - self.app.stdout.write( - _('Error unshelving server: %s\n') % server_obj.id) - raise SystemExit + msg = _('Error unshelving server: %s') % server_obj.id + raise exceptions.CommandError(msg) diff --git a/openstackclient/compute/v2/server_backup.py b/openstackclient/compute/v2/server_backup.py index 53891991b4..bb06f761c9 100644 --- a/openstackclient/compute/v2/server_backup.py +++ b/openstackclient/compute/v2/server_backup.py @@ -17,10 +17,10 @@ import importlib -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -33,7 +33,7 @@ class CreateServerBackup(command.ShowOne): } def get_parser(self, prog_name): - parser = super(CreateServerBackup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -66,15 +66,16 @@ def get_parser(self, prog_name): 
return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stderr.write('\rProgress: %s' % progress) + self.app.stderr.write(f'\rProgress: {progress}') self.app.stderr.flush() - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute - server = compute_client.find_server(parsed_args.server) + server = compute_client.find_server( + parsed_args.server, ignore_missing=False + ) # Set sane defaults as this API wants all mouths to be fed if parsed_args.name is None: diff --git a/openstackclient/compute/v2/server_event.py b/openstackclient/compute/v2/server_event.py index ebf0d526ac..0e1e2b4632 100644 --- a/openstackclient/compute/v2/server_event.py +++ b/openstackclient/compute/v2/server_event.py @@ -16,20 +16,83 @@ """Compute v2 Server operation event implementations""" import logging +import uuid +from cliff import columns import iso8601 -from novaclient import api_versions -from osc_lib.command import command +from openstack import exceptions as sdk_exceptions +from openstack import utils as sdk_utils from osc_lib import exceptions from osc_lib import utils -from oslo_utils import uuidutils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ - LOG = logging.getLogger(__name__) +# TODO(stephenfin): Move this to osc_lib since it's useful elsewhere (e.g. +# glance) +def is_uuid_like(value) -> bool: + """Returns validation of a value as a UUID. + + :param val: Value to verify + :type val: string + :returns: bool + + .. versionchanged:: 1.1.1 + Support non-lowercase UUIDs. + """ + try: + formatted_value = ( + value.replace('urn:', '') + .replace('uuid:', '') + .strip('{}') + .replace('-', '') + .lower() + ) + return str(uuid.UUID(value)).replace('-', '') == formatted_value + except (TypeError, ValueError, AttributeError): + return False + + +class ServerActionEventColumn(columns.FormattableColumn): + """Custom formatter for server action events. + + Format the :class:`~openstack.compute.v2.server_action.ServerActionEvent` + objects as we'd like. + """ + + def _format_event(self, event): + hidden_columns = ['id', 'name', 'location'] + _, columns = utils.get_osc_show_columns_for_sdk_resource( + event, {}, hidden_columns + ) + data = utils.get_item_properties(event, columns) + return dict(zip(columns, data)) + + def human_readable(self): + events = [self._format_event(event) for event in self._value] + return utils.format_list_of_dicts(events) + + def machine_readable(self): + events = [self._format_event(event) for event in self._value] + return events + + +def _get_server_event_columns(item, client): + hidden_columns = ['name', 'server_id', 'links', 'location', 'finish_time'] + + if not sdk_utils.supports_microversion(client, '2.58'): + # updated_at was introduced in 2.58 + hidden_columns.append('updated_at') + + return utils.get_osc_show_columns_for_sdk_resource( + item, {}, hidden_columns + ) + + class ListServerEvent(command.Lister): """List recent events of a server. 
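# --- Illustrative usage, not part of the patch above ---
# The is_uuid_like() helper added to server_event.py strips urn:/uuid:
# prefixes and surrounding braces and lowercases the value before validating,
# so, assuming that helper is in scope, all of the following hold (the example
# UUID is arbitrary):
assert is_uuid_like('b59ba14a-9d54-4fd1-9cbd-21c0a3a7f6d5')
assert is_uuid_like('{B59BA14A-9D54-4FD1-9CBD-21C0A3A7F6D5}')
assert is_uuid_like('urn:uuid:b59ba14a-9d54-4fd1-9cbd-21c0a3a7f6d5')
assert not is_uuid_like('my-server')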
@@ -38,7 +101,7 @@ class ListServerEvent(command.Lister): """ def get_parser(self, prog_name): - parser = super(ListServerEvent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -48,7 +111,7 @@ def get_parser(self, prog_name): '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) parser.add_argument( '--changes-since', @@ -72,21 +135,7 @@ def get_parser(self, prog_name): "(supported with --os-compute-api-version 2.66 or above)" ), ) - parser.add_argument( - '--marker', - help=_( - 'The last server event ID of the previous page ' - '(supported by --os-compute-api-version 2.58 or above)' - ), - ) - parser.add_argument( - '--limit', - type=int, - help=_( - 'Maximum number of server events to display ' - '(supported by --os-compute-api-version 2.58 or above)' - ), - ) + pagination.add_marker_pagination_option_to_parser(parser) return parser def take_action(self, parsed_args): @@ -95,25 +144,28 @@ def take_action(self, parsed_args): kwargs = {} if parsed_args.marker: - if compute_client.api_version < api_versions.APIVersion('2.58'): + if not sdk_utils.supports_microversion(compute_client, '2.58'): msg = _( '--os-compute-api-version 2.58 or greater is required to ' 'support the --marker option' ) raise exceptions.CommandError(msg) + kwargs['marker'] = parsed_args.marker if parsed_args.limit: - if compute_client.api_version < api_versions.APIVersion('2.58'): + if not sdk_utils.supports_microversion(compute_client, '2.58'): msg = _( '--os-compute-api-version 2.58 or greater is required to ' 'support the --limit option' ) raise exceptions.CommandError(msg) + kwargs['limit'] = parsed_args.limit + kwargs['paginated'] = False if parsed_args.changes_since: - if compute_client.api_version < api_versions.APIVersion('2.58'): + if not sdk_utils.supports_microversion(compute_client, '2.58'): msg = _( '--os-compute-api-version 2.58 or greater is required to ' 'support the --changes-since option' @@ -129,7 +181,7 @@ def take_action(self, parsed_args): kwargs['changes_since'] = parsed_args.changes_since if parsed_args.changes_before: - if compute_client.api_version < api_versions.APIVersion('2.66'): + if not sdk_utils.supports_microversion(compute_client, '2.66'): msg = _( '--os-compute-api-version 2.66 or greater is required to ' 'support the --changes-before option' @@ -145,27 +197,27 @@ def take_action(self, parsed_args): kwargs['changes_before'] = parsed_args.changes_before try: - server_id = utils.find_resource( - compute_client.servers, parsed_args.server, + server_id = compute_client.find_server( + parsed_args.server, ignore_missing=False ).id - except exceptions.CommandError: + except sdk_exceptions.ResourceNotFound: # If we fail to find the resource, it is possible the server is # deleted. Try once more using the arg directly if it is a # UUID. - if uuidutils.is_uuid_like(parsed_args.server): + if is_uuid_like(parsed_args.server): server_id = parsed_args.server else: raise - data = compute_client.instance_action.list(server_id, **kwargs) + data = compute_client.server_actions(server_id, **kwargs) - columns = ( + columns: tuple[str, ...] = ( 'request_id', - 'instance_uuid', + 'server_id', 'action', 'start_time', ) - column_headers = ( + column_headers: tuple[str, ...] 
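# --- Illustrative sketch, not part of the patch above ---
# Both server-event commands resolve the server first and, if the SDK reports
# it missing but the argument already looks like a UUID, fall back to using
# the argument directly, since a deleted server's events remain queryable.
# A hypothetical standalone helper capturing that pattern, reusing the
# is_uuid_like() helper defined in this file:
from openstack import exceptions as sdk_exceptions


def resolve_server_id(compute_client, name_or_id):
    try:
        return compute_client.find_server(
            name_or_id, ignore_missing=False
        ).id
    except sdk_exceptions.ResourceNotFound:
        if is_uuid_like(name_or_id):
            return name_or_id
        raise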
= ( 'Request ID', 'Server ID', 'Action', @@ -200,7 +252,7 @@ class ShowServerEvent(command.ShowOne): """ def get_parser(self, prog_name): - parser = super(ShowServerEvent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -217,20 +269,34 @@ def take_action(self, parsed_args): compute_client = self.app.client_manager.compute try: - server_id = utils.find_resource( - compute_client.servers, parsed_args.server, + server_id = compute_client.find_server( + parsed_args.server, + ignore_missing=False, ).id - except exceptions.CommandError: + except sdk_exceptions.ResourceNotFound: # If we fail to find the resource, it is possible the server is # deleted. Try once more using the arg directly if it is a # UUID. - if uuidutils.is_uuid_like(parsed_args.server): + if is_uuid_like(parsed_args.server): server_id = parsed_args.server else: raise - action_detail = compute_client.instance_action.get( - server_id, parsed_args.request_id + server_action = compute_client.get_server_action( + parsed_args.request_id, + server_id, + ) + + column_headers, columns = _get_server_event_columns( + server_action, + compute_client, ) - return zip(*sorted(action_detail.to_dict().items())) + return ( + column_headers, + utils.get_item_properties( + server_action, + columns, + formatters={'events': ServerActionEventColumn}, + ), + ) diff --git a/openstackclient/compute/v2/server_group.py b/openstackclient/compute/v2/server_group.py index eadc3ffbc9..b74c25626b 100644 --- a/openstackclient/compute/v2/server_group.py +++ b/openstackclient/compute/v2/server_group.py @@ -16,21 +16,23 @@ """Compute v2 Server Group action implementations""" import logging +import typing as ty +from cliff import columns from openstack import utils as sdk_utils from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ - LOG = logging.getLogger(__name__) -_formatters = { +_formatters: dict[str, type[columns.FormattableColumn[ty.Any]]] = { 'member_ids': format_columns.ListColumn, 'policies': format_columns.ListColumn, 'rules': format_columns.DictColumn, @@ -48,14 +50,15 @@ def _get_server_group_columns(item, client): hidden_columns.append('rules') return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, column_map, hidden_columns + ) class CreateServerGroup(command.ShowOne): _description = _("Create a new server group.") def get_parser(self, prog_name): - parser = super(CreateServerGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -92,7 +95,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute if parsed_args.policy in ('soft-affinity', 'soft-anti-affinity'): if not sdk_utils.supports_microversion(compute_client, '2.15'): @@ -142,7 +145,7 @@ class DeleteServerGroup(command.Command): _description = _("Delete existing server group(s).") def get_parser(self, prog_name): - parser = super(DeleteServerGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server_group', metavar='', @@ -152,11 +155,13 @@ def get_parser(self, prog_name): return parser def 
take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute result = 0 for group in parsed_args.server_group: try: - group_obj = compute_client.find_server_group(group) + group_obj = compute_client.find_server_group( + group, ignore_missing=False + ) compute_client.delete_server_group(group_obj.id) # Catch all exceptions in order to avoid to block the next deleting except Exception as e: @@ -167,8 +172,7 @@ def take_action(self, parsed_args): total = len(parsed_args.server_group) msg = _("%(result)s of %(total)s server groups failed to delete.") raise exceptions.CommandError( - msg % {"result": result, - "total": total} + msg % {"result": result, "total": total} ) @@ -176,7 +180,7 @@ class ListServerGroup(command.Lister): _description = _("List all server groups.") def get_parser(self, prog_name): - parser = super(ListServerGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--all-projects', action='store_true', @@ -191,32 +195,11 @@ def get_parser(self, prog_name): ) # TODO(stephenfin): This should really be a --marker option, but alas # the API doesn't support that for some reason - parser.add_argument( - '--offset', - metavar='', - type=int, - default=None, - help=_( - 'Index from which to start listing servers. This should ' - 'typically be a factor of --limit. Display all servers groups ' - 'if not specified.' - ), - ) - parser.add_argument( - '--limit', - metavar='', - type=int, - default=None, - help=_( - "Maximum number of server groups to display. " - "If limit is greater than 'osapi_max_limit' option of Nova " - "API, 'osapi_max_limit' will be used instead." - ), - ) + pagination.add_offset_pagination_option_to_parser(parser) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute kwargs = {} @@ -235,12 +218,12 @@ def take_action(self, parsed_args): if sdk_utils.supports_microversion(compute_client, '2.64'): policy_key = 'Policy' - columns = ( + columns: tuple[str, ...] = ( 'id', 'name', policy_key.lower(), ) - column_headers = ( + column_headers: tuple[str, ...] 
= ( 'ID', 'Name', policy_key, @@ -261,8 +244,11 @@ def take_action(self, parsed_args): column_headers, ( utils.get_item_properties( - s, columns, formatters=_formatters, - ) for s in data + s, + columns, + formatters=_formatters, + ) + for s in data ), ) @@ -271,7 +257,7 @@ class ShowServerGroup(command.ShowOne): _description = _("Display server group details.") def get_parser(self, prog_name): - parser = super(ShowServerGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server_group', metavar='', @@ -280,8 +266,10 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute - group = compute_client.find_server_group(parsed_args.server_group) + compute_client = self.app.client_manager.compute + group = compute_client.find_server_group( + parsed_args.server_group, ignore_missing=False + ) display_columns, columns = _get_server_group_columns( group, compute_client, diff --git a/openstackclient/compute/v2/server_image.py b/openstackclient/compute/v2/server_image.py index 2021fae7c0..26dbb4ccc3 100644 --- a/openstackclient/compute/v2/server_image.py +++ b/openstackclient/compute/v2/server_image.py @@ -19,10 +19,10 @@ import logging from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -38,7 +38,7 @@ class CreateServerImage(command.ShowOne): } def get_parser(self, prog_name): - parser = super(CreateServerImage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -67,17 +67,17 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - def _show_progress(progress): if progress: - self.app.stdout.write('\rProgress: %s' % progress) + self.app.stdout.write(f'\rProgress: {progress}') self.app.stdout.flush() - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute image_client = self.app.client_manager.image server = compute_client.find_server( - parsed_args.server, ignore_missing=False, + parsed_args.server, + ignore_missing=False, ) if parsed_args.name: @@ -100,7 +100,8 @@ def _show_progress(progress): self.app.stdout.write('\n') else: LOG.error( - _('Error creating server image: %s'), parsed_args.server) + _('Error creating server image: %s'), parsed_args.server + ) raise exceptions.CommandError image = image_client.find_image(image_id, ignore_missing=False) diff --git a/openstackclient/compute/v2/server_migration.py b/openstackclient/compute/v2/server_migration.py index 91575c1e5b..f2ea68343f 100644 --- a/openstackclient/compute/v2/server_migration.py +++ b/openstackclient/compute/v2/server_migration.py @@ -15,10 +15,11 @@ import uuid from openstack import utils as sdk_utils -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -27,56 +28,34 @@ class ListMigration(command.Lister): _description = _("""List server migrations""") def get_parser(self, prog_name): - parser = super(ListMigration, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--server', metavar='', - help=_( - 'Filter migrations by server 
(name or ID)' - ) + help=_('Filter migrations by server (name or ID)'), ) parser.add_argument( '--host', metavar='', - help=_( - 'Filter migrations by source or destination host' - ), + help=_('Filter migrations by source or destination host'), ) parser.add_argument( '--status', metavar='', - help=_('Filter migrations by status') + help=_('Filter migrations by status'), ) parser.add_argument( '--type', metavar='', choices=[ - 'evacuation', 'live-migration', 'cold-migration', 'resize', + 'evacuation', + 'live-migration', + 'cold-migration', + 'resize', ], help=_('Filter migrations by type'), ) - parser.add_argument( - '--marker', - metavar='', - help=_( - "The last migration of the previous page; displays list " - "of migrations after 'marker'. Note that the marker is " - "the migration UUID. " - "(supported with --os-compute-api-version 2.59 or above)" - ), - ) - parser.add_argument( - '--limit', - metavar='', - type=int, - help=_( - "Maximum number of migrations to display. Note that there " - "is a configurable max limit on the server, and the limit " - "that is used will be the minimum of what is requested " - "here and what is configured in the server. " - "(supported with --os-compute-api-version 2.59 or above)" - ), - ) + pagination.add_marker_pagination_option_to_parser(parser) parser.add_argument( '--changes-since', dest='changes_since', @@ -121,17 +100,33 @@ def get_parser(self, prog_name): def print_migrations(self, parsed_args, compute_client, migrations): column_headers = [ - 'Source Node', 'Dest Node', 'Source Compute', 'Dest Compute', - 'Dest Host', 'Status', 'Server UUID', 'Old Flavor', 'New Flavor', - 'Created At', 'Updated At', + 'Source Node', + 'Dest Node', + 'Source Compute', + 'Dest Compute', + 'Dest Host', + 'Status', + 'Server UUID', + 'Old Flavor', + 'New Flavor', + 'Created At', + 'Updated At', ] # Response fields coming back from the REST API are not always exactly # the same as the column header names. columns = [ - 'source_node', 'dest_node', 'source_compute', 'dest_compute', - 'dest_host', 'status', 'server_id', 'old_flavor_id', - 'new_flavor_id', 'created_at', 'updated_at', + 'source_node', + 'dest_node', + 'source_compute', + 'dest_compute', + 'dest_host', + 'status', + 'server_id', + 'old_flavor_id', + 'new_flavor_id', + 'created_at', + 'updated_at', ] # Insert migrations UUID after ID @@ -159,7 +154,7 @@ def print_migrations(self, parsed_args, compute_client, migrations): ) def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute identity_client = self.app.client_manager.identity search_opts = {} @@ -171,10 +166,9 @@ def take_action(self, parsed_args): search_opts['status'] = parsed_args.status if parsed_args.server: - server = compute_client.find_server(parsed_args.server) - if server is None: - msg = _('Unable to find server: %s') % parsed_args.server - raise exceptions.CommandError(msg) + server = compute_client.find_server( + parsed_args.server, ignore_missing=False + ) search_opts['instance_uuid'] = server.id if parsed_args.type: @@ -258,12 +252,18 @@ def _get_migration_by_uuid(compute_client, server_id, migration_uuid): for migration in compute_client.server_migrations(server_id): if migration.uuid == migration_uuid: return migration - break else: msg = _( - 'In-progress live migration %s is not found for server %s.' 
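The hunk above also replaces positional %s interpolation in the translated error message with named %(migration)s / %(server)s placeholders and a dict of values; named keys are the usual choice for translatable strings because a translation can reorder them freely. A minimal, self-contained illustration (the message text comes from the hunk, the two values are made-up placeholders):

# Named placeholders are substituted by key, so a translated string may list
# %(migration)s and %(server)s in any order without breaking the formatting.
msg = (
    'In-progress live migration %(migration)s is not found for '
    'server %(server)s'
)
print(msg % {'migration': 'MIGRATION-UUID', 'server': 'SERVER-UUID'})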
+ 'In-progress live migration %(migration)s is not found for ' + 'server %(server)s' + ) + raise exceptions.CommandError( + msg + % { + 'migration': migration_uuid, + 'server': server_id, + } ) - raise exceptions.CommandError(msg % (migration_uuid, server_id)) class ShowMigration(command.ShowOne): @@ -289,7 +289,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute if not sdk_utils.supports_microversion(compute_client, '2.24'): msg = _( @@ -302,9 +302,7 @@ def take_action(self, parsed_args): try: uuid.UUID(parsed_args.migration) except ValueError: - msg = _( - 'The argument must be an ID or UUID' - ) + msg = _('The argument must be an ID or UUID') raise exceptions.CommandError(msg) if not sdk_utils.supports_microversion(compute_client, '2.59'): @@ -324,7 +322,9 @@ def take_action(self, parsed_args): # migrations - the responses are identical if not parsed_args.migration.isdigit(): server_migration = _get_migration_by_uuid( - compute_client, server.id, parsed_args.migration, + compute_client, + server.id, + parsed_args.migration, ) else: server_migration = compute_client.get_server_migration( @@ -333,7 +333,7 @@ def take_action(self, parsed_args): ignore_missing=False, ) - column_headers = ( + column_headers: tuple[str, ...] = ( 'ID', 'Server UUID', 'Status', @@ -352,7 +352,7 @@ def take_action(self, parsed_args): 'Updated At', ) - columns = ( + columns: tuple[str, ...] = ( 'id', 'server_id', 'status', @@ -390,7 +390,7 @@ class AbortMigration(command.Command): """ def get_parser(self, prog_name): - parser = super(AbortMigration, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', @@ -404,7 +404,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute if not sdk_utils.supports_microversion(compute_client, '2.24'): msg = _( @@ -417,9 +417,7 @@ def take_action(self, parsed_args): try: uuid.UUID(parsed_args.migration) except ValueError: - msg = _( - 'The argument must be an ID or UUID' - ) + msg = _('The argument must be an ID or UUID') raise exceptions.CommandError(msg) if not sdk_utils.supports_microversion(compute_client, '2.59'): @@ -440,7 +438,9 @@ def take_action(self, parsed_args): migration_id = parsed_args.migration if not parsed_args.migration.isdigit(): migration_id = _get_migration_by_uuid( - compute_client, server.id, parsed_args.migration, + compute_client, + server.id, + parsed_args.migration, ).id compute_client.abort_server_migration( @@ -457,21 +457,19 @@ class ForceCompleteMigration(command.Command): """ def get_parser(self, prog_name): - parser = super(ForceCompleteMigration, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'server', metavar='', help=_('Server (name or ID)'), ) parser.add_argument( - 'migration', - metavar='', - help=_('Migration (ID)') + 'migration', metavar='', help=_('Migration (ID)') ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute if not sdk_utils.supports_microversion(compute_client, '2.22'): msg = _( @@ -484,9 +482,7 @@ def take_action(self, parsed_args): try: uuid.UUID(parsed_args.migration) except ValueError: - msg = _( - 'The argument 
must be an ID or UUID' - ) + msg = _('The argument must be an ID or UUID') raise exceptions.CommandError(msg) if not sdk_utils.supports_microversion(compute_client, '2.59'): @@ -507,9 +503,9 @@ def take_action(self, parsed_args): migration_id = parsed_args.migration if not parsed_args.migration.isdigit(): migration_id = _get_migration_by_uuid( - compute_client, server.id, parsed_args.migration, + compute_client, + server.id, + parsed_args.migration, ).id - compute_client.force_complete_server_migration( - migration_id, server.id - ) + compute_client.force_complete_server_migration(migration_id, server.id) diff --git a/openstackclient/compute/v2/server_volume.py b/openstackclient/compute/v2/server_volume.py index b4322c0b1e..d92d137b73 100644 --- a/openstackclient/compute/v2/server_volume.py +++ b/openstackclient/compute/v2/server_volume.py @@ -15,10 +15,10 @@ """Compute v2 Server action implementations""" from openstack import utils as sdk_utils -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -34,7 +34,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute server = compute_client.find_server( parsed_args.server, @@ -42,8 +42,8 @@ def take_action(self, parsed_args): ) volumes = compute_client.volume_attachments(server) - columns = () - column_headers = () + columns: tuple[str, ...] = () + column_headers: tuple[str, ...] = () if not sdk_utils.supports_microversion(compute_client, '2.89'): columns += ('id',) @@ -114,7 +114,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute volume_client = self.app.client_manager.sdk_connection.volume if parsed_args.delete_on_termination is not None: diff --git a/openstackclient/compute/v2/service.py b/openstackclient/compute/v2/service.py index fad717c9fa..b911835f80 100644 --- a/openstackclient/compute/v2/service.py +++ b/openstackclient/compute/v2/service.py @@ -18,10 +18,10 @@ import logging from openstack import utils as sdk_utils -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -32,75 +32,84 @@ class DeleteService(command.Command): _description = _("Delete compute service(s)") def get_parser(self, prog_name): - parser = super(DeleteService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "service", metavar="", nargs='+', - help=_("Compute service(s) to delete (ID only). If using " - "``--os-compute-api-version`` 2.53 or greater, the ID is " - "a UUID which can be retrieved by listing compute services " - "using the same 2.53+ microversion. " - "If deleting a compute service, be sure to stop the actual " - "compute process on the physical host before deleting the " - "service with this command. Failing to do so can lead to " - "the running service re-creating orphaned compute_nodes " - "table records in the database.") + help=_( + "Compute service(s) to delete (ID only). If using " + "``--os-compute-api-version`` 2.53 or greater, the ID is " + "a UUID which can be retrieved by listing compute services " + "using the same 2.53+ microversion. 
" + "If deleting a compute service, be sure to stop the actual " + "compute process on the physical host before deleting the " + "service with this command. Failing to do so can lead to " + "the running service re-creating orphaned compute_nodes " + "table records in the database." + ), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute result = 0 for s in parsed_args.service: try: - compute_client.delete_service( - s, - ignore_missing=False - ) + compute_client.delete_service(s, ignore_missing=False) except Exception as e: result += 1 - LOG.error(_("Failed to delete compute service with " - "ID '%(service)s': %(e)s"), {'service': s, 'e': e}) + LOG.error( + _( + "Failed to delete compute service with " + "ID '%(service)s': %(e)s" + ), + {'service': s, 'e': e}, + ) if result > 0: total = len(parsed_args.service) - msg = (_("%(result)s of %(total)s compute services failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s compute services failed to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) class ListService(command.Lister): - _description = _("""List compute services. + _description = _( + """List compute services. Using ``--os-compute-api-version`` 2.53 or greater will return the ID as a UUID value which can be used to uniquely identify the service in a multi-cell -deployment.""") +deployment.""" + ) def get_parser(self, prog_name): - parser = super(ListService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--host", metavar="", - help=_("List services on specified host (name only)") + help=_("List services on specified host (name only)"), ) parser.add_argument( "--service", metavar="", - help=_("List only specified service binaries (name only). For " - "example, ``nova-compute``, ``nova-conductor``, etc.") + help=_( + "List only specified service binaries (name only). For " + "example, ``nova-compute``, ``nova-conductor``, etc." + ), ) parser.add_argument( "--long", action="store_true", default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) return parser def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute - columns = ( + compute_client = self.app.client_manager.compute + columns: tuple[str, ...] = ( "id", "binary", "host", @@ -109,7 +118,7 @@ def take_action(self, parsed_args): "state", "updated_at", ) - column_headers = ( + column_headers: tuple[str, ...] 
= ( "ID", "Binary", "Host", @@ -126,12 +135,11 @@ def take_action(self, parsed_args): column_headers += ("Forced Down",) data = compute_client.services( - host=parsed_args.host, - binary=parsed_args.service + host=parsed_args.host, binary=parsed_args.service ) return ( column_headers, - (utils.get_item_properties(s, columns) for s in data) + (utils.get_item_properties(s, columns) for s in data), ) @@ -139,48 +147,47 @@ class SetService(command.Command): _description = _("Set compute service properties") def get_parser(self, prog_name): - parser = super(SetService, self).get_parser(prog_name) - parser.add_argument( - "host", - metavar="", - help=_("Name of host") - ) + parser = super().get_parser(prog_name) + parser.add_argument("host", metavar="", help=_("Name of host")) parser.add_argument( "service", metavar="", - help=_("Name of service (Binary name), for example " - "``nova-compute``") + help=_( + "Name of service (Binary name), for example ``nova-compute``" + ), ) enabled_group = parser.add_mutually_exclusive_group() enabled_group.add_argument( - "--enable", - action="store_true", - help=_("Enable service") + "--enable", action="store_true", help=_("Enable service") ) enabled_group.add_argument( - "--disable", - action="store_true", - help=_("Disable service") + "--disable", action="store_true", help=_("Disable service") ) parser.add_argument( "--disable-reason", default=None, metavar="", - help=_("Reason for disabling the service (in quotes). " - "Should be used with --disable option.") + help=_( + "Reason for disabling the service (in quotes). " + "Should be used with --disable option." + ), ) up_down_group = parser.add_mutually_exclusive_group() up_down_group.add_argument( '--up', action='store_true', - help=_('Force up service. Requires ``--os-compute-api-version`` ' - '2.11 or greater.'), + help=_( + 'Force up service. Requires ``--os-compute-api-version`` ' + '2.11 or greater.' + ), ) up_down_group.add_argument( '--down', action='store_true', - help=_('Force down service. Requires ``--os-compute-api-version`` ' - '2.11 or greater.'), + help=_( + 'Force down service. Requires ``--os-compute-api-version`` ' + '2.11 or greater.' + ), ) return parser @@ -190,51 +197,55 @@ def _find_service_by_host_and_binary(compute_client, host, binary): :param host: the name of the compute service host :param binary: the compute service binary, e.g. nova-compute - :returns: novaclient.v2.services.Service dict-like object + :returns: The service. :raises: CommandError if no or multiple results were found """ services = list(compute_client.services(host=host, binary=binary)) # Did we find anything? if not len(services): - msg = _('Compute service for host "%(host)s" and binary ' - '"%(binary)s" not found.') % { - 'host': host, 'binary': binary} + msg = _( + 'Compute service for host "%(host)s" and binary ' + '"%(binary)s" not found.' + ) % {'host': host, 'binary': binary} raise exceptions.CommandError(msg) # Did we find more than one result? This should not happen but let's # be safe. if len(services) > 1: # TODO(mriedem): If we have an --id option for 2.53+ then we can # say to use that option to uniquely identify the service. - msg = _('Multiple compute services found for host "%(host)s" and ' - 'binary "%(binary)s". Unable to proceed.') % { - 'host': host, 'binary': binary} + msg = _( + 'Multiple compute services found for host "%(host)s" and ' + 'binary "%(binary)s". Unable to proceed.' 
+ ) % {'host': host, 'binary': binary} raise exceptions.CommandError(msg) return services[0] def take_action(self, parsed_args): - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute - if (parsed_args.enable or not parsed_args.disable) and \ - parsed_args.disable_reason: - msg = _("Cannot specify option --disable-reason without " - "--disable specified.") + if ( + parsed_args.enable or not parsed_args.disable + ) and parsed_args.disable_reason: + msg = _( + "Cannot specify option --disable-reason without " + "--disable specified." + ) raise exceptions.CommandError(msg) # Starting with microversion 2.53, there is a single # PUT /os-services/{service_id} API for updating nova-compute # services. If 2.53+ is used we need to find the nova-compute # service using the --host and --service (binary) values. - requires_service_id = ( - sdk_utils.supports_microversion(compute_client, '2.53')) + requires_service_id = sdk_utils.supports_microversion( + compute_client, '2.53' + ) service_id = None if requires_service_id: # TODO(mriedem): Add an --id option so users can pass the service # id (as a uuid) directly rather than make us look it up using # host/binary. service_id = SetService._find_service_by_host_and_binary( - compute_client, - parsed_args.host, - parsed_args.service + compute_client, parsed_args.host, parsed_args.service ).id result = 0 @@ -248,16 +259,14 @@ def take_action(self, parsed_args): if enabled is not None: if enabled: compute_client.enable_service( - service_id, - parsed_args.host, - parsed_args.service + service_id, parsed_args.host, parsed_args.service ) else: compute_client.disable_service( service_id, parsed_args.host, parsed_args.service, - parsed_args.disable_reason + parsed_args.disable_reason, ) except Exception: status = "enabled" if enabled else "disabled" @@ -271,15 +280,14 @@ def take_action(self, parsed_args): force_down = False if force_down is not None: if not sdk_utils.supports_microversion(compute_client, '2.11'): - msg = _('--os-compute-api-version 2.11 or later is ' - 'required') + msg = _('--os-compute-api-version 2.11 or later is required') raise exceptions.CommandError(msg) try: compute_client.update_service_forced_down( service_id, parsed_args.host, parsed_args.service, - force_down + force_down, ) except Exception: state = "down" if force_down else "up" @@ -287,7 +295,7 @@ def take_action(self, parsed_args): result += 1 if result > 0: - msg = _("Compute service %(service)s of host %(host)s failed to " - "set.") % {"service": parsed_args.service, - "host": parsed_args.host} + msg = _( + "Compute service %(service)s of host %(host)s failed to set." 
+ ) % {"service": parsed_args.service, "host": parsed_args.host} raise exceptions.CommandError(msg) diff --git a/openstackclient/compute/v2/usage.py b/openstackclient/compute/v2/usage.py index 86f538a7d9..3015a9e709 100644 --- a/openstackclient/compute/v2/usage.py +++ b/openstackclient/compute/v2/usage.py @@ -15,19 +15,21 @@ """Usage action implementations""" +from collections.abc import Collection import datetime import functools +import typing as ty from cliff import columns as cliff_columns -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ # TODO(stephenfin): This exists in a couple of places and should be moved to a # common module -class ProjectColumn(cliff_columns.FormattableColumn): +class ProjectColumn(cliff_columns.FormattableColumn[str]): """Formattable column for project column. Unlike the parent FormattableColumn class, the initializer of the class @@ -53,22 +55,21 @@ def human_readable(self): return project -class CountColumn(cliff_columns.FormattableColumn): - +class CountColumn(cliff_columns.FormattableColumn[Collection[ty.Any]]): def human_readable(self): return len(self._value) if self._value is not None else None -class FloatColumn(cliff_columns.FormattableColumn): - +class FloatColumn(cliff_columns.FormattableColumn[float]): def human_readable(self): - return float("%.2f" % self._value) + return float(f"{self._value:.2f}") def _formatters(project_cache): return { 'project_id': functools.partial( - ProjectColumn, project_cache=project_cache), + ProjectColumn, project_cache=project_cache + ), 'server_usages': CountColumn, 'total_memory_mb_usage': FloatColumn, 'total_vcpus_usage': FloatColumn, @@ -110,24 +111,24 @@ class ListUsage(command.Lister): _description = _("List resource usage per project") def get_parser(self, prog_name): - parser = super(ListUsage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--start", metavar="", default=None, - help=_("Usage range start date, ex 2012-01-20" - " (default: 4 weeks ago)") + help=_( + "Usage range start date, ex 2012-01-20 (default: 4 weeks ago)" + ), ) parser.add_argument( "--end", metavar="", default=None, - help=_("Usage range end date, ex 2012-01-20 (default: tomorrow)") + help=_("Usage range end date, ex 2012-01-20 (default: tomorrow)"), ) return parser def take_action(self, parsed_args): - def _format_project(project): if not project: return "" @@ -136,29 +137,29 @@ def _format_project(project): else: return project - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute columns = ( "project_id", "server_usages", "total_memory_mb_usage", "total_vcpus_usage", - "total_local_gb_usage" + "total_local_gb_usage", ) column_headers = ( "Project", "Servers", "RAM MB-Hours", "CPU Hours", - "Disk GB-Hours" + "Disk GB-Hours", ) date_cli_format = "%Y-%m-%d" - date_api_format = "%Y-%m-%dT%H:%M:%S" - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) if parsed_args.start: start = datetime.datetime.strptime( - parsed_args.start, date_cli_format) + parsed_args.start, date_cli_format + ) else: start = now - datetime.timedelta(weeks=4) @@ -167,33 +168,41 @@ def _format_project(project): else: end = now + datetime.timedelta(days=1) - usage_list = list(compute_client.usages( - start=start.strftime(date_api_format), - end=end.strftime(date_api_format), - detailed=True)) + usage_list = list( + 
compute_client.usages( + start=start, + end=end, + detailed=True, + ) + ) # Cache the project list project_cache = {} try: for p in self.app.client_manager.identity.projects.list(): project_cache[p.id] = p - except Exception: + except Exception: # noqa: S110 # Just forget it if there's any trouble pass if parsed_args.formatter == 'table' and len(usage_list) > 0: - self.app.stdout.write(_("Usage from %(start)s to %(end)s: \n") % { - "start": start.strftime(date_cli_format), - "end": end.strftime(date_cli_format), - }) + self.app.stdout.write( + _("Usage from %(start)s to %(end)s: \n") + % { + "start": start.strftime(date_cli_format), + "end": end.strftime(date_cli_format), + } + ) return ( column_headers, ( utils.get_item_properties( - s, columns, + s, + columns, formatters=_formatters(project_cache), - ) for s in usage_list + ) + for s in usage_list ), ) @@ -202,38 +211,39 @@ class ShowUsage(command.ShowOne): _description = _("Show resource usage for a single project") def get_parser(self, prog_name): - parser = super(ShowUsage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--project", metavar="", default=None, - help=_("Name or ID of project to show usage for") + help=_("Name or ID of project to show usage for"), ) parser.add_argument( "--start", metavar="", default=None, - help=_("Usage range start date, ex 2012-01-20" - " (default: 4 weeks ago)") + help=_( + "Usage range start date, ex 2012-01-20 (default: 4 weeks ago)" + ), ) parser.add_argument( "--end", metavar="", default=None, - help=_("Usage range end date, ex 2012-01-20 (default: tomorrow)") + help=_("Usage range end date, ex 2012-01-20 (default: tomorrow)"), ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity - compute_client = self.app.client_manager.sdk_connection.compute + compute_client = self.app.client_manager.compute date_cli_format = "%Y-%m-%d" - date_api_format = "%Y-%m-%dT%H:%M:%S" - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) if parsed_args.start: start = datetime.datetime.strptime( - parsed_args.start, date_cli_format) + parsed_args.start, date_cli_format + ) else: start = now - datetime.timedelta(weeks=4) @@ -252,33 +262,37 @@ def take_action(self, parsed_args): project = self.app.client_manager.auth_ref.project_id usage = compute_client.get_usage( - project=project, start=start.strftime(date_api_format), - end=end.strftime(date_api_format)) + project=project, + start=start, + end=end, + ) if parsed_args.formatter == 'table': - self.app.stdout.write(_( - "Usage from %(start)s to %(end)s on project %(project)s: \n" - ) % { - "start": start.strftime(date_cli_format), - "end": end.strftime(date_cli_format), - "project": project, - }) + self.app.stdout.write( + _("Usage from %(start)s to %(end)s on project %(project)s: \n") + % { + "start": start.strftime(date_cli_format), + "end": end.strftime(date_cli_format), + "project": project, + } + ) columns = ( "project_id", "server_usages", "total_memory_mb_usage", "total_vcpus_usage", - "total_local_gb_usage" + "total_local_gb_usage", ) column_headers = ( "Project", "Servers", "RAM MB-Hours", "CPU Hours", - "Disk GB-Hours" + "Disk GB-Hours", ) data = utils.get_item_properties( - usage, columns, formatters=_formatters(None)) + usage, columns, formatters=_formatters(None) + ) return column_headers, data diff --git a/openstackclient/identity/client.py b/openstackclient/identity/client.py index 0292aac216..707112cd05 100644 
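The usage hunks above retire the naive datetime.utcnow() call (deprecated since Python 3.12) in favour of an aware now(timezone.utc) that is immediately stripped back to a naive UTC value, and they pass the resulting datetime objects straight to the SDK instead of pre-formatted strings. A small self-contained check of the equivalence between the old and new expressions:

# Illustrative only: both expressions produce a naive datetime holding the
# current UTC time; utcnow() is deprecated as of Python 3.12.
import datetime

legacy = datetime.datetime.utcnow()
modern = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)

assert legacy.tzinfo is None and modern.tzinfo is None
assert abs((modern - legacy).total_seconds()) < 1.0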
--- a/openstackclient/identity/client.py +++ b/openstackclient/identity/client.py @@ -11,7 +11,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# import logging @@ -20,9 +19,9 @@ from openstackclient.i18n import _ - LOG = logging.getLogger(__name__) +# global variables used when building the shell DEFAULT_API_VERSION = '3' API_VERSION_OPTION = 'os_identity_api_version' API_NAME = 'identity' @@ -43,18 +42,15 @@ def make_client(instance): """Returns an identity service client.""" identity_client = utils.get_client_class( - API_NAME, - instance._api_version[API_NAME], - API_VERSIONS) + API_NAME, instance._api_version[API_NAME], API_VERSIONS + ) LOG.debug('Instantiating identity client: %s', identity_client) # Remember interface only if interface is set kwargs = utils.build_kwargs_dict('interface', instance.interface) client = identity_client( - session=instance.session, - region_name=instance.region_name, - **kwargs + session=instance.session, region_name=instance.region_name, **kwargs ) return client @@ -66,8 +62,10 @@ def build_option_parser(parser): '--os-identity-api-version', metavar='', default=utils.env('OS_IDENTITY_API_VERSION'), - help=_('Identity API version, default=%s ' - '(Env: OS_IDENTITY_API_VERSION)') % DEFAULT_API_VERSION, + help=_( + 'Identity API version, default=%s (Env: OS_IDENTITY_API_VERSION)' + ) + % DEFAULT_API_VERSION, ) return parser diff --git a/openstackclient/identity/common.py b/openstackclient/identity/common.py index a75db4f8dd..0684764706 100644 --- a/openstackclient/identity/common.py +++ b/openstackclient/identity/common.py @@ -20,6 +20,7 @@ from keystoneclient.v3 import groups from keystoneclient.v3 import projects from keystoneclient.v3 import users +from openstack import exceptions as sdk_exceptions from osc_lib import exceptions from osc_lib import utils @@ -33,7 +34,8 @@ def find_service_in_list(service_list, service_id): if service.id == service_id: return service raise exceptions.CommandError( - "No service with a type, name or ID of '%s' exists." % service_id) + f"No service with a type, name or ID of '{service_id}' exists." + ) def find_service(identity_client, name_type_or_id): @@ -52,8 +54,10 @@ def find_service(identity_client, name_type_or_id): except identity_exc.NotFound: pass except identity_exc.NoUniqueMatch: - msg = _("Multiple service matches found for '%s', " - "use an ID to be more specific.") + msg = _( + "Multiple service matches found for '%s', " + "use an ID to be more specific." + ) raise exceptions.CommandError(msg % name_type_or_id) try: @@ -63,11 +67,46 @@ def find_service(identity_client, name_type_or_id): msg = _("No service with a type, name or ID of '%s' exists.") raise exceptions.CommandError(msg % name_type_or_id) except identity_exc.NoUniqueMatch: - msg = _("Multiple service matches found for '%s', " - "use an ID to be more specific.") + msg = _( + "Multiple service matches found for '%s', " + "use an ID to be more specific." 
+ ) raise exceptions.CommandError(msg % name_type_or_id) +def find_service_sdk(identity_client, name_type_or_id): + """Find a service by id, name or type.""" + + try: + # search for service name or ID + return identity_client.find_service( + name_type_or_id, ignore_missing=False + ) + except sdk_exceptions.ResourceNotFound: + pass + except sdk_exceptions.DuplicateResource as e: + raise exceptions.CommandError(e.message) + + # search for service type + services = identity_client.services(type=name_type_or_id) + try: + service = next(services) + except StopIteration: + msg = _( + "No service with a type, name or ID of '%(query)s' exists." + ) % {"query": name_type_or_id} + raise exceptions.CommandError(msg) + + if next(services, None): + msg = _( + "Multiple service matches found for '%(query)s', " + "use an ID to be more specific." + ) % {"query": name_type_or_id} + raise exceptions.CommandError(msg) + + return service + + def get_resource(manager, name_type_or_id): # NOTE (vishakha): Due to bug #1799153 and for any another related case # where GET resource API does not support the filter by name, @@ -87,6 +126,18 @@ def get_resource(manager, name_type_or_id): raise exceptions.CommandError(msg % name_type_or_id) +def get_resource_by_id(manager, resource_id): + """Get resource by ID + + Raises CommandError if the resource is not found + """ + try: + return manager.get(resource_id) + except identity_exc.NotFound: + msg = _("Resource with id {} not found") + raise exceptions.CommandError(msg.format(resource_id)) + + def _get_token_resource(client, resource, parsed_name, parsed_domain=None): """Peek into the user's auth token to get resource IDs @@ -133,50 +184,118 @@ def _get_token_resource(client, resource, parsed_name, parsed_domain=None): return parsed_name -def _get_domain_id_if_requested(identity_client, domain_name_or_id): - if not domain_name_or_id: - return None - domain = find_domain(identity_client, domain_name_or_id) - return domain.id +def find_domain(identity_client, name_or_id): + return _find_identity_resource( + identity_client.domains, name_or_id, domains.Domain + ) -def find_domain(identity_client, name_or_id): - return _find_identity_resource(identity_client.domains, name_or_id, - domains.Domain) +def find_domain_id_sdk( + identity_client, name_or_id, *, validate_actor_existence=True +): + return _find_sdk_id( + identity_client.find_domain, + name_or_id=name_or_id, + validate_actor_existence=validate_actor_existence, + ) def find_group(identity_client, name_or_id, domain_name_or_id=None): - domain_id = _get_domain_id_if_requested(identity_client, domain_name_or_id) - if not domain_id: - return _find_identity_resource(identity_client.groups, name_or_id, - groups.Group) - else: - return _find_identity_resource(identity_client.groups, name_or_id, - groups.Group, domain_id=domain_id) + if domain_name_or_id is None: + return _find_identity_resource( + identity_client.groups, name_or_id, groups.Group + ) + + domain_id = find_domain(identity_client, domain_name_or_id).id + return _find_identity_resource( + identity_client.groups, + name_or_id, + groups.Group, + domain_id=domain_id, + ) + + +def find_group_id_sdk( + identity_client, + name_or_id, + domain_name_or_id=None, + *, + validate_actor_existence=True, +): + if domain_name_or_id is None: + return _find_sdk_id( + identity_client.find_group, + name_or_id=name_or_id, + validate_actor_existence=validate_actor_existence, + ) + + domain_id = find_domain_id_sdk( + identity_client, + name_or_id=domain_name_or_id, + 
validate_actor_existence=validate_actor_existence, + ) + return _find_sdk_id( + identity_client.find_group, + name_or_id=name_or_id, + validate_actor_existence=validate_actor_existence, + domain_id=domain_id, + ) def find_project(identity_client, name_or_id, domain_name_or_id=None): - domain_id = _get_domain_id_if_requested(identity_client, domain_name_or_id) - if not domain_id: - return _find_identity_resource(identity_client.projects, name_or_id, - projects.Project) - else: - return _find_identity_resource(identity_client.projects, name_or_id, - projects.Project, domain_id=domain_id) + if domain_name_or_id is None: + return _find_identity_resource( + identity_client.projects, name_or_id, projects.Project + ) + domain_id = find_domain(identity_client, domain_name_or_id).id + return _find_identity_resource( + identity_client.projects, + name_or_id, + projects.Project, + domain_id=domain_id, + ) def find_user(identity_client, name_or_id, domain_name_or_id=None): - domain_id = _get_domain_id_if_requested(identity_client, domain_name_or_id) - if not domain_id: - return _find_identity_resource(identity_client.users, name_or_id, - users.User) - else: - return _find_identity_resource(identity_client.users, name_or_id, - users.User, domain_id=domain_id) + if domain_name_or_id is None: + return _find_identity_resource( + identity_client.users, name_or_id, users.User + ) + domain_id = find_domain(identity_client, domain_name_or_id).id + return _find_identity_resource( + identity_client.users, name_or_id, users.User, domain_id=domain_id + ) + + +def find_user_id_sdk( + identity_client, + name_or_id, + domain_name_or_id=None, + *, + validate_actor_existence=True, +): + if domain_name_or_id is None: + return _find_sdk_id( + identity_client.find_user, + name_or_id=name_or_id, + validate_actor_existence=validate_actor_existence, + ) + domain_id = find_domain_id_sdk( + identity_client, + name_or_id=domain_name_or_id, + validate_actor_existence=validate_actor_existence, + ) + return _find_sdk_id( + identity_client.find_user, + name_or_id=name_or_id, + validate_actor_existence=validate_actor_existence, + domain_id=domain_id, + ) -def _find_identity_resource(identity_client_manager, name_or_id, - resource_type, **kwargs): +def _find_identity_resource( + identity_client_manager, name_or_id, resource_type, **kwargs +): """Find a specific identity resource. 
Using keystoneclient's manager, attempt to find a specific resource by its @@ -203,8 +322,9 @@ def _find_identity_resource(identity_client_manager, name_or_id, """ try: - identity_resource = utils.find_resource(identity_client_manager, - name_or_id, **kwargs) + identity_resource = utils.find_resource( + identity_client_manager, name_or_id, **kwargs + ) if identity_resource is not None: return identity_resource except (exceptions.Forbidden, identity_exc.Forbidden): @@ -213,22 +333,31 @@ def _find_identity_resource(identity_client_manager, name_or_id, return resource_type(None, {'id': name_or_id, 'name': name_or_id}) -def get_immutable_options(parsed_args): - options = {} - if parsed_args.immutable: - options['immutable'] = True - if parsed_args.no_immutable: - options['immutable'] = False - return options +def _find_sdk_id( + find_command, name_or_id, *, validate_actor_existence=True, **kwargs +): + try: + resource = find_command( + name_or_id=name_or_id, ignore_missing=False, **kwargs + ) + except sdk_exceptions.ForbiddenException: + return name_or_id + except sdk_exceptions.ResourceNotFound as exc: + if not validate_actor_existence: + return name_or_id + raise exceptions.CommandError from exc + return resource.id def add_user_domain_option_to_parser(parser): parser.add_argument( '--user-domain', metavar='', - help=_('Domain the user belongs to (name or ID). ' - 'This can be used in case collisions between user names ' - 'exist.'), + help=_( + 'Domain the user belongs to (name or ID). ' + 'This can be used in case collisions between user names ' + 'exist.' + ), ) @@ -236,9 +365,11 @@ def add_group_domain_option_to_parser(parser): parser.add_argument( '--group-domain', metavar='', - help=_('Domain the group belongs to (name or ID). ' - 'This can be used in case collisions between group names ' - 'exist.'), + help=_( + 'Domain the group belongs to (name or ID). ' + 'This can be used in case collisions between group names ' + 'exist.' + ), ) @@ -246,9 +377,13 @@ def add_project_domain_option_to_parser(parser, enhance_help=lambda _h: _h): parser.add_argument( '--project-domain', metavar='', - help=enhance_help(_('Domain the project belongs to (name or ID). This ' - 'can be used in case collisions between project ' - 'names exist.')), + help=enhance_help( + _( + 'Domain the project belongs to (name or ID). This ' + 'can be used in case collisions between project ' + 'names exist.' + ) + ), ) @@ -256,9 +391,11 @@ def add_role_domain_option_to_parser(parser): parser.add_argument( '--role-domain', metavar='', - help=_('Domain the role belongs to (name or ID). ' - 'This must be specified when the name of a domain specific ' - 'role is used.'), + help=_( + 'Domain the role belongs to (name or ID). ' + 'This must be specified when the name of a domain specific ' + 'role is used.' + ), ) @@ -267,21 +404,28 @@ def add_inherited_option_to_parser(parser): '--inherited', action='store_true', default=False, - help=_('Specifies if the role grant is inheritable to the sub ' - 'projects'), + help=_( + 'Specifies if the role grant is inheritable to the sub projects' + ), ) def add_resource_option_to_parser(parser): - enable_group = parser.add_mutually_exclusive_group() - enable_group.add_argument( + immutable_group = parser.add_mutually_exclusive_group() + immutable_group.add_argument( '--immutable', action='store_true', - help=_('Make resource immutable. 
An immutable project may not ' - 'be deleted or modified except to remove the immutable flag'), + dest='immutable', + default=None, + help=_( + 'Make resource immutable. An immutable project may not ' + 'be deleted or modified except to remove the immutable flag' + ), ) - enable_group.add_argument( + immutable_group.add_argument( '--no-immutable', - action='store_true', + action='store_false', + dest='immutable', + default=None, help=_('Make resource mutable (default)'), ) diff --git a/openstackclient/identity/v2_0/catalog.py b/openstackclient/identity/v2_0/catalog.py index 05d0e9aebc..437cad2fc2 100644 --- a/openstackclient/identity/v2_0/catalog.py +++ b/openstackclient/identity/v2_0/catalog.py @@ -14,19 +14,20 @@ """Identity v2 Service Catalog action implementations""" import logging +import typing as ty from cliff import columns as cliff_columns -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) -class EndpointsColumn(cliff_columns.FormattableColumn): +class EndpointsColumn(cliff_columns.FormattableColumn[ty.Any]): def human_readable(self): if not self._value: return "" @@ -39,7 +40,7 @@ def human_readable(self): for endpoint_type in ['publicURL', 'internalURL', 'adminURL']: url = ep.get(endpoint_type) if url: - ret += " %s: %s\n" % (endpoint_type, url) + ret += f" {endpoint_type}: {url}\n" return ret @@ -47,7 +48,6 @@ class ListCatalog(command.Lister): _description = _("List services in the service catalog") def take_action(self, parsed_args): - # Trigger auth if it has not happened yet auth_ref = self.app.client_manager.auth_ref if not auth_ref: @@ -57,20 +57,26 @@ def take_action(self, parsed_args): data = auth_ref.service_catalog.catalog columns = ('Name', 'Type', 'Endpoints') - return (columns, - (utils.get_dict_properties( - s, columns, + return ( + columns, + ( + utils.get_dict_properties( + s, + columns, formatters={ 'Endpoints': EndpointsColumn, }, - ) for s in data)) + ) + for s in data + ), + ) class ShowCatalog(command.ShowOne): _description = _("Display service catalog details") def get_parser(self, prog_name): - parser = super(ShowCatalog, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service', metavar='', @@ -79,7 +85,6 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - # Trigger auth if it has not happened yet auth_ref = self.app.client_manager.auth_ref if not auth_ref: @@ -89,8 +94,10 @@ def take_action(self, parsed_args): data = None for service in auth_ref.service_catalog.catalog: - if (service.get('name') == parsed_args.service or - service.get('type') == parsed_args.service): + if ( + service.get('name') == parsed_args.service + or service.get('type') == parsed_args.service + ): data = service.copy() data['endpoints'] = EndpointsColumn(data['endpoints']) if 'endpoints_links' in data: diff --git a/openstackclient/identity/v2_0/ec2creds.py b/openstackclient/identity/v2_0/ec2creds.py index f712bf4584..360a090246 100644 --- a/openstackclient/identity/v2_0/ec2creds.py +++ b/openstackclient/identity/v2_0/ec2creds.py @@ -18,10 +18,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -32,7 +32,7 @@ class CreateEC2Creds(command.ShowOne): _description = _("Create EC2 credentials") def get_parser(self, prog_name): - 
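The add_resource_option_to_parser hunk above turns two independent store_true flags into a mutually exclusive pair that shares dest='immutable' with default=None, so callers can distinguish "explicitly immutable", "explicitly mutable", and "not specified" directly from parsed_args.immutable. A minimal argparse sketch of that tri-state pattern (the option names are reused from the hunk; the parser itself is a throwaway example):

# Tri-state flag: --immutable -> True, --no-immutable -> False, neither -> None.
import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument(
    '--immutable', dest='immutable', action='store_true', default=None)
group.add_argument(
    '--no-immutable', dest='immutable', action='store_false', default=None)

print(parser.parse_args([]).immutable)                  # None
print(parser.parse_args(['--immutable']).immutable)     # True
print(parser.parse_args(['--no-immutable']).immutable)  # False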
parser = super(CreateEC2Creds, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--project', metavar='', @@ -77,9 +77,7 @@ def take_action(self, parsed_args): info.update(creds._info) if 'tenant_id' in info: - info.update( - {'project_id': info.pop('tenant_id')} - ) + info.update({'project_id': info.pop('tenant_id')}) return zip(*sorted(info.items())) @@ -88,7 +86,7 @@ class DeleteEC2Creds(command.Command): _description = _("Delete EC2 credentials") def get_parser(self, prog_name): - parser = super(DeleteEC2Creds, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'access_keys', metavar='', @@ -120,14 +118,20 @@ def take_action(self, parsed_args): identity_client.ec2.delete(user, access_key) except Exception as e: result += 1 - LOG.error(_("Failed to delete EC2 credentials with " - "access key '%(access_key)s': %(e)s"), - {'access_key': access_key, 'e': e}) + LOG.error( + _( + "Failed to delete EC2 credentials with " + "access key '%(access_key)s': %(e)s" + ), + {'access_key': access_key, 'e': e}, + ) if result > 0: total = len(parsed_args.access_keys) - msg = (_("%(result)s of %(total)s EC2 keys failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s EC2 keys failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -135,7 +139,7 @@ class ListEC2Creds(command.Lister): _description = _("List EC2 credentials") def get_parser(self, prog_name): - parser = super(ListEC2Creds, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--user', metavar='', @@ -159,18 +163,24 @@ def take_action(self, parsed_args): column_headers = ('Access', 'Secret', 'Project ID', 'User ID') data = identity_client.ec2.list(user) - return (column_headers, - (utils.get_item_properties( - s, columns, + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class ShowEC2Creds(command.ShowOne): _description = _("Display EC2 credentials details") def get_parser(self, prog_name): - parser = super(ShowEC2Creds, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'access_key', metavar='', @@ -201,8 +211,6 @@ def take_action(self, parsed_args): info.update(creds._info) if 'tenant_id' in info: - info.update( - {'project_id': info.pop('tenant_id')} - ) + info.update({'project_id': info.pop('tenant_id')}) return zip(*sorted(info.items())) diff --git a/openstackclient/identity/v2_0/endpoint.py b/openstackclient/identity/v2_0/endpoint.py index 57906ddff6..38f7f4e56c 100644 --- a/openstackclient/identity/v2_0/endpoint.py +++ b/openstackclient/identity/v2_0/endpoint.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -32,7 +32,7 @@ class CreateEndpoint(command.ShowOne): _description = _("Create new endpoint") def get_parser(self, prog_name): - parser = super(CreateEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service', metavar='', @@ -69,7 +69,8 @@ def take_action(self, parsed_args): service.id, parsed_args.publicurl, parsed_args.adminurl, - parsed_args.internalurl,) + parsed_args.internalurl, + ) info = {} info.update(endpoint._info) @@ -82,7 +83,7 @@ class 
DeleteEndpoint(command.Command): _description = _("Delete endpoint(s)") def get_parser(self, prog_name): - parser = super(DeleteEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpoints', metavar='', @@ -100,14 +101,20 @@ def take_action(self, parsed_args): identity_client.endpoints.delete(endpoint) except Exception as e: result += 1 - LOG.error(_("Failed to delete endpoint with " - "ID '%(endpoint)s': %(e)s"), - {'endpoint': endpoint, 'e': e}) + LOG.error( + _( + "Failed to delete endpoint with " + "ID '%(endpoint)s': %(e)s" + ), + {'endpoint': endpoint, 'e': e}, + ) if result > 0: total = len(parsed_args.endpoints) - msg = (_("%(result)s of %(total)s endpoints failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s endpoints failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -115,7 +122,7 @@ class ListEndpoint(command.Lister): _description = _("List endpoints") def get_parser(self, prog_name): - parser = super(ListEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', @@ -126,34 +133,50 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity + + columns: tuple[str, ...] = ( + 'ID', + 'Region', + 'Service Name', + 'Service Type', + ) if parsed_args.long: - columns = ('ID', 'Region', 'Service Name', 'Service Type', - 'PublicURL', 'AdminURL', 'InternalURL') - else: - columns = ('ID', 'Region', 'Service Name', 'Service Type') + columns += ( + 'PublicURL', + 'AdminURL', + 'InternalURL', + ) data = identity_client.endpoints.list() for ep in data: service = common.find_service(identity_client, ep.service_id) ep.service_name = service.name ep.service_type = service.type - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class ShowEndpoint(command.ShowOne): _description = _("Display endpoint details") def get_parser(self, prog_name): - parser = super(ShowEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpoint_or_service', metavar='', - help=_('Endpoint to display (endpoint ID, service ID,' - ' service name, service type)'), + help=_( + 'Endpoint to display (endpoint ID, service ID,' + ' service name, service type)' + ), ) return parser @@ -166,8 +189,9 @@ def take_action(self, parsed_args): match = ep service = common.find_service(identity_client, ep.service_id) if match is None: - service = common.find_service(identity_client, - parsed_args.endpoint_or_service) + service = common.find_service( + identity_client, parsed_args.endpoint_or_service + ) for ep in data: if ep.service_id == service.id: match = ep diff --git a/openstackclient/identity/v2_0/project.py b/openstackclient/identity/v2_0/project.py index f431c02144..bf19d7d09c 100644 --- a/openstackclient/identity/v2_0/project.py +++ b/openstackclient/identity/v2_0/project.py @@ -20,10 +20,10 @@ from keystoneauth1 import exceptions as ks_exc from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -34,7 +34,7 @@ class CreateProject(command.ShowOne): _description = _("Create 
new project") def get_parser(self, prog_name): - parser = super(CreateProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -59,9 +59,12 @@ def get_parser(self, prog_name): parser.add_argument( '--property', metavar='', + dest='properties', action=parseractions.KeyValueAction, - help=_('Add a property to ' - '(repeat option to set multiple properties)'), + help=_( + 'Add a property to ' + '(repeat option to set multiple properties)' + ), ) parser.add_argument( '--or-show', @@ -77,15 +80,15 @@ def take_action(self, parsed_args): if parsed_args.disable: enabled = False kwargs = {} - if parsed_args.property: - kwargs = parsed_args.property.copy() + if parsed_args.properties: + kwargs.update(parsed_args.properties) try: project = identity_client.tenants.create( parsed_args.name, description=parsed_args.description, enabled=enabled, - **kwargs + **kwargs, ) except ks_exc.Conflict: if parsed_args.or_show: @@ -103,10 +106,17 @@ def take_action(self, parsed_args): class DeleteProject(command.Command): - _description = _("Delete project(s)") + _description = _( + "Delete project(s). This command will remove specified " + "existing project(s) if an active user is authorized to do " + "this. If there are resources managed by other services " + "(for example, Nova, Neutron, Cinder) associated with " + "specified project(s), delete operation will proceed " + "regardless." + ) def get_parser(self, prog_name): - parser = super(DeleteProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'projects', metavar='', @@ -128,14 +138,20 @@ def take_action(self, parsed_args): identity_client.tenants.delete(project_obj.id) except Exception as e: errors += 1 - LOG.error(_("Failed to delete project with " - "name or ID '%(project)s': %(e)s"), - {'project': project, 'e': e}) + LOG.error( + _( + "Failed to delete project with " + "name or ID '%(project)s': %(e)s" + ), + {'project': project, 'e': e}, + ) if errors > 0: total = len(parsed_args.projects) - msg = (_("%(errors)s of %(total)s projects failed " - "to delete.") % {'errors': errors, 'total': total}) + msg = _("%(errors)s of %(total)s projects failed to delete.") % { + 'errors': errors, + 'total': total, + } raise exceptions.CommandError(msg) @@ -143,7 +159,7 @@ class ListProject(command.Lister): _description = _("List projects") def get_parser(self, prog_name): - parser = super(ListProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', @@ -153,32 +169,39 @@ def get_parser(self, prog_name): parser.add_argument( '--sort', metavar='[:]', - help=_('Sort output by selected keys and directions (asc or desc) ' - '(default: asc), repeat this option to specify multiple ' - 'keys and directions.'), + help=_( + 'Sort output by selected keys and directions (asc or desc) ' + '(default: asc), repeat this option to specify multiple ' + 'keys and directions.' + ), ) return parser def take_action(self, parsed_args): + columns: tuple[str, ...] 
= ('ID', 'Name') if parsed_args.long: - columns = ('ID', 'Name', 'Description', 'Enabled') - else: - columns = ('ID', 'Name') + columns += ('Description', 'Enabled') data = self.app.client_manager.identity.tenants.list() if parsed_args.sort: data = utils.sort_items(data, parsed_args.sort) - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetProject(command.Command): _description = _("Set project properties") def get_parser(self, prog_name): - parser = super(SetProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'project', metavar='', @@ -208,9 +231,12 @@ def get_parser(self, prog_name): parser.add_argument( '--property', metavar='', + dest='properties', action=parseractions.KeyValueAction, - help=_('Set a project property ' - '(repeat option to set multiple properties)'), + help=_( + 'Set a project property ' + '(repeat option to set multiple properties)' + ), ) return parser @@ -231,8 +257,8 @@ def take_action(self, parsed_args): kwargs['enabled'] = True if parsed_args.disable: kwargs['enabled'] = False - if parsed_args.property: - kwargs.update(parsed_args.property) + if parsed_args.properties: + kwargs.update(parsed_args.properties) if 'id' in kwargs: del kwargs['id'] if 'name' in kwargs: @@ -247,7 +273,7 @@ class ShowProject(command.ShowOne): _description = _("Display project details") def get_parser(self, prog_name): - parser = super(ShowProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'project', metavar='', @@ -268,8 +294,8 @@ def take_action(self, parsed_args): except ks_exc.Forbidden: auth_ref = self.app.client_manager.auth_ref if ( - parsed_args.project == auth_ref.project_id or - parsed_args.project == auth_ref.project_name + parsed_args.project == auth_ref.project_id + or parsed_args.project == auth_ref.project_name ): # Ask for currently auth'ed project so return it info = { @@ -305,7 +331,7 @@ class UnsetProject(command.Command): _description = _("Unset project properties") def get_parser(self, prog_name): - parser = super(UnsetProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'project', metavar='', @@ -314,10 +340,13 @@ def get_parser(self, prog_name): parser.add_argument( '--property', metavar='', + dest='properties', action='append', default=[], - help=_('Unset a project property ' - '(repeat option to unset multiple properties)'), + help=_( + 'Unset a project property ' + '(repeat option to unset multiple properties)' + ), ) return parser @@ -328,7 +357,7 @@ def take_action(self, parsed_args): parsed_args.project, ) kwargs = project._info - for key in parsed_args.property: + for key in parsed_args.properties: if key in kwargs: kwargs[key] = None identity_client.tenants.update(project.id, **kwargs) diff --git a/openstackclient/identity/v2_0/role.py b/openstackclient/identity/v2_0/role.py index 5c53fbcd5c..e54c07af08 100644 --- a/openstackclient/identity/v2_0/role.py +++ b/openstackclient/identity/v2_0/role.py @@ -18,10 +18,10 @@ import logging from keystoneauth1 import exceptions as ks_exc -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -32,7 +32,7 @@ class AddRole(command.ShowOne): _description = _("Add role to project:user") def get_parser(self, prog_name): - 
parser = super(AddRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role', metavar='', @@ -75,7 +75,7 @@ class CreateRole(command.ShowOne): _description = _("Create new role") def get_parser(self, prog_name): - parser = super(CreateRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role_name', metavar='', @@ -111,7 +111,7 @@ class DeleteRole(command.Command): _description = _("Delete role(s)") def get_parser(self, prog_name): - parser = super(DeleteRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'roles', metavar='', @@ -133,14 +133,20 @@ def take_action(self, parsed_args): identity_client.roles.delete(role_obj.id) except Exception as e: errors += 1 - LOG.error(_("Failed to delete role with " - "name or ID '%(role)s': %(e)s"), - {'role': role, 'e': e}) + LOG.error( + _( + "Failed to delete role with " + "name or ID '%(role)s': %(e)s" + ), + {'role': role, 'e': e}, + ) if errors > 0: total = len(parsed_args.roles) - msg = (_("%(errors)s of %(total)s roles failed " - "to delete.") % {'errors': errors, 'total': total}) + msg = _("%(errors)s of %(total)s roles failed to delete.") % { + 'errors': errors, + 'total': total, + } raise exceptions.CommandError(msg) @@ -153,18 +159,24 @@ def take_action(self, parsed_args): columns = ('ID', 'Name') data = identity_client.roles.list() - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class RemoveRole(command.Command): _description = _("Remove role from project : user") def get_parser(self, prog_name): - parser = super(RemoveRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role', metavar='', @@ -192,17 +204,14 @@ def take_action(self, parsed_args): parsed_args.project, ) user = utils.find_resource(identity_client.users, parsed_args.user) - identity_client.roles.remove_user_role( - user.id, - role.id, - project.id) + identity_client.roles.remove_user_role(user.id, role.id, project.id) class ShowRole(command.ShowOne): _description = _("Display role details") def get_parser(self, prog_name): - parser = super(ShowRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role', metavar='', diff --git a/openstackclient/identity/v2_0/role_assignment.py b/openstackclient/identity/v2_0/role_assignment.py index 8236bbfc68..0aa800ef84 100644 --- a/openstackclient/identity/v2_0/role_assignment.py +++ b/openstackclient/identity/v2_0/role_assignment.py @@ -11,12 +11,12 @@ # under the License. 
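The delete commands reworked throughout this patch (compute services, EC2 credentials, endpoints, projects, roles) all share one shape: attempt every deletion, log and count individual failures, then raise a single summary error at the end. A compact, generic sketch of that shape; delete_all, items and delete_fn are illustrative names, and RuntimeError stands in for osc_lib's CommandError:

# Try every item, remember how many failed, report once at the end.
import logging

LOG = logging.getLogger(__name__)


def delete_all(items, delete_fn):
    errors = 0
    for item in items:
        try:
            delete_fn(item)
        except Exception as e:
            errors += 1
            LOG.error(
                "Failed to delete '%(item)s': %(e)s", {'item': item, 'e': e})
    if errors:
        raise RuntimeError(
            f"{errors} of {len(items)} items failed to delete.")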
# -"""Identity v2 Assignment action implementations """ +"""Identity v2 Assignment action implementations""" -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ # noqa @@ -24,7 +24,7 @@ class ListRoleAssignment(command.Lister): _description = _("List role assignments") def get_parser(self, prog_name): - parser = super(ListRoleAssignment, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--user', metavar='', @@ -51,7 +51,7 @@ def get_parser(self, prog_name): action="store_true", dest='authproject', help='Only list assignments for the project to which the ' - 'authenticated user\'s token is scoped', + 'authenticated user\'s token is scoped', ) return parser @@ -68,10 +68,9 @@ def take_action(self, parsed_args): parsed_args.user, ) elif parsed_args.authuser: - if auth_ref: + if auth_ref and auth_ref.user_id: user = utils.find_resource( - identity_client.users, - auth_ref.user_id + identity_client.users, auth_ref.user_id ) project = None @@ -81,10 +80,9 @@ def take_action(self, parsed_args): parsed_args.project, ) elif parsed_args.authproject: - if auth_ref: + if auth_ref and auth_ref.project_id: project = utils.find_resource( - identity_client.projects, - auth_ref.project_id + identity_client.projects, auth_ref.project_id ) # If user or project is not specified, we would ideally list all @@ -107,8 +105,14 @@ def take_action(self, parsed_args): user_role.user = user.id user_role.project = project.id - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) diff --git a/openstackclient/identity/v2_0/service.py b/openstackclient/identity/v2_0/service.py index afc0b3d7ad..5e8dca7354 100644 --- a/openstackclient/identity/v2_0/service.py +++ b/openstackclient/identity/v2_0/service.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -32,7 +32,7 @@ class CreateService(command.ShowOne): _description = _("Create new service") def get_parser(self, prog_name): - parser = super(CreateService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'type', metavar='', @@ -71,7 +71,7 @@ class DeleteService(command.Command): _description = _("Delete service(s)") def get_parser(self, prog_name): - parser = super(DeleteService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'services', metavar='', @@ -90,14 +90,20 @@ def take_action(self, parsed_args): identity_client.services.delete(service.id) except Exception as e: result += 1 - LOG.error(_("Failed to delete service with " - "name or ID '%(service)s': %(e)s"), - {'service': service, 'e': e}) + LOG.error( + _( + "Failed to delete service with " + "name or ID '%(service)s': %(e)s" + ), + {'service': service, 'e': e}, + ) if result > 0: total = len(parsed_args.services) - msg = (_("%(result)s of %(total)s services failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s services failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -105,7 +111,7 @@ class ListService(command.Lister): _description = _("List 
services") def get_parser(self, prog_name): - parser = super(ListService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', @@ -115,11 +121,9 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - + columns: tuple[str, ...] = ('ID', 'Name', 'Type') if parsed_args.long: - columns = ('ID', 'Name', 'Type', 'Description') - else: - columns = ('ID', 'Name', 'Type') + columns += ('Description',) data = self.app.client_manager.identity.services.list() return ( columns, @@ -131,7 +135,7 @@ class ShowService(command.ShowOne): _description = _("Display service details") def get_parser(self, prog_name): - parser = super(ShowService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service', metavar='', @@ -151,15 +155,17 @@ def take_action(self, parsed_args): if parsed_args.catalog: endpoints = auth_ref.service_catalog.get_endpoints( - service_type=parsed_args.service) - for (service, service_endpoints) in endpoints.items(): + service_type=parsed_args.service + ) + for service, service_endpoints in endpoints.items(): if service_endpoints: info = {"type": service} info.update(service_endpoints[0]) return zip(*sorted(info.items())) - msg = _("No service catalog with a type, name or ID of '%s' " - "exists.") % (parsed_args.service) + msg = _( + "No service catalog with a type, name or ID of '%s' exists." + ) % (parsed_args.service) raise exceptions.CommandError(msg) else: service = common.find_service(identity_client, parsed_args.service) diff --git a/openstackclient/identity/v2_0/token.py b/openstackclient/identity/v2_0/token.py index 205e15d30b..ebb2269a92 100644 --- a/openstackclient/identity/v2_0/token.py +++ b/openstackclient/identity/v2_0/token.py @@ -15,9 +15,9 @@ """Identity v2 Token action implementations""" -from osc_lib.command import command from osc_lib import exceptions +from openstackclient import command from openstackclient.i18n import _ @@ -28,14 +28,15 @@ class IssueToken(command.ShowOne): required_scope = False def get_parser(self, prog_name): - parser = super(IssueToken, self).get_parser(prog_name) + parser = super().get_parser(prog_name) return parser def take_action(self, parsed_args): auth_ref = self.app.client_manager.auth_ref if not auth_ref: raise exceptions.AuthorizationFailure( - "Only an authorized user may issue a new token.") + "Only an authorized user may issue a new token." + ) data = {} if auth_ref.auth_token: @@ -55,7 +56,7 @@ class RevokeToken(command.Command): _description = _("Revoke existing token") def get_parser(self, prog_name): - parser = super(RevokeToken, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'token', metavar='', diff --git a/openstackclient/identity/v2_0/user.py b/openstackclient/identity/v2_0/user.py index 8dac093ef4..244b9e9da9 100644 --- a/openstackclient/identity/v2_0/user.py +++ b/openstackclient/identity/v2_0/user.py @@ -20,17 +20,17 @@ from cliff import columns as cliff_columns from keystoneauth1 import exceptions as ks_exc -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) -class ProjectColumn(cliff_columns.FormattableColumn): +class ProjectColumn(cliff_columns.FormattableColumn[str]): """Formattable column for project column. 
Unlike the parent FormattableColumn class, the initializer of the @@ -42,7 +42,7 @@ class takes project_cache as the second argument. """ def __init__(self, value, project_cache=None): - super(ProjectColumn, self).__init__(value) + super().__init__(value) self.project_cache = project_cache or {} def human_readable(self): @@ -59,7 +59,7 @@ class CreateUser(command.ShowOne): _description = _("Create new user") def get_parser(self, prog_name): - parser = super(CreateUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -122,8 +122,12 @@ def take_action(self, parsed_args): parsed_args.password = utils.get_password(self.app.stdin) if not parsed_args.password: - LOG.warning(_("No password was supplied, authentication will fail " - "when a user does not have a password.")) + LOG.warning( + _( + "No password was supplied, authentication will fail " + "when a user does not have a password." + ) + ) try: user = identity_client.users.create( @@ -147,9 +151,7 @@ def take_action(self, parsed_args): # the returned resource has 'tenantId'. Sigh. # We're using project_id now inside OSC so there. if 'tenantId' in user._info: - user._info.update( - {'project_id': user._info.pop('tenantId')} - ) + user._info.update({'project_id': user._info.pop('tenantId')}) info = {} info.update(user._info) @@ -160,7 +162,7 @@ class DeleteUser(command.Command): _description = _("Delete user(s)") def get_parser(self, prog_name): - parser = super(DeleteUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'users', metavar='', @@ -182,14 +184,20 @@ def take_action(self, parsed_args): identity_client.users.delete(user_obj.id) except Exception as e: errors += 1 - LOG.error(_("Failed to delete user with " - "name or ID '%(user)s': %(e)s"), - {'user': user, 'e': e}) + LOG.error( + _( + "Failed to delete user with " + "name or ID '%(user)s': %(e)s" + ), + {'user': user, 'e': e}, + ) if errors > 0: total = len(parsed_args.users) - msg = (_("%(errors)s of %(total)s users failed " - "to delete.") % {'errors': errors, 'total': total}) + msg = _("%(errors)s of %(total)s users failed to delete.") % { + 'errors': errors, + 'total': total, + } raise exceptions.CommandError(msg) @@ -197,7 +205,7 @@ class ListUser(command.Lister): _description = _("List users") def get_parser(self, prog_name): - parser = super(ListUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--project', metavar='', @@ -222,40 +230,26 @@ def take_action(self, parsed_args): ) project = project.id + columns: tuple[str, ...] = ('id', 'name') + column_headers: tuple[str, ...] 
= ('ID', 'Name') if parsed_args.long: - columns = ( - 'ID', - 'Name', - 'tenantId', - 'Email', - 'Enabled', - ) - column_headers = ( - 'ID', - 'Name', - 'Project', - 'Email', - 'Enabled', - ) + columns += ('tenantId', 'email', 'enabled') + column_headers += ('Project', 'Email', 'Enabled') # Cache the project list project_cache = {} try: for p in identity_client.tenants.list(): project_cache[p.id] = p - except Exception: + except Exception: # noqa: S110 # Just forget it if there's any trouble pass formatters['tenantId'] = functools.partial( - ProjectColumn, project_cache=project_cache) - else: - columns = column_headers = ('ID', 'Name') + ProjectColumn, project_cache=project_cache + ) data = identity_client.users.list(tenant_id=project) if parsed_args.project: - d = {} - for s in data: - d[s.id] = s - data = d.values() + data = {s.id: s for s in data}.values() if parsed_args.long: # FIXME(dtroyer): Sometimes user objects have 'tenant_id' instead @@ -267,19 +261,25 @@ def take_action(self, parsed_args): d._info['tenantId'] = d._info.pop('tenant_id') d._add_details(d._info) - return (column_headers, - (utils.get_item_properties( - s, columns, + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, mixed_case_fields=('tenantId',), formatters=formatters, - ) for s in data)) + ) + for s in data + ), + ) class SetUser(command.Command): _description = _("Set user properties") def get_parser(self, prog_name): - parser = super(SetUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'user', metavar='', @@ -331,8 +331,12 @@ def take_action(self, parsed_args): parsed_args.password = utils.get_password(self.app.stdin) if '' == parsed_args.password: - LOG.warning(_("No password was supplied, authentication will fail " - "when a user does not have a password.")) + LOG.warning( + _( + "No password was supplied, authentication will fail " + "when a user does not have a password." 
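The --long branch of ListUser above builds a project cache and binds it into the ProjectColumn formatter with functools.partial. The body of ProjectColumn.human_readable sits outside this hunk, so the lookup below is only an approximation of its behaviour; the point is how the partially applied formatter receives each row's raw tenantId value. Cache contents are illustrative:

import functools
import types

from cliff import columns as cliff_columns


class ProjectColumn(cliff_columns.FormattableColumn):
    def __init__(self, value, project_cache=None):
        super().__init__(value)
        self.project_cache = project_cache or {}

    def human_readable(self):
        # Approximation: show the cached project's name, else the raw ID.
        project = self.project_cache.get(self._value)
        return project.name if project else self._value


project_cache = {'p1': types.SimpleNamespace(id='p1', name='demo')}
formatter = functools.partial(ProjectColumn, project_cache=project_cache)
print(formatter('p1').human_readable())   # 'demo'
print(formatter('p2').human_readable())   # 'p2' (not cached, falls back to the ID)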
+ ) + ) user = utils.find_resource( identity_client.users, @@ -373,7 +377,7 @@ class ShowUser(command.ShowOne): _description = _("Display user details") def get_parser(self, prog_name): - parser = super(ShowUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'user', metavar='', @@ -394,8 +398,8 @@ def take_action(self, parsed_args): except ks_exc.Forbidden: auth_ref = self.app.client_manager.auth_ref if ( - parsed_args.user == auth_ref.user_id or - parsed_args.user == auth_ref.username + parsed_args.user == auth_ref.user_id + or parsed_args.user == auth_ref.username ): # Ask for currently auth'ed project so return it info = { @@ -409,12 +413,8 @@ def take_action(self, parsed_args): raise if 'tenantId' in info: - info.update( - {'project_id': info.pop('tenantId')} - ) + info.update({'project_id': info.pop('tenantId')}) if 'tenant_id' in info: - info.update( - {'project_id': info.pop('tenant_id')} - ) + info.update({'project_id': info.pop('tenant_id')}) return zip(*sorted(info.items())) diff --git a/openstackclient/identity/v3/access_rule.py b/openstackclient/identity/v3/access_rule.py index ffda04f9e5..1859ef6fa5 100644 --- a/openstackclient/identity/v3/access_rule.py +++ b/openstackclient/identity/v3/access_rule.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -32,34 +32,41 @@ class DeleteAccessRule(command.Command): _description = _("Delete access rule(s)") def get_parser(self, prog_name): - parser = super(DeleteAccessRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'access_rule', metavar='', nargs="+", - help=_('Access rule(s) to delete (name or ID)'), + help=_('Access rule ID(s) to delete'), ) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity + conn = self.app.client_manager.sdk_connection + auth = conn.config.get_auth() + if auth is None: + # this will never happen + raise exceptions.CommandError('invalid authentication info') + user_id = auth.get_user_id(conn.identity) errors = 0 for ac in parsed_args.access_rule: try: - access_rule = utils.find_resource( - identity_client.access_rules, ac) - identity_client.access_rules.delete(access_rule.id) + access_rule = identity_client.get_access_rule(user_id, ac) + identity_client.delete_access_rule(user_id, access_rule.id) except Exception as e: errors += 1 - LOG.error(_("Failed to delete access rule with " - "ID '%(ac)s': %(e)s"), - {'ac': ac, 'e': e}) + LOG.error( + _("Failed to delete access rule with ID '%(ac)s': %(e)s"), + {'ac': ac, 'e': e}, + ) if errors > 0: total = len(parsed_args.access_rule) - msg = (_("%(errors)s of %(total)s access rules failed " - "to delete.") % {'errors': errors, 'total': total}) + msg = _( + "%(errors)s of %(total)s access rules failed to delete." 
+ ) % {'errors': errors, 'total': total} raise exceptions.CommandError(msg) @@ -67,7 +74,7 @@ class ListAccessRule(command.Lister): _description = _("List access rules") def get_parser(self, prog_name): - parser = super(ListAccessRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--user', metavar='', @@ -77,41 +84,67 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity if parsed_args.user: - user_id = common.find_user(identity_client, - parsed_args.user, - parsed_args.user_domain).id + user_id = common.find_user( + identity_client, parsed_args.user, parsed_args.user_domain + ).id else: - user_id = None + conn = self.app.client_manager.sdk_connection + auth = conn.config.get_auth() + if auth is None: + # this will never happen + raise exceptions.CommandError('invalid authentication info') + user_id = auth.get_user_id(conn.identity) columns = ('ID', 'Service', 'Method', 'Path') - data = identity_client.access_rules.list( - user=user_id) - return (columns, - (utils.get_item_properties( - s, columns, + data = identity_client.access_rules(user=user_id) + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class ShowAccessRule(command.ShowOne): _description = _("Display access rule details") def get_parser(self, prog_name): - parser = super(ShowAccessRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'access_rule', metavar='', - help=_('Access rule to display (name or ID)'), + help=_('Access rule ID to display'), ) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - access_rule = utils.find_resource(identity_client.access_rules, - parsed_args.access_rule) - - access_rule._info.pop('links', None) + identity_client = self.app.client_manager.sdk_connection.identity + conn = self.app.client_manager.sdk_connection + auth = conn.config.get_auth() + if auth is None: + # this will never happen + raise exceptions.CommandError('invalid authentication info') + user_id = auth.get_user_id(conn.identity) + + access_rule = identity_client.get_access_rule( + user_id, parsed_args.access_rule + ) - return zip(*sorted(access_rule._info.items())) + columns = ('ID', 'Method', 'Path', 'Service') + return ( + columns, + ( + utils.get_item_properties( + access_rule, + columns, + formatters={}, + ) + ), + ) diff --git a/openstackclient/identity/v3/application_credential.py b/openstackclient/identity/v3/application_credential.py index a208985624..3b38f17b5a 100644 --- a/openstackclient/identity/v3/application_credential.py +++ b/openstackclient/identity/v3/application_credential.py @@ -18,23 +18,121 @@ import datetime import json import logging +import typing as ty +import uuid -from osc_lib.command import command +from cliff import columns as cliff_columns from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common - LOG = logging.getLogger(__name__) +class RolesColumn(cliff_columns.FormattableColumn[ty.Any]): + """Generate a formatted string of role names.""" + + def human_readable(self): + return utils.format_list(list(r['name'] for r in self._value)) + + +def _format_application_credential( + application_credential, *, 
include_secret=False +): + column_headers: tuple[str, ...] = ( + 'ID', + 'Name', + 'Description', + 'Project ID', + 'Roles', + 'Unrestricted', + 'Access Rules', + 'Expires At', + ) + columns: tuple[str, ...] = ( + 'id', + 'name', + 'description', + 'project_id', + 'roles', + 'unrestricted', + 'access_rules', + 'expires_at', + ) + if include_secret: + column_headers += ('Secret',) + columns += ('secret',) + + return ( + column_headers, + utils.get_item_properties( + application_credential, columns, formatters={'roles': RolesColumn} + ), + ) + + +def _format_application_credentials(application_credentials): + column_headers = ( + 'ID', + 'Name', + 'Description', + 'Project ID', + 'Roles', + 'Unrestricted', + 'Access Rules', + 'Expires At', + ) + columns = ( + 'id', + 'name', + 'description', + 'project_id', + 'roles', + 'unrestricted', + 'access_rules', + 'expires_at', + ) + + return ( + column_headers, + ( + utils.get_item_properties( + x, columns, formatters={'roles': RolesColumn} + ) + for x in application_credentials + ), + ) + + +# TODO(stephenfin): Move this to osc_lib since it's useful elsewhere +def is_uuid_like(value) -> bool: + """Returns validation of a value as a UUID. + + :param val: Value to verify + :type val: string + :returns: bool + """ + try: + formatted_value = ( + value.replace('urn:', '') + .replace('uuid:', '') + .strip('{}') + .replace('-', '') + .lower() + ) + return str(uuid.UUID(value)).replace('-', '') == formatted_value + except (TypeError, ValueError, AttributeError): + return False + + class CreateApplicationCredential(command.ShowOne): _description = _("Create new application credential") def get_parser(self, prog_name): - parser = super(CreateApplicationCredential, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -43,23 +141,30 @@ def get_parser(self, prog_name): parser.add_argument( '--secret', metavar='', - help=_('Secret to use for authentication (if not provided, one' - ' will be generated)'), + help=_( + 'Secret to use for authentication (if not provided, one' + ' will be generated)' + ), ) parser.add_argument( '--role', metavar='', + dest='roles', action='append', default=[], - help=_('Roles to authorize (name or ID) (repeat option to set' - ' multiple values)'), + help=_( + 'Roles to authorize (name or ID) (repeat option to set' + ' multiple values)' + ), ) parser.add_argument( '--expiration', metavar='', - help=_('Sets an expiration date for the application credential,' - ' format of YYYY-mm-ddTHH:MM:SS (if not provided, the' - ' application credential will not expire)'), + help=_( + 'Sets an expiration date for the application credential,' + ' format of YYYY-mm-ddTHH:MM:SS (if not provided, the' + ' application credential will not expire)' + ), ) parser.add_argument( '--description', @@ -69,48 +174,57 @@ def get_parser(self, prog_name): parser.add_argument( '--unrestricted', action="store_true", - help=_('Enable application credential to create and delete other' - ' application credentials and trusts (this is potentially' - ' dangerous behavior and is disabled by default)'), + help=_( + 'Enable application credential to create and delete other' + ' application credentials and trusts (this is potentially' + ' dangerous behavior and is disabled by default)' + ), ) parser.add_argument( '--restricted', action="store_true", - help=_('Prohibit application credential from creating and deleting' - ' other application credentials and trusts (this is the' - ' default behavior)'), + help=_( + 
'Prohibit application credential from creating and deleting' + ' other application credentials and trusts (this is the' + ' default behavior)' + ), ) parser.add_argument( '--access-rules', metavar='', - help=_('Either a string or file path containing a JSON-formatted ' - 'list of access rules, each containing a request method, ' - 'path, and service, for example ' - '\'[{"method": "GET", ' - '"path": "/v2.1/servers", ' - '"service": "compute"}]\''), - + help=_( + 'Either a string or file path containing a JSON-formatted ' + 'list of access rules, each containing a request method, ' + 'path, and service, for example ' + '\'[{"method": "GET", ' + '"path": "/v2.1/servers", ' + '"service": "compute"}]\'' + ), ) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity + conn = self.app.client_manager.sdk_connection + auth = conn.config.get_auth() + if auth is None: + # this will never happen + raise exceptions.CommandError('invalid authentication info') + + user_id = auth.get_user_id(conn.identity) role_ids = [] - for role in parsed_args.role: - # A user can only create an application credential for themself, - # not for another user even as an admin, and only on the project to - # which they are currently scoped with a subset of the role - # assignments they have on that project. Don't bother trying to - # look up roles via keystone, just introspect the token. - role_id = common._get_token_resource(identity_client, "roles", - role) - role_ids.append(role_id) + for role in parsed_args.roles: + if is_uuid_like(role): + role_ids.append({'id': role}) + else: + role_ids.append({'name': role}) expires_at = None if parsed_args.expiration: - expires_at = datetime.datetime.strptime(parsed_args.expiration, - '%Y-%m-%dT%H:%M:%S') + expires_at = datetime.datetime.strptime( + parsed_args.expiration, '%Y-%m-%dT%H:%M:%S' + ) if parsed_args.restricted: unrestricted = False @@ -124,15 +238,17 @@ def take_action(self, parsed_args): try: with open(parsed_args.access_rules) as f: access_rules = json.load(f) - except IOError: - raise exceptions.CommandError( - _("Access rules is not valid JSON string or file does" - " not exist.")) + except OSError: + msg = _( + "Access rules is not valid JSON string or file does" + " not exist." 
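The role loop above now calls the is_uuid_like() helper to decide whether each --role value is sent to the API as {'id': ...} or {'name': ...}. A standalone check of that helper, with its body copied from this patch; the sample values are illustrative only:

import uuid


def is_uuid_like(value) -> bool:
    # Copied from the helper added in this patch.
    try:
        formatted_value = (
            value.replace('urn:', '')
            .replace('uuid:', '')
            .strip('{}')
            .replace('-', '')
            .lower()
        )
        return str(uuid.UUID(value)).replace('-', '') == formatted_value
    except (TypeError, ValueError, AttributeError):
        return False


print(is_uuid_like(str(uuid.uuid4())))  # True: hyphenated form, sent as {'id': ...}
print(is_uuid_like(uuid.uuid4().hex))   # True: bare hex form
print(is_uuid_like('member'))           # False: treated as a role name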
+ ) + raise exceptions.CommandError(msg) else: - access_rules = None + access_rules = [] - app_cred_manager = identity_client.application_credentials - application_credential = app_cred_manager.create( + application_credential = identity_client.create_application_credential( + user_id, parsed_args.name, roles=role_ids, expires_at=expires_at, @@ -142,21 +258,16 @@ def take_action(self, parsed_args): access_rules=access_rules, ) - application_credential._info.pop('links', None) - - # Format roles into something sensible - roles = application_credential._info.pop('roles') - msg = ' '.join(r['name'] for r in roles) - application_credential._info['roles'] = msg - - return zip(*sorted(application_credential._info.items())) + return _format_application_credential( + application_credential, include_secret=True + ) class DeleteApplicationCredential(command.Command): _description = _("Delete application credentials(s)") def get_parser(self, prog_name): - parser = super(DeleteApplicationCredential, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'application_credential', metavar='', @@ -166,32 +277,50 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity + conn = self.app.client_manager.sdk_connection + auth = conn.config.get_auth() + if auth is None: + # this will never happen + raise exceptions.CommandError('invalid authentication info') + + user_id = auth.get_user_id(conn.identity) errors = 0 for ac in parsed_args.application_credential: try: - app_cred = utils.find_resource( - identity_client.application_credentials, ac) - identity_client.application_credentials.delete(app_cred.id) + app_cred = identity_client.find_application_credential( + user_id, ac, ignore_missing=False + ) + identity_client.delete_application_credential( + user_id, app_cred.id + ) except Exception as e: errors += 1 - LOG.error(_("Failed to delete application credential with " - "name or ID '%(ac)s': %(e)s"), - {'ac': ac, 'e': e}) + LOG.error( + _( + "Failed to delete application credential with " + "name or ID '%(ac)s': %(e)s" + ), + {'ac': ac, 'e': e}, + ) if errors > 0: total = len(parsed_args.application_credential) - msg = (_("%(errors)s of %(total)s application credentials failed " - "to delete.") % {'errors': errors, 'total': total}) + msg = _( + "%(errors)s of %(total)s application credentials failed " + "to delete." 
+ ) % {'errors': errors, 'total': total} raise exceptions.CommandError(msg) + return None + class ListApplicationCredential(command.Lister): _description = _("List application credentials") def get_parser(self, prog_name): - parser = super(ListApplicationCredential, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--user', metavar='', @@ -201,29 +330,31 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity if parsed_args.user: - user_id = common.find_user(identity_client, - parsed_args.user, - parsed_args.user_domain).id + user_id = common.find_user_id_sdk( + identity_client, parsed_args.user, parsed_args.user_domain + ) else: - user_id = None + conn = self.app.client_manager.sdk_connection + auth = conn.config.get_auth() + if auth is None: + # this will never happen + raise exceptions.CommandError('invalid authentication info') + user_id = auth.get_user_id(conn.identity) + + application_credentials = identity_client.application_credentials( + user=user_id + ) - columns = ('ID', 'Name', 'Project ID', 'Description', 'Expires At') - data = identity_client.application_credentials.list( - user=user_id) - return (columns, - (utils.get_item_properties( - s, columns, - formatters={}, - ) for s in data)) + return _format_application_credentials(application_credentials) class ShowApplicationCredential(command.ShowOne): _description = _("Display application credential details") def get_parser(self, prog_name): - parser = super(ShowApplicationCredential, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'application_credential', metavar='', @@ -232,15 +363,16 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - app_cred = utils.find_resource(identity_client.application_credentials, - parsed_args.application_credential) - - app_cred._info.pop('links', None) - - # Format roles into something sensible - roles = app_cred._info.pop('roles') - msg = ' '.join(r['name'] for r in roles) - app_cred._info['roles'] = msg + identity_client = self.app.client_manager.sdk_connection.identity + conn = self.app.client_manager.sdk_connection + auth = conn.config.get_auth() + if auth is None: + # this will never happen + raise exceptions.CommandError('invalid authentication info') + user_id = auth.get_user_id(conn.identity) + + application_credential = identity_client.find_application_credential( + user_id, parsed_args.application_credential, ignore_missing=False + ) - return zip(*sorted(app_cred._info.items())) + return _format_application_credential(application_credential) diff --git a/openstackclient/identity/v3/catalog.py b/openstackclient/identity/v3/catalog.py index d1f7d31909..7d37e6cbd2 100644 --- a/openstackclient/identity/v3/catalog.py +++ b/openstackclient/identity/v3/catalog.py @@ -9,24 +9,24 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
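Application credential list/show output now flows through the _format_application_credential helpers above, which hand the raw 'roles' list to the RolesColumn formatter instead of pre-joining role names. A small sketch of what that column renders, assuming cliff's FormattableColumn keeps the wrapped value on self._value and osc_lib's format_list sorts and comma-joins; the role dicts are illustrative:

from cliff import columns as cliff_columns
from osc_lib import utils


class RolesColumn(cliff_columns.FormattableColumn):
    """Generate a formatted string of role names."""

    def human_readable(self):
        return utils.format_list([r['name'] for r in self._value])


col = RolesColumn([{'name': 'reader'}, {'name': 'member'}])
print(col.human_readable())   # 'member, reader' (sorted, comma separated)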
-# """Identity v3 Service Catalog action implementations""" import logging +import typing as ty from cliff import columns as cliff_columns -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) -class EndpointsColumn(cliff_columns.FormattableColumn): +class EndpointsColumn(cliff_columns.FormattableColumn[ty.Any]): def human_readable(self): if not self._value: return "" @@ -34,7 +34,7 @@ def human_readable(self): for ep in self._value: region = ep.get('region_id') or ep.get('region') or '' ret += region + '\n' - ret += " %s: %s\n" % (ep['interface'], ep['url']) + ret += " {}: {}\n".format(ep['interface'], ep['url']) return ret @@ -42,7 +42,6 @@ class ListCatalog(command.Lister): _description = _("List services in the service catalog") def take_action(self, parsed_args): - # Trigger auth if it has not happened yet auth_ref = self.app.client_manager.auth_ref if not auth_ref: @@ -52,20 +51,26 @@ def take_action(self, parsed_args): data = auth_ref.service_catalog.catalog columns = ('Name', 'Type', 'Endpoints') - return (columns, - (utils.get_dict_properties( - s, columns, + return ( + columns, + ( + utils.get_dict_properties( + s, + columns, formatters={ 'Endpoints': EndpointsColumn, }, - ) for s in data)) + ) + for s in data + ), + ) class ShowCatalog(command.ShowOne): _description = _("Display service catalog details") def get_parser(self, prog_name): - parser = super(ShowCatalog, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service', metavar='', @@ -74,7 +79,6 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - # Trigger auth if it has not happened yet auth_ref = self.app.client_manager.auth_ref if not auth_ref: @@ -84,8 +88,10 @@ def take_action(self, parsed_args): data = None for service in auth_ref.service_catalog.catalog: - if (service.get('name') == parsed_args.service or - service.get('type') == parsed_args.service): + if ( + service.get('name') == parsed_args.service + or service.get('type') == parsed_args.service + ): data = dict(service) data['endpoints'] = EndpointsColumn(data['endpoints']) if 'links' in data: diff --git a/openstackclient/identity/v3/consumer.py b/openstackclient/identity/v3/consumer.py index 2f925aba87..c58441ca83 100644 --- a/openstackclient/identity/v3/consumer.py +++ b/openstackclient/identity/v3/consumer.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -31,7 +31,7 @@ class CreateConsumer(command.ShowOne): _description = _("Create new consumer") def get_parser(self, prog_name): - parser = super(CreateConsumer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--description', metavar='', @@ -52,7 +52,7 @@ class DeleteConsumer(command.Command): _description = _("Delete consumer(s)") def get_parser(self, prog_name): - parser = super(DeleteConsumer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'consumer', metavar='', @@ -67,17 +67,25 @@ def take_action(self, parsed_args): for i in parsed_args.consumer: try: consumer = utils.find_resource( - identity_client.oauth1.consumers, i) + identity_client.oauth1.consumers, i + ) identity_client.oauth1.consumers.delete(consumer.id) except Exception as e: result += 
1 - LOG.error(_("Failed to delete consumer with name or " - "ID '%(consumer)s': %(e)s"), {'consumer': i, 'e': e}) + LOG.error( + _( + "Failed to delete consumer with name or " + "ID '%(consumer)s': %(e)s" + ), + {'consumer': i, 'e': e}, + ) if result > 0: total = len(parsed_args.consumer) - msg = (_("%(result)s of %(total)s consumers failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s consumers failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -87,18 +95,24 @@ class ListConsumer(command.Lister): def take_action(self, parsed_args): columns = ('ID', 'Description') data = self.app.client_manager.identity.oauth1.consumers.list() - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetConsumer(command.Command): _description = _("Set consumer properties") def get_parser(self, prog_name): - parser = super(SetConsumer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'consumer', metavar='', @@ -114,20 +128,22 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity consumer = utils.find_resource( - identity_client.oauth1.consumers, parsed_args.consumer) + identity_client.oauth1.consumers, parsed_args.consumer + ) kwargs = {} if parsed_args.description: kwargs['description'] = parsed_args.description consumer = identity_client.oauth1.consumers.update( - consumer.id, **kwargs) + consumer.id, **kwargs + ) class ShowConsumer(command.ShowOne): _description = _("Display consumer details") def get_parser(self, prog_name): - parser = super(ShowConsumer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'consumer', metavar='', @@ -138,7 +154,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity consumer = utils.find_resource( - identity_client.oauth1.consumers, parsed_args.consumer) + identity_client.oauth1.consumers, parsed_args.consumer + ) consumer._info.pop('links', None) return zip(*sorted(consumer._info.items())) diff --git a/openstackclient/identity/v3/credential.py b/openstackclient/identity/v3/credential.py index bf48df83d5..02eef649c7 100644 --- a/openstackclient/identity/v3/credential.py +++ b/openstackclient/identity/v3/credential.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -28,11 +28,28 @@ LOG = logging.getLogger(__name__) +def _format_credential(credential): + columns = ( + 'blob', + 'id', + 'project_id', + 'type', + 'user_id', + ) + return ( + columns, + utils.get_item_properties( + credential, + columns, + ), + ) + + class CreateCredential(command.ShowOne): _description = _("Create new credential") def get_parser(self, prog_name): - parser = super(CreateCredential, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'user', metavar='', @@ -52,35 +69,38 @@ def get_parser(self, prog_name): parser.add_argument( '--project', metavar='', - help=_('Project which limits the scope of ' - 'the credential (name or ID)'), + help=_( + 'Project which limits the scope of the credential (name or ID)' + ), 
) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - user_id = utils.find_resource(identity_client.users, - parsed_args.user).id + identity_client = self.app.client_manager.sdk_connection.identity + user_id = identity_client.find_user( + parsed_args.user, ignore_missing=False + ).id if parsed_args.project: - project = utils.find_resource(identity_client.projects, - parsed_args.project).id + project = identity_client.find_project( + parsed_args.project, ignore_missing=False + ).id else: project = None - credential = identity_client.credentials.create( - user=user_id, + credential = identity_client.create_credential( + user_id=user_id, type=parsed_args.type, blob=parsed_args.data, - project=project) + project_id=project, + ) - credential._info.pop('links') - return zip(*sorted(credential._info.items())) + return _format_credential(credential) class DeleteCredential(command.Command): _description = _("Delete credential(s)") def get_parser(self, prog_name): - parser = super(DeleteCredential, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'credential', metavar='', @@ -90,21 +110,27 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity result = 0 for i in parsed_args.credential: try: - identity_client.credentials.delete(i) + identity_client.delete_credential(i) except Exception as e: result += 1 - LOG.error(_("Failed to delete credentials with " - "ID '%(credential)s': %(e)s"), - {'credential': i, 'e': e}) + LOG.error( + _( + "Failed to delete credentials with " + "ID '%(credential)s': %(e)s" + ), + {'credential': i, 'e': e}, + ) if result > 0: total = len(parsed_args.credential) - msg = (_("%(result)s of %(total)s credential failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s credential failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -112,7 +138,7 @@ class ListCredential(command.Lister): _description = _("List credentials") def get_parser(self, prog_name): - parser = super(ListCredential, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--user', metavar='', @@ -127,14 +153,17 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity kwargs = {} if parsed_args.user: - user_id = common.find_user( - identity_client, - parsed_args.user, - parsed_args.user_domain, + domain_id = None + if parsed_args.user_domain: + domain_id = identity_client.find_domain( + parsed_args.user_domain, ignore_missing=False + ) + user_id = identity_client.find_user( + parsed_args.user, domain_id=domain_id, ignore_missing=False ).id kwargs["user_id"] = user_id @@ -143,19 +172,26 @@ def take_action(self, parsed_args): columns = ('ID', 'Type', 'User ID', 'Blob', 'Project ID') column_headers = ('ID', 'Type', 'User ID', 'Data', 'Project ID') - data = self.app.client_manager.identity.credentials.list(**kwargs) - return (column_headers, - (utils.get_item_properties( - s, columns, + data = identity_client.credentials(**kwargs) + + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class 
SetCredential(command.Command): _description = _("Set credential properties") def get_parser(self, prog_name): - parser = super(SetCredential, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'credential', metavar='', @@ -182,35 +218,40 @@ def get_parser(self, prog_name): parser.add_argument( '--project', metavar='', - help=_('Project which limits the scope of ' - 'the credential (name or ID)'), + help=_( + 'Project which limits the scope of the credential (name or ID)' + ), ) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - user_id = utils.find_resource(identity_client.users, - parsed_args.user).id + user_id = identity_client.find_user( + parsed_args.user, ignore_missing=False + ).id if parsed_args.project: - project = utils.find_resource(identity_client.projects, - parsed_args.project).id + project = identity_client.find_project( + parsed_args.project, ignore_missing=False + ).id else: project = None - identity_client.credentials.update(parsed_args.credential, - user=user_id, - type=parsed_args.type, - blob=parsed_args.data, - project=project) + identity_client.update_credential( + parsed_args.credential, + user=user_id, + type=parsed_args.type, + blob=parsed_args.data, + project=project, + ) class ShowCredential(command.ShowOne): _description = _("Display credential details") def get_parser(self, prog_name): - parser = super(ShowCredential, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'credential', metavar='', @@ -219,9 +260,7 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - credential = utils.find_resource(identity_client.credentials, - parsed_args.credential) + identity_client = self.app.client_manager.sdk_connection.identity + credential = identity_client.get_credential(parsed_args.credential) - credential._info.pop('links') - return zip(*sorted(credential._info.items())) + return _format_credential(credential) diff --git a/openstackclient/identity/v3/domain.py b/openstackclient/identity/v3/domain.py index e0bd10202a..28481c00d6 100644 --- a/openstackclient/identity/v3/domain.py +++ b/openstackclient/identity/v3/domain.py @@ -17,11 +17,11 @@ import logging -from keystoneauth1 import exceptions as ks_exc -from osc_lib.command import command +from openstack import exceptions as sdk_exceptions from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -29,11 +29,36 @@ LOG = logging.getLogger(__name__) +def _format_domain(domain): + columns = ( + 'id', + 'name', + 'is_enabled', + 'description', + 'options', + ) + column_headers = ( + 'id', + 'name', + 'enabled', + 'description', + 'options', + ) + + return ( + column_headers, + utils.get_item_properties( + domain, + columns, + ), + ) + + class CreateDomain(command.ShowOne): _description = _("Create new domain") def get_parser(self, prog_name): - parser = super(CreateDomain, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -47,12 +72,15 @@ def get_parser(self, prog_name): enable_group = parser.add_mutually_exclusive_group() enable_group.add_argument( '--enable', + dest='is_enabled', action='store_true', + default=True, help=_('Enable domain (default)'), ) 
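The domain commands in this hunk switch their --enable/--disable flags to a shared dest='is_enabled' with store_true/store_false actions: CreateDomain defaults it to True (enabled unless --disable is given), while SetDomain below passes default=None so an omitted flag leaves the domain untouched. A short argparse sketch of that pattern:

import argparse


def build_parser(default):
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        '--enable', dest='is_enabled', action='store_true', default=default
    )
    group.add_argument(
        '--disable', dest='is_enabled', action='store_false', default=default
    )
    return parser


# CreateDomain-style: enabled by default, --disable turns it off
print(build_parser(True).parse_args([]).is_enabled)             # True
print(build_parser(True).parse_args(['--disable']).is_enabled)  # False

# SetDomain-style: None means "flag not supplied, don't touch enabled state"
print(build_parser(None).parse_args([]).is_enabled)             # None
print(build_parser(None).parse_args(['--enable']).is_enabled)   # True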
enable_group.add_argument( '--disable', - action='store_true', + dest='is_enabled', + action='store_false', help=_('Disable domain'), ) parser.add_argument( @@ -64,38 +92,36 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - - enabled = True - if parsed_args.disable: - enabled = False + identity_client = self.app.client_manager.sdk_connection.identity - options = common.get_immutable_options(parsed_args) + options = {} + if parsed_args.immutable is not None: + options['immutable'] = parsed_args.immutable try: - domain = identity_client.domains.create( + domain = identity_client.create_domain( name=parsed_args.name, description=parsed_args.description, options=options, - enabled=enabled, + is_enabled=parsed_args.is_enabled, ) - except ks_exc.Conflict: + except sdk_exceptions.ConflictException: if parsed_args.or_show: - domain = utils.find_resource(identity_client.domains, - parsed_args.name) + domain = identity_client.find_domain( + parsed_args.name, ignore_missing=False + ) LOG.info(_('Returning existing domain %s'), domain.name) else: raise - domain._info.pop('links') - return zip(*sorted(domain._info.items())) + return _format_domain(domain) class DeleteDomain(command.Command): _description = _("Delete domain(s)") def get_parser(self, prog_name): - parser = super(DeleteDomain, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'domain', metavar='', @@ -105,21 +131,28 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity result = 0 for i in parsed_args.domain: try: - domain = utils.find_resource(identity_client.domains, i) - identity_client.domains.delete(domain.id) + domain = identity_client.find_domain(i, ignore_missing=False) + identity_client.delete_domain(domain.id) except Exception as e: result += 1 - LOG.error(_("Failed to delete domain with name or " - "ID '%(domain)s': %(e)s"), {'domain': i, 'e': e}) + LOG.error( + _( + "Failed to delete domain with name or " + "ID '%(domain)s': %(e)s" + ), + {'domain': i, 'e': e}, + ) if result > 0: total = len(parsed_args.domain) - msg = (_("%(result)s of %(total)s domains failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s domains failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -127,7 +160,7 @@ class ListDomain(command.Lister): _description = _("List domains") def get_parser(self, prog_name): - parser = super(ListDomain, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--name', metavar='', @@ -135,7 +168,7 @@ def get_parser(self, prog_name): ) parser.add_argument( '--enabled', - dest='enabled', + dest='is_enabled', action='store_true', help=_('The domains that are enabled will be returned'), ) @@ -145,23 +178,33 @@ def take_action(self, parsed_args): kwargs = {} if parsed_args.name: kwargs['name'] = parsed_args.name - if parsed_args.enabled: - kwargs['enabled'] = True - - columns = ('ID', 'Name', 'Enabled', 'Description') - data = self.app.client_manager.identity.domains.list(**kwargs) - return (columns, - (utils.get_item_properties( - s, columns, + if parsed_args.is_enabled: + kwargs['is_enabled'] = True + + columns = ('id', 'name', 'is_enabled', 'description') + column_headers = ('ID', 'Name', 'Enabled', 'Description') + 
data = self.app.client_manager.sdk_connection.identity.domains( + **kwargs + ) + + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetDomain(command.Command): _description = _("Set domain properties") def get_parser(self, prog_name): - parser = super(SetDomain, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'domain', metavar='', @@ -180,44 +223,44 @@ def get_parser(self, prog_name): enable_group = parser.add_mutually_exclusive_group() enable_group.add_argument( '--enable', + dest='is_enabled', action='store_true', + default=None, help=_('Enable domain'), ) enable_group.add_argument( '--disable', - action='store_true', + dest='is_enabled', + action='store_false', + default=None, help=_('Disable domain'), ) common.add_resource_option_to_parser(parser) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - domain = utils.find_resource(identity_client.domains, - parsed_args.domain) + identity_client = self.app.client_manager.sdk_connection.identity + domain = identity_client.find_domain( + parsed_args.domain, ignore_missing=False + ) kwargs = {} if parsed_args.name: kwargs['name'] = parsed_args.name if parsed_args.description: kwargs['description'] = parsed_args.description + if parsed_args.is_enabled is not None: + kwargs['is_enabled'] = parsed_args.is_enabled + if parsed_args.immutable is not None: + kwargs['options'] = {'immutable': parsed_args.immutable} - if parsed_args.enable: - kwargs['enabled'] = True - if parsed_args.disable: - kwargs['enabled'] = False - - options = common.get_immutable_options(parsed_args) - if options: - kwargs['options'] = options - - identity_client.domains.update(domain.id, **kwargs) + identity_client.update_domain(domain.id, **kwargs) class ShowDomain(command.ShowOne): _description = _("Display domain details") def get_parser(self, prog_name): - parser = super(ShowDomain, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'domain', metavar='', @@ -226,13 +269,9 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - - domain_str = common._get_token_resource(identity_client, 'domain', - parsed_args.domain) - - domain = utils.find_resource(identity_client.domains, - domain_str) + identity_client = self.app.client_manager.sdk_connection.identity + domain = identity_client.find_domain( + parsed_args.domain, ignore_missing=False + ) - domain._info.pop('links') - return zip(*sorted(domain._info.items())) + return _format_domain(domain) diff --git a/openstackclient/identity/v3/ec2creds.py b/openstackclient/identity/v3/ec2creds.py index 921b9168b0..dbbf7a2474 100644 --- a/openstackclient/identity/v3/ec2creds.py +++ b/openstackclient/identity/v3/ec2creds.py @@ -14,10 +14,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -36,17 +36,20 @@ def _determine_ec2_user(parsed_args, client_manager): user_domain = None if parsed_args.user_domain: - user_domain = common.find_domain(client_manager.identity, - parsed_args.user_domain) + user_domain = common.find_domain( + client_manager.identity, parsed_args.user_domain + ) if parsed_args.user: if user_domain is not None: - user = 
utils.find_resource(client_manager.identity.users, - parsed_args.user, - domain_id=user_domain.id).id - else: user = utils.find_resource( client_manager.identity.users, - parsed_args.user).id + parsed_args.user, + domain_id=user_domain.id, + ).id + else: + user = utils.find_resource( + client_manager.identity.users, parsed_args.user + ).id else: # Get the user from the current auth user = client_manager.auth_ref.user_id @@ -57,18 +60,22 @@ class CreateEC2Creds(command.ShowOne): _description = _("Create EC2 credentials") def get_parser(self, prog_name): - parser = super(CreateEC2Creds, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--project', metavar='', - help=_('Create credentials in project ' - '(name or ID; default: current authenticated project)'), + help=_( + 'Create credentials in project ' + '(name or ID; default: current authenticated project)' + ), ) parser.add_argument( '--user', metavar='', - help=_('Create credentials for user ' - '(name or ID; default: current authenticated user)'), + help=_( + 'Create credentials for user ' + '(name or ID; default: current authenticated user)' + ), ) common.add_user_domain_option_to_parser(parser) common.add_project_domain_option_to_parser(parser) @@ -81,18 +88,21 @@ def take_action(self, parsed_args): project_domain = None if parsed_args.project_domain: - project_domain = common.find_domain(identity_client, - parsed_args.project_domain) + project_domain = common.find_domain( + identity_client, parsed_args.project_domain + ) if parsed_args.project: if project_domain is not None: - project = utils.find_resource(identity_client.projects, - parsed_args.project, - domain_id=project_domain.id).id - else: project = utils.find_resource( identity_client.projects, - parsed_args.project).id + parsed_args.project, + domain_id=project_domain.id, + ).id + else: + project = utils.find_resource( + identity_client.projects, parsed_args.project + ).id else: # Get the project from the current auth project = self.app.client_manager.auth_ref.project_id @@ -103,9 +113,7 @@ def take_action(self, parsed_args): info.update(creds._info) if 'tenant_id' in info: - info.update( - {'project_id': info.pop('tenant_id')} - ) + info.update({'project_id': info.pop('tenant_id')}) return zip(*sorted(info.items())) @@ -114,7 +122,7 @@ class DeleteEC2Creds(command.Command): _description = _("Delete EC2 credentials") def get_parser(self, prog_name): - parser = super(DeleteEC2Creds, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'access_key', metavar='', @@ -138,14 +146,20 @@ def take_action(self, parsed_args): client_manager.identity.ec2.delete(user, i) except Exception as e: result += 1 - LOG.error(_("Failed to delete EC2 credentials with " - "access key '%(access_key)s': %(e)s"), - {'access_key': i, 'e': e}) + LOG.error( + _( + "Failed to delete EC2 credentials with " + "access key '%(access_key)s': %(e)s" + ), + {'access_key': i, 'e': e}, + ) if result > 0: total = len(parsed_args.access_key) - msg = (_("%(result)s of %(total)s EC2 keys failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s EC2 keys failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -153,7 +167,7 @@ class ListEC2Creds(command.Lister): _description = _("List EC2 credentials") def get_parser(self, prog_name): - parser = super(ListEC2Creds, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 
'--user', metavar='', @@ -170,18 +184,24 @@ def take_action(self, parsed_args): column_headers = ('Access', 'Secret', 'Project ID', 'User ID') data = client_manager.identity.ec2.list(user) - return (column_headers, - (utils.get_item_properties( - s, columns, + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class ShowEC2Creds(command.ShowOne): _description = _("Display EC2 credentials details") def get_parser(self, prog_name): - parser = super(ShowEC2Creds, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'access_key', metavar='', @@ -204,8 +224,6 @@ def take_action(self, parsed_args): info.update(creds._info) if 'tenant_id' in info: - info.update( - {'project_id': info.pop('tenant_id')} - ) + info.update({'project_id': info.pop('tenant_id')}) return zip(*sorted(info.items())) diff --git a/openstackclient/identity/v3/endpoint.py b/openstackclient/identity/v3/endpoint.py index a3bd2683ee..9083fdc708 100644 --- a/openstackclient/identity/v3/endpoint.py +++ b/openstackclient/identity/v3/endpoint.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -28,30 +28,49 @@ LOG = logging.getLogger(__name__) -def get_service_name(service): - if hasattr(service, 'name'): - return service.name - else: - return '' +def _format_endpoint(endpoint, service): + columns = ( + 'is_enabled', + 'id', + 'interface', + 'region_id', + 'region_id', + 'service_id', + 'url', + ) + column_headers = ( + 'enabled', + 'id', + 'interface', + 'region', + 'region_id', + 'service_id', + 'url', + 'service_name', + 'service_type', + ) + + data = utils.get_item_properties(endpoint, columns) + data += (getattr(service, 'name', ''), service.type) + return column_headers, data class AddProjectToEndpoint(command.Command): _description = _("Associate a project to an endpoint") def get_parser(self, prog_name): - parser = super( - AddProjectToEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpoint', metavar='', - help=_('Endpoint to associate with ' - 'specified project (name or ID)'), + help=_( + 'Endpoint to associate with specified project (name or ID)' + ), ) parser.add_argument( 'project', metavar='', - help=_('Project to associate with ' - 'specified endpoint name or ID)'), + help=_('Project to associate with specified endpoint name or ID)'), ) common.add_project_domain_option_to_parser(parser) return parser @@ -59,23 +78,22 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.identity - endpoint = utils.find_resource(client.endpoints, - parsed_args.endpoint) + endpoint = utils.find_resource(client.endpoints, parsed_args.endpoint) - project = common.find_project(client, - parsed_args.project, - parsed_args.project_domain) + project = common.find_project( + client, parsed_args.project, parsed_args.project_domain + ) client.endpoint_filter.add_endpoint_to_project( - project=project.id, - endpoint=endpoint.id) + project=project.id, endpoint=endpoint.id + ) class CreateEndpoint(command.ShowOne): _description = _("Create new endpoint") def get_parser(self, prog_name): - parser = super(CreateEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service', 
metavar='', @@ -114,30 +132,30 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - service = common.find_service(identity_client, parsed_args.service) + identity_client = self.app.client_manager.sdk_connection.identity + service = common.find_service_sdk(identity_client, parsed_args.service) - endpoint = identity_client.endpoints.create( - service=service.id, - url=parsed_args.url, - interface=parsed_args.interface, - region=parsed_args.region, - enabled=parsed_args.enabled - ) + kwargs = {} + + kwargs['service_id'] = service.id + kwargs['url'] = parsed_args.url + kwargs['interface'] = parsed_args.interface + kwargs['is_enabled'] = parsed_args.enabled - info = {} - endpoint._info.pop('links') - info.update(endpoint._info) - info['service_name'] = get_service_name(service) - info['service_type'] = service.type - return zip(*sorted(info.items())) + if parsed_args.region: + region = identity_client.get_region(parsed_args.region) + kwargs['region_id'] = region.id + + endpoint = identity_client.create_endpoint(**kwargs) + + return _format_endpoint(endpoint, service=service) class DeleteEndpoint(command.Command): _description = _("Delete endpoint(s)") def get_parser(self, prog_name): - parser = super(DeleteEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpoint', metavar='', @@ -147,22 +165,30 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity result = 0 for i in parsed_args.endpoint: try: - endpoint_id = utils.find_resource( - identity_client.endpoints, i).id - identity_client.endpoints.delete(endpoint_id) + endpoint_id = identity_client.find_endpoint( + i, ignore_missing=False + ).id + identity_client.delete_endpoint(endpoint_id) except Exception as e: result += 1 - LOG.error(_("Failed to delete endpoint with " - "ID '%(endpoint)s': %(e)s"), {'endpoint': i, 'e': e}) + LOG.error( + _( + "Failed to delete endpoint with " + "ID '%(endpoint)s': %(e)s" + ), + {'endpoint': i, 'e': e}, + ) if result > 0: total = len(parsed_args.endpoint) - msg = (_("%(result)s of %(total)s endpoints failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s endpoints failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -170,7 +196,7 @@ class ListEndpoint(command.Lister): _description = _("List endpoints") def get_parser(self, prog_name): - parser = super(ListEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--service', metavar='', @@ -202,77 +228,111 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity endpoint = None if parsed_args.endpoint: - endpoint = utils.find_resource(identity_client.endpoints, - parsed_args.endpoint) - project = None + endpoint = identity_client.find_endpoint( + parsed_args.endpoint, ignore_missing=False + ) + + project_domain_id = None + if parsed_args.project_domain: + project_domain_id = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.project_domain, + ) + + project_id = None if parsed_args.project: - project = common.find_project(identity_client, - parsed_args.project, - 
parsed_args.project_domain) + project_id = common._find_sdk_id( + identity_client.find_project, + name_or_id=common._get_token_resource( + identity_client, 'project', parsed_args.project + ), + domain_id=project_domain_id, + ) if endpoint: - columns = ('ID', 'Name') - data = ( - identity_client.endpoint_filter - .list_projects_for_endpoint(endpoint=endpoint.id) - ) + column_headers: tuple[str, ...] = ('ID', 'Name') + columns: tuple[str, ...] = ('id', 'name') + data = identity_client.endpoint_projects(endpoint=endpoint.id) else: - columns = ('ID', 'Region', 'Service Name', 'Service Type', - 'Enabled', 'Interface', 'URL') + column_headers = ( + 'ID', + 'Region', + 'Service Name', + 'Service Type', + 'Enabled', + 'Interface', + 'URL', + ) + columns = ( + 'id', + 'region_id', + 'service_name', + 'service_type', + 'is_enabled', + 'interface', + 'url', + ) kwargs = {} if parsed_args.service: - service = common.find_service(identity_client, - parsed_args.service) - kwargs['service'] = service.id + service = common.find_service_sdk( + identity_client, parsed_args.service + ) + kwargs['service_id'] = service.id if parsed_args.interface: kwargs['interface'] = parsed_args.interface if parsed_args.region: - kwargs['region'] = parsed_args.region + region = identity_client.get_region(parsed_args.region) + kwargs['region_id'] = region.id - if project: - data = ( - identity_client.endpoint_filter - .list_endpoints_for_project(project=project.id) + if project_id: + data = list( + identity_client.project_endpoints(project=project_id) ) else: - data = identity_client.endpoints.list(**kwargs) - - service_list = identity_client.services.list() + data = list(identity_client.endpoints(**kwargs)) for ep in data: - service = common.find_service_in_list(service_list, - ep.service_id) - ep.service_name = get_service_name(service) + service = identity_client.find_service( + ep.service_id, ignore_missing=False + ) + ep.service_name = getattr(service, 'name', '') ep.service_type = service.type - return (columns, - (utils.get_item_properties( - s, columns, + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class RemoveProjectFromEndpoint(command.Command): _description = _("Dissociate a project from an endpoint") def get_parser(self, prog_name): - parser = super( - RemoveProjectFromEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpoint', metavar='', - help=_('Endpoint to dissociate from ' - 'specified project (name or ID)'), + help=_( + 'Endpoint to dissociate from specified project (name or ID)' + ), ) parser.add_argument( 'project', metavar='', - help=_('Project to dissociate from ' - 'specified endpoint name or ID)'), + help=_( + 'Project to dissociate from specified endpoint name or ID)' + ), ) common.add_project_domain_option_to_parser(parser) return parser @@ -280,23 +340,22 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.identity - endpoint = utils.find_resource(client.endpoints, - parsed_args.endpoint) + endpoint = utils.find_resource(client.endpoints, parsed_args.endpoint) - project = common.find_project(client, - parsed_args.project, - parsed_args.project_domain) + project = common.find_project( + client, parsed_args.project, parsed_args.project_domain + ) client.endpoint_filter.delete_endpoint_from_project( - project=project.id, - endpoint=endpoint.id) + project=project.id, endpoint=endpoint.id + ) class 
SetEndpoint(command.Command): _description = _("Set endpoint properties") def get_parser(self, prog_name): - parser = super(SetEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpoint', metavar='', @@ -339,27 +398,36 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - endpoint = utils.find_resource(identity_client.endpoints, - parsed_args.endpoint) + identity_client = self.app.client_manager.sdk_connection.identity + endpoint = identity_client.find_endpoint( + parsed_args.endpoint, ignore_missing=False + ) + + kwargs = {} - service_id = None if parsed_args.service: - service = common.find_service(identity_client, parsed_args.service) - service_id = service.id - enabled = None + service = common.find_service_sdk( + identity_client, parsed_args.service + ) + kwargs['service_id'] = service.id + if parsed_args.enabled: - enabled = True + kwargs['is_enabled'] = True if parsed_args.disabled: - enabled = False + kwargs['is_enabled'] = False + + if parsed_args.url: + kwargs['url'] = parsed_args.url + + if parsed_args.interface: + kwargs['interface'] = parsed_args.interface - identity_client.endpoints.update( + if parsed_args.region: + kwargs['region_id'] = parsed_args.region + + identity_client.update_endpoint( endpoint.id, - service=service_id, - url=parsed_args.url, - interface=parsed_args.interface, - region=parsed_args.region, - enabled=enabled + **kwargs, ) @@ -367,25 +435,23 @@ class ShowEndpoint(command.ShowOne): _description = _("Display endpoint details") def get_parser(self, prog_name): - parser = super(ShowEndpoint, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpoint', metavar='', - help=_('Endpoint to display (endpoint ID, service ID,' - ' service name, service type)'), + help=_( + 'Endpoint to display (endpoint ID, service ID,' + ' service name, service type)' + ), ) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - endpoint = utils.find_resource(identity_client.endpoints, - parsed_args.endpoint) - - service = common.find_service(identity_client, endpoint.service_id) - - info = {} - endpoint._info.pop('links') - info.update(endpoint._info) - info['service_name'] = get_service_name(service) - info['service_type'] = service.type - return zip(*sorted(info.items())) + identity_client = self.app.client_manager.sdk_connection.identity + endpoint = identity_client.find_endpoint( + parsed_args.endpoint, ignore_missing=False + ) + + service = common.find_service_sdk(identity_client, endpoint.service_id) + + return _format_endpoint(endpoint, service) diff --git a/openstackclient/identity/v3/endpoint_group.py b/openstackclient/identity/v3/endpoint_group.py index 9bb026a9b8..3965f31978 100644 --- a/openstackclient/identity/v3/endpoint_group.py +++ b/openstackclient/identity/v3/endpoint_group.py @@ -16,10 +16,10 @@ import json import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__) -class _FiltersReader(object): +class _FiltersReader: _description = _("Helper class capable of reading filters from files") def _read_filters(self, path): @@ -50,8 +50,10 @@ def _read_filters(self, path): try: rules = json.loads(blob) except ValueError as e: - 
msg = _("An error occurred when reading filters from file " - "%(path)s: %(error)s") % {"path": path, "error": e} + msg = _( + "An error occurred when reading filters from file " + "%(path)s: %(error)s" + ) % {"path": path, "error": e} raise exceptions.CommandError(msg) else: return rules @@ -61,8 +63,7 @@ class AddProjectToEndpointGroup(command.Command): _description = _("Add a project to an endpoint group") def get_parser(self, prog_name): - parser = super( - AddProjectToEndpointGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpointgroup', metavar='', @@ -79,23 +80,24 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.identity - endpointgroup = utils.find_resource(client.endpoint_groups, - parsed_args.endpointgroup) + endpointgroup = utils.find_resource( + client.endpoint_groups, parsed_args.endpointgroup + ) - project = common.find_project(client, - parsed_args.project, - parsed_args.project_domain) + project = common.find_project( + client, parsed_args.project, parsed_args.project_domain + ) client.endpoint_filter.add_endpoint_group_to_project( - endpoint_group=endpointgroup.id, - project=project.id) + endpoint_group=endpointgroup.id, project=project.id + ) class CreateEndpointGroup(command.ShowOne, _FiltersReader): _description = _("Create new endpoint group") def get_parser(self, prog_name): - parser = super(CreateEndpointGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -122,7 +124,7 @@ def take_action(self, parsed_args): endpoint_group = identity_client.endpoint_groups.create( name=parsed_args.name, filters=filters, - description=parsed_args.description + description=parsed_args.description, ) info = {} @@ -135,7 +137,7 @@ class DeleteEndpointGroup(command.Command): _description = _("Delete endpoint group(s)") def get_parser(self, prog_name): - parser = super(DeleteEndpointGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpointgroup', metavar='', @@ -150,18 +152,24 @@ def take_action(self, parsed_args): for i in parsed_args.endpointgroup: try: endpoint_id = utils.find_resource( - identity_client.endpoint_groups, i).id + identity_client.endpoint_groups, i + ).id identity_client.endpoint_groups.delete(endpoint_id) except Exception as e: result += 1 - LOG.error(_("Failed to delete endpoint group with " - "ID '%(endpointgroup)s': %(e)s"), - {'endpointgroup': i, 'e': e}) + LOG.error( + _( + "Failed to delete endpoint group with " + "ID '%(endpointgroup)s': %(e)s" + ), + {'endpointgroup': i, 'e': e}, + ) if result > 0: total = len(parsed_args.endpointgroup) - msg = (_("%(result)s of %(total)s endpointgroups failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s endpointgroups failed to delete." 
+ ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -169,7 +177,7 @@ class ListEndpointGroup(command.Lister): _description = _("List endpoint groups") def get_parser(self, prog_name): - parser = super(ListEndpointGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) list_group = parser.add_mutually_exclusive_group() list_group.add_argument( '--endpointgroup', @@ -193,40 +201,48 @@ def take_action(self, parsed_args): endpointgroup = None if parsed_args.endpointgroup: - endpointgroup = utils.find_resource(client.endpoint_groups, - parsed_args.endpointgroup) + endpointgroup = utils.find_resource( + client.endpoint_groups, parsed_args.endpointgroup + ) project = None if parsed_args.project: - project = common.find_project(client, - parsed_args.project, - parsed_args.domain) + project = common.find_project( + client, parsed_args.project, parsed_args.domain + ) if endpointgroup: # List projects associated to the endpoint group columns = ('ID', 'Name', 'Description') data = client.endpoint_filter.list_projects_for_endpoint_group( - endpoint_group=endpointgroup.id) + endpoint_group=endpointgroup.id + ) elif project: columns = ('ID', 'Name', 'Description') data = client.endpoint_filter.list_endpoint_groups_for_project( - project=project.id) + project=project.id + ) else: columns = ('ID', 'Name', 'Description') data = client.endpoint_groups.list() - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class RemoveProjectFromEndpointGroup(command.Command): _description = _("Remove project from endpoint group") def get_parser(self, prog_name): - parser = super( - RemoveProjectFromEndpointGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpointgroup', metavar='', @@ -243,23 +259,24 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.identity - endpointgroup = utils.find_resource(client.endpoint_groups, - parsed_args.endpointgroup) + endpointgroup = utils.find_resource( + client.endpoint_groups, parsed_args.endpointgroup + ) - project = common.find_project(client, - parsed_args.project, - parsed_args.project_domain) + project = common.find_project( + client, parsed_args.project, parsed_args.project_domain + ) client.endpoint_filter.delete_endpoint_group_from_project( - endpoint_group=endpointgroup.id, - project=project.id) + endpoint_group=endpointgroup.id, project=project.id + ) class SetEndpointGroup(command.Command, _FiltersReader): _description = _("Set endpoint group properties") def get_parser(self, prog_name): - parser = super(SetEndpointGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpointgroup', metavar='', @@ -285,8 +302,9 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity - endpointgroup = utils.find_resource(identity_client.endpoint_groups, - parsed_args.endpointgroup) + endpointgroup = utils.find_resource( + identity_client.endpoint_groups, parsed_args.endpointgroup + ) filters = None if parsed_args.filters: @@ -296,7 +314,7 @@ def take_action(self, parsed_args): endpointgroup.id, name=parsed_args.name, filters=filters, - description=parsed_args.description + description=parsed_args.description, ) @@ -304,7 +322,7 @@ class ShowEndpointGroup(command.ShowOne): _description = 
_("Display endpoint group details") def get_parser(self, prog_name): - parser = super(ShowEndpointGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'endpointgroup', metavar='', @@ -314,8 +332,9 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity - endpoint_group = utils.find_resource(identity_client.endpoint_groups, - parsed_args.endpointgroup) + endpoint_group = utils.find_resource( + identity_client.endpoint_groups, parsed_args.endpointgroup + ) info = {} endpoint_group._info.pop('links') diff --git a/openstackclient/identity/v3/federation_protocol.py b/openstackclient/identity/v3/federation_protocol.py index 0929469e7e..850ec0ac7f 100644 --- a/openstackclient/identity/v3/federation_protocol.py +++ b/openstackclient/identity/v3/federation_protocol.py @@ -16,10 +16,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -30,19 +30,23 @@ class CreateProtocol(command.ShowOne): _description = _("Create new federation protocol") def get_parser(self, prog_name): - parser = super(CreateProtocol, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'federation_protocol', metavar='', - help=_('New federation protocol name (must be unique ' - 'per identity provider)'), + help=_( + 'New federation protocol name (must be unique ' + 'per identity provider)' + ), ) parser.add_argument( '--identity-provider', metavar='', required=True, - help=_('Identity provider that will support the new federation ' - ' protocol (name or ID) (required)'), + help=_( + 'Identity provider that will support the new federation ' + ' protocol (name or ID) (required)' + ), ) parser.add_argument( '--mapping', @@ -58,7 +62,8 @@ def take_action(self, parsed_args): protocol = identity_client.federation.protocols.create( protocol_id=parsed_args.federation_protocol, identity_provider=parsed_args.identity_provider, - mapping=parsed_args.mapping) + mapping=parsed_args.mapping, + ) info = dict(protocol._info) # NOTE(marek-denis): Identity provider is not included in a response # from Keystone, however it should be listed to the user. 
Add it @@ -74,7 +79,7 @@ class DeleteProtocol(command.Command): _description = _("Delete federation protocol(s)") def get_parser(self, prog_name): - parser = super(DeleteProtocol, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'federation_protocol', metavar='', @@ -85,8 +90,10 @@ def get_parser(self, prog_name): '--identity-provider', metavar='', required=True, - help=_('Identity provider that supports ' - '(name or ID) (required)'), + help=_( + 'Identity provider that supports ' + '(name or ID) (required)' + ), ) return parser @@ -97,17 +104,24 @@ def take_action(self, parsed_args): for i in parsed_args.federation_protocol: try: identity_client.federation.protocols.delete( - parsed_args.identity_provider, i) + parsed_args.identity_provider, i + ) except Exception as e: result += 1 - LOG.error(_("Failed to delete federation protocol " - "with name or ID '%(protocol)s': %(e)s"), - {'protocol': i, 'e': e}) + LOG.error( + _( + "Failed to delete federation protocol " + "with name or ID '%(protocol)s': %(e)s" + ), + {'protocol': i, 'e': e}, + ) if result > 0: total = len(parsed_args.federation_protocol) - msg = (_("%(result)s of %(total)s federation protocols failed" - " to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s federation protocols failed" + " to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -115,7 +129,7 @@ class ListProtocols(command.Lister): _description = _("List federation protocols") def get_parser(self, prog_name): - parser = super(ListProtocols, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--identity-provider', metavar='', @@ -129,11 +143,14 @@ def take_action(self, parsed_args): identity_client = self.app.client_manager.identity protocols = identity_client.federation.protocols.list( - parsed_args.identity_provider) + parsed_args.identity_provider + ) columns = ('id', 'mapping') response_attributes = ('id', 'mapping_id') - items = [utils.get_item_properties(s, response_attributes) - for s in protocols] + items = [ + utils.get_item_properties(s, response_attributes) + for s in protocols + ] return (columns, items) @@ -141,7 +158,7 @@ class SetProtocol(command.Command): _description = _("Set federation protocol properties") def get_parser(self, prog_name): - parser = super(SetProtocol, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'federation_protocol', metavar='', @@ -151,8 +168,10 @@ def get_parser(self, prog_name): '--identity-provider', metavar='', required=True, - help=_('Identity provider that supports ' - '(name or ID) (required)'), + help=_( + 'Identity provider that supports ' + '(name or ID) (required)' + ), ) parser.add_argument( '--mapping', @@ -165,8 +184,10 @@ def take_action(self, parsed_args): identity_client = self.app.client_manager.identity protocol = identity_client.federation.protocols.update( - parsed_args.identity_provider, parsed_args.federation_protocol, - parsed_args.mapping) + parsed_args.identity_provider, + parsed_args.federation_protocol, + parsed_args.mapping, + ) info = dict(protocol._info) # NOTE(marek-denis): Identity provider is not included in a response # from Keystone, however it should be listed to the user. 
Add it @@ -181,7 +202,7 @@ class ShowProtocol(command.ShowOne): _description = _("Display federation protocol details") def get_parser(self, prog_name): - parser = super(ShowProtocol, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'federation_protocol', metavar='', @@ -191,8 +212,10 @@ def get_parser(self, prog_name): '--identity-provider', metavar='', required=True, - help=_('Identity provider that supports ' - '(name or ID) (required)'), + help=_( + 'Identity provider that supports ' + '(name or ID) (required)' + ), ) return parser @@ -200,7 +223,8 @@ def take_action(self, parsed_args): identity_client = self.app.client_manager.identity protocol = identity_client.federation.protocols.get( - parsed_args.identity_provider, parsed_args.federation_protocol) + parsed_args.identity_provider, parsed_args.federation_protocol + ) info = dict(protocol._info) info['mapping'] = info.pop('mapping_id') info.pop('links', None) diff --git a/openstackclient/identity/v3/group.py b/openstackclient/identity/v3/group.py index 46c3142cdd..a2c2fd3367 100644 --- a/openstackclient/identity/v3/group.py +++ b/openstackclient/identity/v3/group.py @@ -17,11 +17,11 @@ import logging -from keystoneauth1 import exceptions as ks_exc -from osc_lib.command import command +from openstack import exceptions as sdk_exc from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -29,11 +29,30 @@ LOG = logging.getLogger(__name__) +def _format_group(group): + columns = ( + 'description', + 'domain_id', + 'id', + 'name', + ) + column_headers = ( + 'description', + 'domain_id', + 'id', + 'name', + ) + return ( + column_headers, + utils.get_item_properties(group, columns), + ) + + class AddUserToGroup(command.Command): _description = _("Add user to group") def get_parser(self, prog_name): - parser = super(AddUserToGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'group', metavar='', @@ -43,27 +62,29 @@ def get_parser(self, prog_name): 'user', metavar='', nargs='+', - help=_('User(s) to add to (name or ID) ' - '(repeat option to add multiple users)'), + help=_( + 'User(s) to add to (name or ID) ' + '(repeat option to add multiple users)' + ), ) common.add_group_domain_option_to_parser(parser) common.add_user_domain_option_to_parser(parser) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - group_id = common.find_group(identity_client, - parsed_args.group, - parsed_args.group_domain).id + group_id = common.find_group_id_sdk( + identity_client, parsed_args.group, parsed_args.group_domain + ) result = 0 for i in parsed_args.user: try: - user_id = common.find_user(identity_client, - i, - parsed_args.user_domain).id - identity_client.users.add_to_group(user_id, group_id) + user_id = common.find_user_id_sdk( + identity_client, i, parsed_args.user_domain + ) + identity_client.add_user_to_group(user_id, group_id) except Exception as e: result += 1 msg = _("%(user)s not added to group %(group)s: %(e)s") % { @@ -74,8 +95,12 @@ def take_action(self, parsed_args): LOG.error(msg) if result > 0: total = len(parsed_args.user) - msg = (_("%(result)s of %(total)s users not added to group " - "%(group)s.")) % { + msg = ( + _( + "%(result)s of %(total)s users not added to group " + "%(group)s." 
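# A minimal sketch (not part of the diff), assuming openstacksdk and a cloud
# named 'devstack': the group-membership calls the reworked commands rely on.
# 'alice' and 'admins' are placeholder names for an existing user and group.
import openstack

conn = openstack.connect(cloud='devstack')

user = conn.identity.find_user('alice', ignore_missing=False)
group = conn.identity.find_group('admins', ignore_missing=False)

conn.identity.add_user_to_group(user, group)

# check_user_in_group() yields a boolean; the command above additionally
# treats a ForbiddenException as "not in group".
if conn.identity.check_user_in_group(user, group):
    print(f'{user.name} is in {group.name}')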
+ ) + ) % { 'result': result, 'total': total, 'group': parsed_args.group, @@ -87,7 +112,7 @@ class CheckUserInGroup(command.Command): _description = _("Check user membership in group") def get_parser(self, prog_name): - parser = super(CheckUserInGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'group', metavar='', @@ -103,39 +128,48 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - user_id = common.find_user(identity_client, - parsed_args.user, - parsed_args.user_domain).id - group_id = common.find_group(identity_client, - parsed_args.group, - parsed_args.group_domain).id + user_id = common.find_user_id_sdk( + identity_client, + parsed_args.user, + parsed_args.user_domain, + validate_actor_existence=False, + ) + group_id = common.find_group_id_sdk( + identity_client, + parsed_args.group, + parsed_args.group_domain, + validate_actor_existence=False, + ) + user_in_group = False try: - identity_client.users.check_in_group(user_id, group_id) - except ks_exc.http.HTTPClientError as e: - if e.http_status == 403 or e.http_status == 404: - msg = _("%(user)s not in group %(group)s\n") % { - 'user': parsed_args.user, - 'group': parsed_args.group, - } - self.app.stderr.write(msg) - else: - raise e - else: + user_in_group = identity_client.check_user_in_group( + user_id, group_id + ) + except sdk_exc.ForbiddenException: + # Assume False if forbidden + pass + if user_in_group: msg = _("%(user)s in group %(group)s\n") % { 'user': parsed_args.user, 'group': parsed_args.group, } self.app.stdout.write(msg) + else: + msg = _("%(user)s not in group %(group)s\n") % { + 'user': parsed_args.user, + 'group': parsed_args.group, + } + self.app.stderr.write(msg) class CreateGroup(command.ShowOne): _description = _("Create new group") def get_parser(self, prog_name): - parser = super(CreateGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -159,36 +193,45 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - domain = None + kwargs = {} + if parsed_args.name: + kwargs['name'] = parsed_args.name + if parsed_args.description: + kwargs['description'] = parsed_args.description if parsed_args.domain: - domain = common.find_domain(identity_client, - parsed_args.domain).id + kwargs['domain_id'] = common.find_domain_id_sdk( + identity_client, parsed_args.domain + ) try: - group = identity_client.groups.create( - name=parsed_args.name, - domain=domain, - description=parsed_args.description) - except ks_exc.Conflict: + group = identity_client.create_group(**kwargs) + except sdk_exc.ConflictException: if parsed_args.or_show: - group = utils.find_resource(identity_client.groups, - parsed_args.name, - domain_id=domain) + if parsed_args.domain: + group = identity_client.find_group( + parsed_args.name, + domain_id=parsed_args.domain, + ignore_missing=False, + ) + else: + group = identity_client.find_group( + parsed_args.name, + ignore_missing=False, + ) LOG.info(_('Returning existing group %s'), group.name) else: raise - group._info.pop('links') - return zip(*sorted(group._info.items())) + return _format_group(group) class DeleteGroup(command.Command): _description = _("Delete group(s)") def get_parser(self, 
prog_name): - parser = super(DeleteGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'groups', metavar='', @@ -203,25 +246,31 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity errors = 0 for group in parsed_args.groups: try: - group_obj = common.find_group(identity_client, - group, - parsed_args.domain) - identity_client.groups.delete(group_obj.id) + group_id = common.find_group_id_sdk( + identity_client, group, parsed_args.domain + ) + identity_client.delete_group(group_id) except Exception as e: errors += 1 - LOG.error(_("Failed to delete group with " - "name or ID '%(group)s': %(e)s"), - {'group': group, 'e': e}) + LOG.error( + _( + "Failed to delete group with " + "name or ID '%(group)s': %(e)s" + ), + {'group': group, 'e': e}, + ) if errors > 0: total = len(parsed_args.groups) - msg = (_("%(errors)s of %(total)s groups failed " - "to delete.") % {'errors': errors, 'total': total}) + msg = _("%(errors)s of %(total)s groups failed to delete.") % { + 'errors': errors, + 'total': total, + } raise exceptions.CommandError(msg) @@ -229,7 +278,7 @@ class ListGroup(command.Lister): _description = _("List groups") def get_parser(self, prog_name): - parser = super(ListGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--domain', metavar='', @@ -250,38 +299,49 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity domain = None if parsed_args.domain: - domain = common.find_domain(identity_client, - parsed_args.domain).id + domain = common.find_domain_id_sdk( + identity_client, parsed_args.domain + ) + data = [] if parsed_args.user: - user = common.find_user( + user = common.find_user_id_sdk( identity_client, parsed_args.user, parsed_args.user_domain, - ).id + ) + if domain: + # NOTE(0weng): The API doesn't actually support filtering + # additionally by domain_id, so this doesn't really do + # anything. + data = identity_client.user_groups(user, domain_id=domain) + else: + data = identity_client.user_groups(user) else: - user = None + if domain: + data = identity_client.groups(domain_id=domain) + else: + data = identity_client.groups() # List groups + columns: tuple[str, ...] 
= ('ID', 'Name') if parsed_args.long: - columns = ('ID', 'Name', 'Domain ID', 'Description') - else: - columns = ('ID', 'Name') - data = identity_client.groups.list( - domain=domain, - user=user, - ) + columns += ('Domain ID', 'Description') return ( columns, - (utils.get_item_properties( - s, columns, - formatters={}, - ) for s in data) + ( + utils.get_item_properties( + s, + columns, + formatters={}, + ) + for s in data + ), ) @@ -289,7 +349,7 @@ class RemoveUserFromGroup(command.Command): _description = _("Remove user from group") def get_parser(self, prog_name): - parser = super(RemoveUserFromGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'group', metavar='', @@ -299,27 +359,29 @@ def get_parser(self, prog_name): 'user', metavar='', nargs='+', - help=_('User(s) to remove from (name or ID) ' - '(repeat option to remove multiple users)'), + help=_( + 'User(s) to remove from (name or ID) ' + '(repeat option to remove multiple users)' + ), ) common.add_group_domain_option_to_parser(parser) common.add_user_domain_option_to_parser(parser) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - group_id = common.find_group(identity_client, - parsed_args.group, - parsed_args.group_domain).id + group_id = common.find_group_id_sdk( + identity_client, parsed_args.group, parsed_args.group_domain + ) result = 0 for i in parsed_args.user: try: - user_id = common.find_user(identity_client, - i, - parsed_args.user_domain).id - identity_client.users.remove_from_group(user_id, group_id) + user_id = common.find_user_id_sdk( + identity_client, i, parsed_args.user_domain + ) + identity_client.remove_user_from_group(user_id, group_id) except Exception as e: result += 1 msg = _("%(user)s not removed from group %(group)s: %(e)s") % { @@ -330,8 +392,12 @@ def take_action(self, parsed_args): LOG.error(msg) if result > 0: total = len(parsed_args.user) - msg = (_("%(result)s of %(total)s users not removed from group " - "%(group)s.")) % { + msg = ( + _( + "%(result)s of %(total)s users not removed from group " + "%(group)s." 
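# A minimal sketch (not part of the diff), assuming openstacksdk and a cloud
# named 'devstack': the two listing paths ListGroup now chooses between.
# 'default' and 'alice' are placeholder names.
import openstack

conn = openstack.connect(cloud='devstack')

# All groups in a given domain.
domain = conn.identity.find_domain('default', ignore_missing=False)
for group in conn.identity.groups(domain_id=domain.id):
    print(group.id, group.name)

# Groups a particular user belongs to; as the patch's NOTE points out,
# keystone does not additionally filter this call by domain.
user = conn.identity.find_user('alice', ignore_missing=False)
for group in conn.identity.user_groups(user):
    print(group.id, group.name)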
+ ) + ) % { 'result': result, 'total': total, 'group': parsed_args.group, @@ -343,7 +409,7 @@ class SetGroup(command.Command): _description = _("Set group properties") def get_parser(self, prog_name): - parser = super(SetGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'group', metavar='', @@ -367,23 +433,24 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - group = common.find_group(identity_client, parsed_args.group, - parsed_args.domain) + identity_client = self.app.client_manager.sdk_connection.identity + group = common.find_group_id_sdk( + identity_client, parsed_args.group, parsed_args.domain + ) kwargs = {} if parsed_args.name: kwargs['name'] = parsed_args.name if parsed_args.description: kwargs['description'] = parsed_args.description - identity_client.groups.update(group.id, **kwargs) + identity_client.update_group(group, **kwargs) class ShowGroup(command.ShowOne): _description = _("Display group details") def get_parser(self, prog_name): - parser = super(ShowGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'group', metavar='', @@ -397,11 +464,18 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - group = common.find_group(identity_client, - parsed_args.group, - domain_name_or_id=parsed_args.domain) + if parsed_args.domain: + domain = common.find_domain_id_sdk( + identity_client, parsed_args.domain + ) + group = identity_client.find_group( + parsed_args.group, domain_id=domain, ignore_missing=False + ) + else: + group = identity_client.find_group( + parsed_args.group, ignore_missing=False + ) - group._info.pop('links') - return zip(*sorted(group._info.items())) + return _format_group(group) diff --git a/openstackclient/identity/v3/identity_provider.py b/openstackclient/identity/v3/identity_provider.py index 19a6214487..f1af03f05c 100644 --- a/openstackclient/identity/v3/identity_provider.py +++ b/openstackclient/identity/v3/identity_provider.py @@ -16,10 +16,10 @@ import logging from osc_lib.cli import format_columns -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -31,7 +31,7 @@ class CreateIdentityProvider(command.ShowOne): _description = _("Create new identity provider") def get_parser(self, prog_name): - parser = super(CreateIdentityProvider, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'identity_provider_id', metavar='', @@ -41,15 +41,20 @@ def get_parser(self, prog_name): identity_remote_id_provider.add_argument( '--remote-id', metavar='', + dest='remote_ids', action='append', - help=_('Remote IDs to associate with the Identity Provider ' - '(repeat option to provide multiple values)'), + help=_( + 'Remote IDs to associate with the Identity Provider ' + '(repeat option to provide multiple values)' + ), ) identity_remote_id_provider.add_argument( '--remote-id-file', metavar='', - help=_('Name of a file that contains many remote IDs to associate ' - 'with the identity provider, one per line'), + help=_( + 'Name of a file that contains many remote IDs to associate ' + 'with the identity provider, one per line' + ), ) 
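# A minimal sketch (not part of the diff) of how the mutually exclusive
# --remote-id / --remote-id-file options collapse into a single remote_ids
# value. The real command reads the file via osc_lib's
# read_blob_file_contents; plain open() is used here for brevity.
def resolve_remote_ids(remote_ids, remote_id_file):
    if remote_id_file:
        with open(remote_id_file) as f:
            # one remote ID per line, surrounding whitespace stripped
            return [line.strip() for line in f.read().splitlines()]
    return remote_ids or None


print(resolve_remote_ids(['https://idp.example.org/saml2'], None))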
parser.add_argument( '--description', @@ -59,19 +64,23 @@ def get_parser(self, prog_name): parser.add_argument( '--domain', metavar='', - help=_('Domain to associate with the identity provider. If not ' - 'specified, a domain will be created automatically. ' - '(Name or ID)'), + help=_( + 'Domain to associate with the identity provider. If not ' + 'specified, a domain will be created automatically. ' + '(Name or ID)' + ), ) parser.add_argument( '--authorization-ttl', metavar='', type=int, - help=_('Time to keep the role assignments for users ' - 'authenticating via this identity provider. ' - 'When not provided, global default configured in the ' - 'Identity service will be used. ' - 'Available since Identity API version 3.14 (Ussuri).'), + help=_( + 'Time to keep the role assignments for users ' + 'authenticating via this identity provider. ' + 'When not provided, global default configured in the ' + 'Identity service will be used. ' + 'Available since Identity API version 3.14 (Ussuri).' + ), ) enable_identity_provider = parser.add_mutually_exclusive_group() enable_identity_provider.add_argument( @@ -91,27 +100,30 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity + remote_ids: list[str] | None = None if parsed_args.remote_id_file: file_content = utils.read_blob_file_contents( - parsed_args.remote_id_file) + parsed_args.remote_id_file + ) remote_ids = file_content.splitlines() remote_ids = list(map(str.strip, remote_ids)) - else: - remote_ids = (parsed_args.remote_id - if parsed_args.remote_id else None) + elif parsed_args.remote_ids: + remote_ids = parsed_args.remote_ids domain_id = None if parsed_args.domain: - domain_id = common.find_domain(identity_client, - parsed_args.domain).id + domain_id = common.find_domain( + identity_client, parsed_args.domain + ).id # TODO(pas-ha) actually check for 3.14 microversion kwargs = {} auth_ttl = parsed_args.authorization_ttl if auth_ttl is not None: if auth_ttl < 0: - msg = (_("%(param)s must be positive integer or zero." 
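# A minimal sketch (not part of the diff) of the --authorization-ttl guard:
# None lets keystone apply its configured default, negative values are
# rejected client-side, anything else is forwarded as a keyword argument.
# ValueError stands in for osc_lib's CommandError.
def build_idp_kwargs(authorization_ttl=None):
    kwargs = {}
    if authorization_ttl is not None:
        if authorization_ttl < 0:
            raise ValueError(
                'authorization-ttl must be positive integer or zero.'
            )
        kwargs['authorization_ttl'] = authorization_ttl
    return kwargs


print(build_idp_kwargs(3600))  # {'authorization_ttl': 3600}
print(build_idp_kwargs())      # {}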
- ) % {"param": "authorization-ttl"}) + msg = _("%(param)s must be positive integer or zero.") % { + "param": "authorization-ttl" + } raise exceptions.CommandError(msg) kwargs['authorization_ttl'] = auth_ttl @@ -121,11 +133,13 @@ def take_action(self, parsed_args): description=parsed_args.description, domain_id=domain_id, enabled=parsed_args.enabled, - **kwargs) + **kwargs, + ) idp._info.pop('links', None) - remote_ids = format_columns.ListColumn(idp._info.pop('remote_ids', [])) - idp._info['remote_ids'] = remote_ids + idp._info['remote_ids'] = format_columns.ListColumn( + idp._info.pop('remote_ids', []) + ) return zip(*sorted(idp._info.items())) @@ -133,7 +147,7 @@ class DeleteIdentityProvider(command.Command): _description = _("Delete identity provider(s)") def get_parser(self, prog_name): - parser = super(DeleteIdentityProvider, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'identity_provider', metavar='', @@ -150,14 +164,19 @@ def take_action(self, parsed_args): identity_client.federation.identity_providers.delete(i) except Exception as e: result += 1 - LOG.error(_("Failed to delete identity providers with " - "name or ID '%(provider)s': %(e)s"), - {'provider': i, 'e': e}) + LOG.error( + _( + "Failed to delete identity providers with " + "name or ID '%(provider)s': %(e)s" + ), + {'provider': i, 'e': e}, + ) if result > 0: total = len(parsed_args.identity_provider) - msg = (_("%(result)s of %(total)s identity providers failed" - " to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s identity providers failed to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -165,7 +184,7 @@ class ListIdentityProvider(command.Lister): _description = _("List identity providers") def get_parser(self, prog_name): - parser = super(ListIdentityProvider, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--id', metavar='', @@ -190,18 +209,24 @@ def take_action(self, parsed_args): kwargs['enabled'] = True data = identity_client.federation.identity_providers.list(**kwargs) - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetIdentityProvider(command.Command): _description = _("Set identity provider properties") def get_parser(self, prog_name): - parser = super(SetIdentityProvider, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'identity_provider', metavar='', @@ -216,23 +241,30 @@ def get_parser(self, prog_name): identity_remote_id_provider.add_argument( '--remote-id', metavar='', + dest='remote_ids', action='append', - help=_('Remote IDs to associate with the Identity Provider ' - '(repeat option to provide multiple values)'), + help=_( + 'Remote IDs to associate with the Identity Provider ' + '(repeat option to provide multiple values)' + ), ) identity_remote_id_provider.add_argument( '--remote-id-file', metavar='', - help=_('Name of a file that contains many remote IDs to associate ' - 'with the identity provider, one per line'), + help=_( + 'Name of a file that contains many remote IDs to associate ' + 'with the identity provider, one per line' + ), ) parser.add_argument( '--authorization-ttl', metavar='', type=int, - help=_('Time to keep the role assignments for users ' - 'authenticating via this identity provider. 
' - 'Available since Identity API version 3.14 (Ussuri).'), + help=_( + 'Time to keep the role assignments for users ' + 'authenticating via this identity provider. ' + 'Available since Identity API version 3.14 (Ussuri).' + ), ) enable_identity_provider = parser.add_mutually_exclusive_group() enable_identity_provider.add_argument( @@ -253,11 +285,12 @@ def take_action(self, parsed_args): # Always set remote_ids if either is passed in if parsed_args.remote_id_file: file_content = utils.read_blob_file_contents( - parsed_args.remote_id_file) + parsed_args.remote_id_file + ) remote_ids = file_content.splitlines() remote_ids = list(map(str.strip, remote_ids)) - elif parsed_args.remote_id: - remote_ids = parsed_args.remote_id + elif parsed_args.remote_ids: + remote_ids = parsed_args.remote_ids # Setup keyword args for the client kwargs = {} @@ -267,7 +300,7 @@ def take_action(self, parsed_args): kwargs['enabled'] = True if parsed_args.disable: kwargs['enabled'] = False - if parsed_args.remote_id_file or parsed_args.remote_id: + if parsed_args.remote_id_file or parsed_args.remote_ids: kwargs['remote_ids'] = remote_ids # TODO(pas-ha) actually check for 3.14 microversion @@ -279,14 +312,14 @@ def take_action(self, parsed_args): auth_ttl = parsed_args.authorization_ttl if auth_ttl is not None: if auth_ttl < 0: - msg = (_("%(param)s must be positive integer or zero." - ) % {"param": "authorization-ttl"}) + msg = _("%(param)s must be positive integer or zero.") % { + "param": "authorization-ttl" + } raise exceptions.CommandError(msg) kwargs['authorization_ttl'] = auth_ttl federation_client.identity_providers.update( - parsed_args.identity_provider, - **kwargs + parsed_args.identity_provider, **kwargs ) @@ -294,7 +327,7 @@ class ShowIdentityProvider(command.ShowOne): _description = _("Display identity provider details") def get_parser(self, prog_name): - parser = super(ShowIdentityProvider, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'identity_provider', metavar='', @@ -307,7 +340,8 @@ def take_action(self, parsed_args): idp = utils.find_resource( identity_client.federation.identity_providers, parsed_args.identity_provider, - id=parsed_args.identity_provider) + id=parsed_args.identity_provider, + ) idp._info.pop('links', None) remote_ids = format_columns.ListColumn(idp._info.pop('remote_ids', [])) diff --git a/openstackclient/identity/v3/implied_role.py b/openstackclient/identity/v3/implied_role.py index 054f30285c..c1236ad019 100644 --- a/openstackclient/identity/v3/implied_role.py +++ b/openstackclient/identity/v3/implied_role.py @@ -17,8 +17,8 @@ import logging -from osc_lib.command import command +from openstackclient import command from openstackclient.i18n import _ @@ -48,11 +48,10 @@ def _get_role_ids(identity_client, parsed_args): class CreateImpliedRole(command.ShowOne): - _description = _("Creates an association between prior and implied roles") def get_parser(self, prog_name): - parser = super(CreateImpliedRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role', metavar='', @@ -69,20 +68,20 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity (prior_role_id, implied_role_id) = _get_role_ids( - identity_client, parsed_args) + identity_client, parsed_args + ) response = identity_client.inference_rules.create( - prior_role_id, implied_role_id) + prior_role_id, implied_role_id + ) response._info.pop('links', None) - return 
zip(*sorted([(k, v['id']) - for k, v in response._info.items()])) + return zip(*sorted([(k, v['id']) for k, v in response._info.items()])) class DeleteImpliedRole(command.Command): - _description = _("Deletes an association between prior and implied roles") def get_parser(self, prog_name): - parser = super(DeleteImpliedRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role', metavar='', @@ -99,29 +98,34 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity (prior_role_id, implied_role_id) = _get_role_ids( - identity_client, parsed_args) - identity_client.inference_rules.delete( - prior_role_id, implied_role_id) + identity_client, parsed_args + ) + identity_client.inference_rules.delete(prior_role_id, implied_role_id) class ListImpliedRole(command.Lister): - _description = _("List implied roles") - _COLUMNS = ['Prior Role ID', 'Prior Role Name', - 'Implied Role ID', 'Implied Role Name'] + _COLUMNS = [ + 'Prior Role ID', + 'Prior Role Name', + 'Implied Role ID', + 'Implied Role Name', + ] def get_parser(self, prog_name): - parser = super(ListImpliedRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) return parser def take_action(self, parsed_args): def _list_implied(response): for rule in response: for implies in rule.implies: - yield (rule.prior_role['id'], - rule.prior_role['name'], - implies['id'], - implies['name']) + yield ( + rule.prior_role['id'], + rule.prior_role['name'], + implies['id'], + implies['name'], + ) identity_client = self.app.client_manager.identity response = identity_client.inference_rules.list_inference_roles() diff --git a/openstackclient/identity/v3/limit.py b/openstackclient/identity/v3/limit.py index b155cbd863..15da04369f 100644 --- a/openstackclient/identity/v3/limit.py +++ b/openstackclient/identity/v3/limit.py @@ -15,10 +15,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as common_utils @@ -29,7 +29,7 @@ class CreateLimit(command.ShowOne): _description = _("Create a limit") def get_parser(self, prog_name): - parser = super(CreateLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--description', metavar='', @@ -77,8 +77,7 @@ def take_action(self, parsed_args): ) region = None if parsed_args.region: - val = getattr(parsed_args, 'region', None) - if 'None' not in val: + if 'None' not in parsed_args.region: # NOTE (vishakha): Due to bug #1799153 and for any another # related case where GET resource API does not support the # filter by name, osc_lib.utils.find_resource() method cannot @@ -90,6 +89,13 @@ def take_action(self, parsed_args): region = common_utils.get_resource( identity_client.regions, parsed_args.region ) + else: + self.log.warning( + _( + "Passing 'None' to indicate no region is deprecated. " + "Instead, don't pass --region." 
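# A minimal sketch (not part of the diff) of the region handling the limit
# commands converge on: resolve a real region name or ID, but keep accepting
# the legacy literal 'None' with a deprecation warning. 'identity_client' is
# a placeholder for a keystoneclient v3 client; the real commands resolve the
# region through openstackclient.identity.common.get_resource because the
# regions API cannot be filtered by name (bug #1799153).
import logging

LOG = logging.getLogger(__name__)


def resolve_region(identity_client, region_arg):
    if not region_arg:
        return None
    if 'None' in region_arg:
        LOG.warning(
            "Passing 'None' to indicate no region is deprecated. "
            "Instead, don't pass --region."
        )
        return None
    return identity_client.regions.get(region_arg)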
+ ) + ) limit = identity_client.limits.create( project, @@ -97,7 +103,7 @@ def take_action(self, parsed_args): parsed_args.resource_name, parsed_args.resource_limit, description=parsed_args.description, - region=region + region=region, ) limit._info.pop('links', None) @@ -108,7 +114,7 @@ class ListLimit(command.Lister): _description = _("List limits") def get_parser(self, prog_name): - parser = super(ListLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--service', metavar='', @@ -142,11 +148,7 @@ def take_action(self, parsed_args): ) region = None if parsed_args.region: - region = utils.find_resource( - identity_client.regions, parsed_args.region - ) - val = getattr(parsed_args, 'region', None) - if 'None' not in val: + if 'None' not in parsed_args.region: # NOTE (vishakha): Due to bug #1799153 and for any another # related case where GET resource API does not support the # filter by name, osc_lib.utils.find_resource() method cannot @@ -158,6 +160,14 @@ def take_action(self, parsed_args): region = common_utils.get_resource( identity_client.regions, parsed_args.region ) + else: + self.log.warning( + _( + "Passing 'None' to indicate no region is deprecated. " + "Instead, don't pass --region." + ) + ) + project = None if parsed_args.project: project = utils.find_resource( @@ -168,12 +178,17 @@ def take_action(self, parsed_args): service=service, resource_name=parsed_args.resource_name, region=region, - project=project + project=project, ) columns = ( - 'ID', 'Project ID', 'Service ID', 'Resource Name', - 'Resource Limit', 'Description', 'Region ID' + 'ID', + 'Project ID', + 'Service ID', + 'Resource Name', + 'Resource Limit', + 'Description', + 'Region ID', ) return ( columns, @@ -185,7 +200,7 @@ class ShowLimit(command.ShowOne): _description = _("Display limit details") def get_parser(self, prog_name): - parser = super(ShowLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'limit_id', metavar='', @@ -204,7 +219,7 @@ class SetLimit(command.ShowOne): _description = _("Update information about a limit") def get_parser(self, prog_name): - parser = super(SetLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'limit_id', metavar='', @@ -230,7 +245,7 @@ def take_action(self, parsed_args): limit = identity_client.limits.update( parsed_args.limit_id, description=parsed_args.description, - resource_limit=parsed_args.resource_limit + resource_limit=parsed_args.resource_limit, ) limit._info.pop('links', None) @@ -242,7 +257,7 @@ class DeleteLimit(command.Command): _description = _("Delete a limit") def get_parser(self, prog_name): - parser = super(DeleteLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'limit_id', metavar='', @@ -260,12 +275,15 @@ def take_action(self, parsed_args): identity_client.limits.delete(limit_id) except Exception as e: errors += 1 - LOG.error(_("Failed to delete limit with ID " - "'%(id)s': %(e)s"), - {'id': limit_id, 'e': e}) + LOG.error( + _("Failed to delete limit with ID '%(id)s': %(e)s"), + {'id': limit_id, 'e': e}, + ) if errors > 0: total = len(parsed_args.limit_id) - msg = (_("%(errors)s of %(total)s limits failed to " - "delete.") % {'errors': errors, 'total': total}) + msg = _("%(errors)s of %(total)s limits failed to delete.") % { + 'errors': errors, + 'total': total, + } raise exceptions.CommandError(msg) diff --git a/openstackclient/identity/v3/mapping.py 
b/openstackclient/identity/v3/mapping.py index 7d40a2b7f9..a041f19e58 100644 --- a/openstackclient/identity/v3/mapping.py +++ b/openstackclient/identity/v3/mapping.py @@ -18,17 +18,17 @@ import json import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) -class _RulesReader(object): +class _RulesReader: _description = _("Helper class capable of reading rules from files") def _read_rules(self, path): @@ -73,18 +73,35 @@ def _read_rules(self, path): try: rules = json.loads(blob) except ValueError as e: - msg = _("An error occurred when reading rules from file " - "%(path)s: %(error)s") % {"path": path, "error": e} + msg = _( + "An error occurred when reading rules from file " + "%(path)s: %(error)s" + ) % {"path": path, "error": e} raise exceptions.CommandError(msg) else: return rules + @staticmethod + def add_federated_schema_version_option(parser): + parser.add_argument( + '--schema-version', + metavar='', + required=False, + default=None, + help=_( + "The federated attribute mapping schema version. The " + "default value on the client side is 'None'; however, that " + "will lead the backend to set the default according to " + "'attribute_mapping_default_schema_version' option." + ), + ) + class CreateMapping(command.ShowOne, _RulesReader): _description = _("Create new mapping") def get_parser(self, prog_name): - parser = super(CreateMapping, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'mapping', metavar='', @@ -92,9 +109,11 @@ def get_parser(self, prog_name): ) parser.add_argument( '--rules', - metavar='', required=True, + metavar='', + required=True, help=_('Filename that contains a set of mapping rules (required)'), ) + _RulesReader.add_federated_schema_version_option(parser) return parser def take_action(self, parsed_args): @@ -103,7 +122,9 @@ def take_action(self, parsed_args): rules = self._read_rules(parsed_args.rules) mapping = identity_client.federation.mappings.create( mapping_id=parsed_args.mapping, - rules=rules) + rules=rules, + schema_version=parsed_args.schema_version, + ) mapping._info.pop('links', None) return zip(*sorted(mapping._info.items())) @@ -113,7 +134,7 @@ class DeleteMapping(command.Command): _description = _("Delete mapping(s)") def get_parser(self, prog_name): - parser = super(DeleteMapping, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'mapping', metavar='', @@ -130,13 +151,20 @@ def take_action(self, parsed_args): identity_client.federation.mappings.delete(i) except Exception as e: result += 1 - LOG.error(_("Failed to delete mapping with name or " - "ID '%(mapping)s': %(e)s"), {'mapping': i, 'e': e}) + LOG.error( + _( + "Failed to delete mapping with name or " + "ID '%(mapping)s': %(e)s" + ), + {'mapping': i, 'e': e}, + ) if result > 0: total = len(parsed_args.mapping) - msg = (_("%(result)s of %(total)s mappings failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s mappings failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -149,7 +177,7 @@ def take_action(self, parsed_args): # rules, (s)he should show specific ones. 
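# A minimal sketch (not part of the diff) of the new schema-version plumbing:
# rules come from a JSON file and schema_version is passed through as-is,
# with None meaning "use keystone's
# attribute_mapping_default_schema_version". 'identity_client' is a
# placeholder for a keystoneclient v3 client; the real command loads the
# rules via _RulesReader, json.load is used here for brevity.
import json


def create_mapping(identity_client, mapping_id, rules_path,
                   schema_version=None):
    with open(rules_path) as f:
        rules = json.load(f)
    return identity_client.federation.mappings.create(
        mapping_id=mapping_id,
        rules=rules,
        schema_version=schema_version,
    )
# The same 'schema_version' attribute is what the updated ListMapping
# exposes as an extra column.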
identity_client = self.app.client_manager.identity data = identity_client.federation.mappings.list() - columns = ('ID',) + columns = ('ID', 'schema_version') items = [utils.get_item_properties(s, columns) for s in data] return (columns, items) @@ -158,7 +186,7 @@ class SetMapping(command.Command, _RulesReader): _description = _("Set mapping properties") def get_parser(self, prog_name): - parser = super(SetMapping, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'mapping', metavar='', @@ -169,6 +197,8 @@ def get_parser(self, prog_name): metavar='', help=_('Filename that contains a new set of mapping rules'), ) + + _RulesReader.add_federated_schema_version_option(parser) return parser def take_action(self, parsed_args): @@ -178,7 +208,9 @@ def take_action(self, parsed_args): mapping = identity_client.federation.mappings.update( mapping=parsed_args.mapping, - rules=rules) + rules=rules, + schema_version=parsed_args.schema_version, + ) mapping._info.pop('links', None) @@ -187,7 +219,7 @@ class ShowMapping(command.ShowOne): _description = _("Display mapping details") def get_parser(self, prog_name): - parser = super(ShowMapping, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'mapping', metavar='', diff --git a/openstackclient/identity/v3/policy.py b/openstackclient/identity/v3/policy.py index 45674210f0..3554903952 100644 --- a/openstackclient/identity/v3/policy.py +++ b/openstackclient/identity/v3/policy.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -31,13 +31,15 @@ class CreatePolicy(command.ShowOne): _description = _("Create new policy") def get_parser(self, prog_name): - parser = super(CreatePolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--type', metavar='', default="application/json", - help=_('New MIME type of the policy rules file ' - '(defaults to application/json)'), + help=_( + 'New MIME type of the policy rules file ' + '(defaults to application/json)' + ), ) parser.add_argument( 'rules', @@ -63,7 +65,7 @@ class DeletePolicy(command.Command): _description = _("Delete policy(s)") def get_parser(self, prog_name): - parser = super(DeletePolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'policy', metavar='', @@ -80,13 +82,20 @@ def take_action(self, parsed_args): identity_client.policies.delete(i) except Exception as e: result += 1 - LOG.error(_("Failed to delete policy with name or " - "ID '%(policy)s': %(e)s"), {'policy': i, 'e': e}) + LOG.error( + _( + "Failed to delete policy with name or " + "ID '%(policy)s': %(e)s" + ), + {'policy': i, 'e': e}, + ) if result > 0: total = len(parsed_args.policy) - msg = (_("%(result)s of %(total)s policys failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s policies failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -94,7 +103,7 @@ class ListPolicy(command.Lister): _description = _("List policies") def get_parser(self, prog_name): - parser = super(ListPolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', @@ -104,25 +113,30 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): + columns: tuple[str, ...] 
= ('ID', 'Type') + column_headers: tuple[str, ...] = columns if parsed_args.long: - columns = ('ID', 'Type', 'Blob') - column_headers = ('ID', 'Type', 'Rules') - else: - columns = ('ID', 'Type') - column_headers = columns + columns += ('Blob',) + column_headers += ('Rules',) data = self.app.client_manager.identity.policies.list() - return (column_headers, - (utils.get_item_properties( - s, columns, + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetPolicy(command.Command): _description = _("Set policy properties") def get_parser(self, prog_name): - parser = super(SetPolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'policy', metavar='', @@ -160,7 +174,7 @@ class ShowPolicy(command.ShowOne): _description = _("Display policy details") def get_parser(self, prog_name): - parser = super(ShowPolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'policy', metavar='', @@ -170,8 +184,9 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity - policy = utils.find_resource(identity_client.policies, - parsed_args.policy) + policy = utils.find_resource( + identity_client.policies, parsed_args.policy + ) policy._info.pop('links') policy._info.update({'rules': policy._info.pop('blob')}) diff --git a/openstackclient/identity/v3/project.py b/openstackclient/identity/v3/project.py index 5e8ce82981..e70a8a5011 100644 --- a/openstackclient/identity/v3/project.py +++ b/openstackclient/identity/v3/project.py @@ -19,10 +19,10 @@ from keystoneauth1 import exceptions as ks_exc from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common from openstackclient.identity.v3 import tag @@ -34,7 +34,7 @@ class CreateProject(command.ShowOne): _description = _("Create new project") def get_parser(self, prog_name): - parser = super(CreateProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -59,19 +59,26 @@ def get_parser(self, prog_name): enable_group.add_argument( '--enable', action='store_true', + dest='enabled', + default=True, help=_('Enable project'), ) enable_group.add_argument( '--disable', - action='store_true', + action='store_false', + dest='enabled', + default=True, help=_('Disable project'), ) parser.add_argument( '--property', metavar='', + dest='properties', action=parseractions.KeyValueAction, - help=_('Add a property to ' - '(repeat option to set multiple properties)'), + help=_( + 'Add a property to ' + '(repeat option to set multiple properties)' + ), ) parser.add_argument( '--or-show', @@ -87,8 +94,7 @@ def take_action(self, parsed_args): domain = None if parsed_args.domain: - domain = common.find_domain(identity_client, - parsed_args.domain).id + domain = common.find_domain(identity_client, parsed_args.domain).id parent = None if parsed_args.parent: @@ -97,15 +103,9 @@ def take_action(self, parsed_args): parsed_args.parent, ).id - enabled = True - if parsed_args.disable: - enabled = False - - options = common.get_immutable_options(parsed_args) - kwargs = {} - if parsed_args.property: - kwargs = parsed_args.property.copy() + if parsed_args.properties: + kwargs = parsed_args.properties.copy() if 
'is_domain' in kwargs.keys(): if kwargs['is_domain'].lower() == "true": kwargs['is_domain'] = True @@ -116,21 +116,27 @@ def take_action(self, parsed_args): kwargs['tags'] = list(set(parsed_args.tags)) + options = {} + if parsed_args.immutable is not None: + options['immutable'] = parsed_args.immutable + try: project = identity_client.projects.create( name=parsed_args.name, domain=domain, parent=parent, description=parsed_args.description, - enabled=enabled, + enabled=parsed_args.enabled, options=options, - **kwargs + **kwargs, ) except ks_exc.Conflict: if parsed_args.or_show: - project = utils.find_resource(identity_client.projects, - parsed_args.name, - domain_id=domain) + project = utils.find_resource( + identity_client.projects, + parsed_args.name, + domain_id=domain, + ) LOG.info(_('Returning existing project %s'), project.name) else: raise @@ -140,10 +146,17 @@ def take_action(self, parsed_args): class DeleteProject(command.Command): - _description = _("Delete project(s)") + _description = _( + "Delete project(s). This command will remove specified " + "existing project(s) if an active user is authorized to do " + "this. If there are resources managed by other services " + "(for example, Nova, Neutron, Cinder) associated with " + "specified project(s), delete operation will proceed " + "regardless." + ) def get_parser(self, prog_name): - parser = super(DeleteProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'projects', metavar='', @@ -167,23 +180,30 @@ def take_action(self, parsed_args): for project in parsed_args.projects: try: if domain is not None: - project_obj = utils.find_resource(identity_client.projects, - project, - domain_id=domain.id) + project_obj = utils.find_resource( + identity_client.projects, project, domain_id=domain.id + ) else: - project_obj = utils.find_resource(identity_client.projects, - project) + project_obj = utils.find_resource( + identity_client.projects, project + ) identity_client.projects.delete(project_obj.id) except Exception as e: errors += 1 - LOG.error(_("Failed to delete project with " - "name or ID '%(project)s': %(e)s"), - {'project': project, 'e': e}) + LOG.error( + _( + "Failed to delete project with " + "name or ID '%(project)s': %(e)s" + ), + {'project': project, 'e': e}, + ) if errors > 0: total = len(parsed_args.projects) - msg = (_("%(errors)s of %(total)s projects failed " - "to delete.") % {'errors': errors, 'total': total}) + msg = _("%(errors)s of %(total)s projects failed to delete.") % { + 'errors': errors, + 'total': total, + } raise exceptions.CommandError(msg) @@ -191,7 +211,7 @@ class ListProject(command.Lister): _description = _("List projects") def get_parser(self, prog_name): - parser = super(ListProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--domain', metavar='', @@ -210,8 +230,10 @@ def get_parser(self, prog_name): parser.add_argument( '--my-projects', action='store_true', - help=_('List projects for the authenticated user. ' - 'Supersedes other filters.'), + help=_( + 'List projects for the authenticated user. ' + 'Supersedes other filters.' 
+ ), ) parser.add_argument( '--long', @@ -222,43 +244,66 @@ def get_parser(self, prog_name): parser.add_argument( '--sort', metavar='[:]', - help=_('Sort output by selected keys and directions (asc or desc) ' - '(default: asc), repeat this option to specify multiple ' - 'keys and directions.'), + help=_( + 'Sort output by selected keys and directions (asc or desc) ' + '(default: asc), repeat this option to specify multiple ' + 'keys and directions.' + ), + ) + parser.add_argument( + '--enabled', + action='store_true', + dest='is_enabled', + default=None, + help=_('List only enabled projects'), + ) + parser.add_argument( + '--disabled', + action='store_false', + dest='is_enabled', + default=None, + help=_('List only disabled projects'), ) tag.add_tag_filtering_option_to_parser(parser, _('projects')) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity + columns: tuple[str, ...] = ('ID', 'Name') if parsed_args.long: - columns = ('ID', 'Name', 'Domain ID', 'Description', 'Enabled') - else: - columns = ('ID', 'Name') + columns += ('Domain ID', 'Description', 'Enabled') kwargs = {} domain_id = None if parsed_args.domain: - domain_id = common.find_domain(identity_client, - parsed_args.domain).id + domain_id = common.find_domain( + identity_client, parsed_args.domain + ).id kwargs['domain'] = domain_id if parsed_args.parent: - parent_id = common.find_project(identity_client, - parsed_args.parent).id + parent_id = common.find_project( + identity_client, parsed_args.parent + ).id kwargs['parent'] = parent_id if parsed_args.user: if parsed_args.domain: - user_id = utils.find_resource(identity_client.users, - parsed_args.user, - domain_id=domain_id).id + user_id = utils.find_resource( + identity_client.users, + parsed_args.user, + domain_id=domain_id, + ).id else: - user_id = utils.find_resource(identity_client.users, - parsed_args.user).id + user_id = utils.find_resource( + identity_client.users, parsed_args.user + ).id kwargs['user'] = user_id + if parsed_args.is_enabled is not None: + kwargs['is_enabled'] = parsed_args.is_enabled + tag.get_tag_filtering_args(parsed_args, kwargs) if parsed_args.my_projects: @@ -272,26 +317,31 @@ def take_action(self, parsed_args): # wanting their own project list. 
if not kwargs: user = self.app.client_manager.auth_ref.user_id - data = identity_client.projects.list( - user=user) + data = identity_client.projects.list(user=user) else: raise if parsed_args.sort: data = utils.sort_items(data, parsed_args.sort) - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetProject(command.Command): _description = _("Set project properties") def get_parser(self, prog_name): - parser = super(SetProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'project', metavar='', @@ -316,19 +366,26 @@ def get_parser(self, prog_name): enable_group.add_argument( '--enable', action='store_true', + dest='enabled', + default=None, help=_('Enable project'), ) enable_group.add_argument( '--disable', - action='store_true', + action='store_false', + dest='enabled', + default=None, help=_('Disable project'), ) parser.add_argument( '--property', metavar='', + dest='properties', action=parseractions.KeyValueAction, - help=_('Set a property on ' - '(repeat option to set multiple properties)'), + help=_( + 'Set a property on ' + '(repeat option to set multiple properties)' + ), ) common.add_resource_option_to_parser(parser) tag.add_tag_option_to_parser_for_set(parser, _('project')) @@ -337,24 +394,21 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity - project = common.find_project(identity_client, - parsed_args.project, - parsed_args.domain) + project = common.find_project( + identity_client, parsed_args.project, parsed_args.domain + ) kwargs = {} if parsed_args.name: kwargs['name'] = parsed_args.name if parsed_args.description: kwargs['description'] = parsed_args.description - if parsed_args.enable: - kwargs['enabled'] = True - if parsed_args.disable: - kwargs['enabled'] = False - options = common.get_immutable_options(parsed_args) - if options: - kwargs['options'] = options - if parsed_args.property: - kwargs.update(parsed_args.property) + if parsed_args.enabled is not None: + kwargs['enabled'] = parsed_args.enabled + if parsed_args.immutable is not None: + kwargs['options'] = {'immutable': parsed_args.immutable} + if parsed_args.properties: + kwargs.update(parsed_args.properties) tag.update_tags_in_args(parsed_args, project, kwargs) identity_client.projects.update(project.id, **kwargs) @@ -364,7 +418,7 @@ class ShowProject(command.ShowOne): _description = _("Display project details") def get_parser(self, prog_name): - parser = super(ShowProject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'project', metavar='', @@ -392,20 +446,19 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): identity_client = self.app.client_manager.identity - project_str = common._get_token_resource(identity_client, 'project', - parsed_args.project, - parsed_args.domain) + project_str = common._get_token_resource( + identity_client, 'project', parsed_args.project, parsed_args.domain + ) if parsed_args.domain: domain = common.find_domain(identity_client, parsed_args.domain) project = utils.find_resource( - identity_client.projects, - project_str, - domain_id=domain.id) + identity_client.projects, project_str, domain_id=domain.id + ) else: project = utils.find_resource( - identity_client.projects, - project_str) + identity_client.projects, project_str + ) if parsed_args.parents or 
parsed_args.children: # NOTE(RuiChen): utils.find_resource() can't pass kwargs, @@ -414,7 +467,8 @@ def take_action(self, parsed_args): project = identity_client.projects.get( project.id, parents_as_ids=parsed_args.parents, - subtree_as_ids=parsed_args.children) + subtree_as_ids=parsed_args.children, + ) project._info.pop('links') return zip(*sorted(project._info.items())) diff --git a/openstackclient/identity/v3/region.py b/openstackclient/identity/v3/region.py index 20ee073c3b..4882c9e9cb 100644 --- a/openstackclient/identity/v3/region.py +++ b/openstackclient/identity/v3/region.py @@ -15,21 +15,30 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) +def _format_region(region): + columns = ('id', 'description', 'parent_region_id') + column_headers = ('region', 'description', 'parent_region') + return ( + column_headers, + utils.get_item_properties(region, columns), + ) + + class CreateRegion(command.ShowOne): _description = _("Create new region") def get_parser(self, prog_name): - parser = super(CreateRegion, self).get_parser(prog_name) + parser = super().get_parser(prog_name) # NOTE(stevemar): The API supports an optional region ID, but that # seems like poor UX, we will only support user-defined IDs. parser.add_argument( @@ -50,25 +59,22 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - region = identity_client.regions.create( + region = identity_client.create_region( id=parsed_args.region, - parent_region=parsed_args.parent_region, + parent_region_id=parsed_args.parent_region, description=parsed_args.description, ) - region._info['region'] = region._info.pop('id') - region._info['parent_region'] = region._info.pop('parent_region_id') - region._info.pop('links', None) - return zip(*sorted(region._info.items())) + return _format_region(region) class DeleteRegion(command.Command): _description = _("Delete region(s)") def get_parser(self, prog_name): - parser = super(DeleteRegion, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'region', metavar='', @@ -78,20 +84,24 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity result = 0 for i in parsed_args.region: try: - identity_client.regions.delete(i) + identity_client.delete_region(i) except Exception as e: result += 1 - LOG.error(_("Failed to delete region with " - "ID '%(region)s': %(e)s"), {'region': i, 'e': e}) + LOG.error( + _("Failed to delete region with ID '%(region)s': %(e)s"), + {'region': i, 'e': e}, + ) if result > 0: total = len(parsed_args.region) - msg = (_("%(result)s of %(total)s regions failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s regions failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -99,7 +109,7 @@ class ListRegion(command.Lister): _description = _("List regions") def get_parser(self, prog_name): - parser = super(ListRegion, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--parent-region', metavar='', @@ -108,7 +118,7 @@ def get_parser(self, prog_name): return 
parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity kwargs = {} if parsed_args.parent_region: @@ -117,19 +127,25 @@ def take_action(self, parsed_args): columns_headers = ('Region', 'Parent Region', 'Description') columns = ('ID', 'Parent Region Id', 'Description') - data = identity_client.regions.list(**kwargs) - return (columns_headers, - (utils.get_item_properties( - s, columns, + data = identity_client.regions(**kwargs) + return ( + columns_headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetRegion(command.Command): _description = _("Set region properties") def get_parser(self, prog_name): - parser = super(SetRegion, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'region', metavar='', @@ -148,22 +164,22 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity kwargs = {} if parsed_args.description: kwargs['description'] = parsed_args.description if parsed_args.parent_region: - kwargs['parent_region'] = parsed_args.parent_region + kwargs['parent_region_id'] = parsed_args.parent_region - identity_client.regions.update(parsed_args.region, **kwargs) + identity_client.update_region(parsed_args.region, **kwargs) class ShowRegion(command.ShowOne): _description = _("Display region details") def get_parser(self, prog_name): - parser = super(ShowRegion, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'region', metavar='', @@ -172,12 +188,8 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - region = utils.find_resource(identity_client.regions, - parsed_args.region) + region = identity_client.get_region(parsed_args.region) - region._info['region'] = region._info.pop('id') - region._info['parent_region'] = region._info.pop('parent_region_id') - region._info.pop('links', None) - return zip(*sorted(region._info.items())) + return _format_region(region) diff --git a/openstackclient/identity/v3/registered_limit.py b/openstackclient/identity/v3/registered_limit.py index 53117c71ae..e0afb4133f 100644 --- a/openstackclient/identity/v3/registered_limit.py +++ b/openstackclient/identity/v3/registered_limit.py @@ -15,10 +15,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as common_utils @@ -29,7 +29,7 @@ class CreateRegisteredLimit(command.ShowOne): _description = _("Create a registered limit") def get_parser(self, prog_name): - parser = super(CreateRegisteredLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--description', metavar='', @@ -44,7 +44,10 @@ def get_parser(self, prog_name): '--service', metavar='', required=True, - help=_('Service responsible for the resource to limit (required)'), + help=_( + 'Service responsible for the resource to limit (required) ' + '(name or ID)' + ), ) parser.add_argument( '--default-limit', @@ -68,8 +71,7 @@ def take_action(self, parsed_args): ) region = None if 
parsed_args.region: - val = getattr(parsed_args, 'region', None) - if 'None' not in val: + if 'None' not in parsed_args.region: # NOTE (vishakha): Due to bug #1799153 and for any another # related case where GET resource API does not support the # filter by name, osc_lib.utils.find_resource() method cannot @@ -81,13 +83,20 @@ def take_action(self, parsed_args): region = common_utils.get_resource( identity_client.regions, parsed_args.region ) + else: + self.log.warning( + _( + "Passing 'None' to indicate no region is deprecated. " + "Instead, don't pass --region." + ) + ) registered_limit = identity_client.registered_limits.create( service, parsed_args.resource_name, parsed_args.default_limit, description=parsed_args.description, - region=region + region=region, ) registered_limit._info.pop('links', None) @@ -98,12 +107,12 @@ class DeleteRegisteredLimit(command.Command): _description = _("Delete a registered limit") def get_parser(self, prog_name): - parser = super(DeleteRegisteredLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'registered_limit_id', - metavar='', + 'registered_limits', + metavar='', nargs="+", - help=_('Registered limit to delete (ID)'), + help=_('Registered limit(s) to delete (ID)'), ) return parser @@ -111,21 +120,27 @@ def take_action(self, parsed_args): identity_client = self.app.client_manager.identity errors = 0 - for registered_limit_id in parsed_args.registered_limit_id: + for registered_limit_id in parsed_args.registered_limits: try: identity_client.registered_limits.delete(registered_limit_id) except Exception as e: errors += 1 - LOG.error(_("Failed to delete registered limit with ID " - "'%(id)s': %(e)s"), - {'id': registered_limit_id, 'e': e}) + LOG.error( + _( + "Failed to delete registered limit with ID " + "'%(id)s': %(e)s" + ), + {'id': registered_limit_id, 'e': e}, + ) if errors > 0: - total = len(parsed_args.registered_limit_id) - msg = (_("%(errors)s of %(total)s registered limits failed to " - "delete.") % {'errors': errors, 'total': total}) + total = len(parsed_args.registered_limits) + msg = _( + "%(errors)s of %(total)s registered limits failed to delete." + ) % {'errors': errors, 'total': total} raise exceptions.CommandError(msg) @@ -133,11 +148,13 @@ class ListRegisteredLimit(command.Lister): _description = _("List registered limits") def get_parser(self, prog_name): - parser = super(ListRegisteredLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--service', metavar='', - help=_('Service responsible for the resource to limit'), + help=_( + 'Service responsible for the resource to limit (name or ID)' + ), ) parser.add_argument( '--resource-name', metavar='', @@ -162,8 +179,7 @@ def take_action(self, parsed_args): ) region = None if parsed_args.region: - val = getattr(parsed_args, 'region', None) - if 'None' not in val: + if 'None' not in parsed_args.region: # NOTE (vishakha): Due to bug #1799153 and for any another # related case where GET resource API does not support the # filter by name, osc_lib.utils.find_resource() method cannot @@ -175,16 +191,27 @@ def take_action(self, parsed_args): region = common_utils.get_resource( identity_client.regions, parsed_args.region ) + else: + self.log.warning( + _( + "Passing 'None' to indicate no region is deprecated. " + "Instead, don't pass --region."
+ ) + ) registered_limits = identity_client.registered_limits.list( service=service, resource_name=parsed_args.resource_name, - region=region + region=region, ) columns = ( - 'ID', 'Service ID', 'Resource Name', 'Default Limit', - 'Description', 'Region ID' + 'ID', + 'Service ID', + 'Resource Name', + 'Default Limit', + 'Description', + 'Region ID', ) return ( columns, @@ -196,7 +223,7 @@ class SetRegisteredLimit(command.ShowOne): _description = _("Update information about a registered limit") def get_parser(self, prog_name): - parser = super(SetRegisteredLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'registered_limit_id', metavar='', @@ -205,18 +232,22 @@ def get_parser(self, prog_name): parser.add_argument( '--service', metavar='', - help=_('Service to be updated responsible for the resource to ' - 'limit. Either --service, --resource-name or --region must ' - 'be different than existing value otherwise it will be ' - 'duplicate entry') + help=_( + 'Service to be updated responsible for the resource to limit ' + '(name or ID). Either --service, --resource-name or --region ' + 'must be different than existing value otherwise it will be ' + 'duplicate entry' + ), ) parser.add_argument( '--resource-name', metavar='', - help=_('Resource to be updated responsible for the resource to ' - 'limit. Either --service, --resource-name or --region must ' - 'be different than existing value otherwise it will be ' - 'duplicate entry'), + help=_( + 'Resource to be updated responsible for the resource to ' + 'limit. Either --service, --resource-name or --region must ' + 'be different than existing value otherwise it will be ' + 'duplicate entry' + ), ) parser.add_argument( '--default-limit', @@ -232,10 +263,12 @@ def get_parser(self, prog_name): parser.add_argument( '--region', metavar='', - help=_('Region for the registered limit to affect. Either ' - '--service, --resource-name or --region must be ' - 'different than existing value otherwise it will be ' - 'duplicate entry'), + help=_( + 'Region for the registered limit to affect. 
Either ' + '--service, --resource-name or --region must be ' + 'different than existing value otherwise it will be ' + 'duplicate entry' + ), ) return parser @@ -250,8 +283,7 @@ def take_action(self, parsed_args): region = None if parsed_args.region: - val = getattr(parsed_args, 'region', None) - if 'None' not in val: + if 'None' not in parsed_args.region: # NOTE (vishakha): Due to bug #1799153 and for any another # related case where GET resource API does not support the # filter by name, osc_lib.utils.find_resource() method cannot @@ -263,6 +295,10 @@ def take_action(self, parsed_args): region = common_utils.get_resource( identity_client.regions, parsed_args.region ) + else: + self.log.warning( + _("Passing 'None' to indicate no region is deprecated.") + ) registered_limit = identity_client.registered_limits.update( parsed_args.registered_limit_id, @@ -270,7 +306,7 @@ def take_action(self, parsed_args): resource_name=parsed_args.resource_name, default_limit=parsed_args.default_limit, description=parsed_args.description, - region=region + region=region, ) registered_limit._info.pop('links', None) @@ -281,7 +317,7 @@ class ShowRegisteredLimit(command.ShowOne): _description = _("Display registered limit details") def get_parser(self, prog_name): - parser = super(ShowRegisteredLimit, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'registered_limit_id', metavar='', diff --git a/openstackclient/identity/v3/role.py b/openstackclient/identity/v3/role.py index a674564fe0..3c580d6f8b 100644 --- a/openstackclient/identity/v3/role.py +++ b/openstackclient/identity/v3/role.py @@ -17,11 +17,11 @@ import logging -from keystoneauth1 import exceptions as ks_exc -from osc_lib.command import command +from openstack import exceptions as sdk_exc from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -29,6 +29,25 @@ LOG = logging.getLogger(__name__) +def _format_role(role): + columns = ( + "id", + "name", + "domain_id", + "description", + ) + column_headers = ( + "id", + "name", + "domain_id", + "description", + ) + return ( + column_headers, + utils.get_item_properties(role, columns), + ) + + def _add_identity_and_resource_options_to_parser(parser): system_or_domain_or_project = parser.add_mutually_exclusive_group() system_or_domain_or_project.add_argument( @@ -63,33 +82,59 @@ def _add_identity_and_resource_options_to_parser(parser): common.add_inherited_option_to_parser(parser) -def _process_identity_and_resource_options(parsed_args, - identity_client_manager, - validate_actor_existence=True): - +def _process_identity_and_resource_options( + parsed_args, identity_client, validate_actor_existence=True +): def _find_user(): - try: - return common.find_user( - identity_client_manager, - parsed_args.user, - parsed_args.user_domain - ).id - except exceptions.CommandError: - if not validate_actor_existence: - return parsed_args.user - raise + domain_id = ( + common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.user_domain, + validate_actor_existence=validate_actor_existence, + ) + if parsed_args.user_domain + else None + ) + return common._find_sdk_id( + identity_client.find_user, + name_or_id=parsed_args.user, + validate_actor_existence=validate_actor_existence, + domain_id=domain_id, + ) def _find_group(): - try: - return common.find_group( - identity_client_manager, - parsed_args.group, - parsed_args.group_domain - ).id - 
except exceptions.CommandError: - if not validate_actor_existence: - return parsed_args.group - raise + domain_id = ( + common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.group_domain, + validate_actor_existence=validate_actor_existence, + ) + if parsed_args.group_domain + else None + ) + return common._find_sdk_id( + identity_client.find_group, + name_or_id=parsed_args.group, + validate_actor_existence=validate_actor_existence, + domain_id=domain_id, + ) + + def _find_project(): + domain_id = ( + common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.project_domain, + validate_actor_existence=validate_actor_existence, + ) + if parsed_args.project_domain + else None + ) + return common._find_sdk_id( + identity_client.find_project, + name_or_id=parsed_args.project, + validate_actor_existence=validate_actor_existence, + domain_id=domain_id, + ) kwargs = {} if parsed_args.user and parsed_args.system: @@ -97,43 +142,46 @@ def _find_group(): kwargs['system'] = parsed_args.system elif parsed_args.user and parsed_args.domain: kwargs['user'] = _find_user() - kwargs['domain'] = common.find_domain( - identity_client_manager, - parsed_args.domain, - ).id + kwargs['domain'] = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.domain, + validate_actor_existence=validate_actor_existence, + ) elif parsed_args.user and parsed_args.project: kwargs['user'] = _find_user() - kwargs['project'] = common.find_project( - identity_client_manager, - parsed_args.project, - parsed_args.project_domain, - ).id + kwargs['project'] = _find_project() elif parsed_args.group and parsed_args.system: kwargs['group'] = _find_group() kwargs['system'] = parsed_args.system elif parsed_args.group and parsed_args.domain: kwargs['group'] = _find_group() - kwargs['domain'] = common.find_domain( - identity_client_manager, - parsed_args.domain, - ).id + kwargs['domain'] = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.domain, + validate_actor_existence=validate_actor_existence, + ) elif parsed_args.group and parsed_args.project: kwargs['group'] = _find_group() - kwargs['project'] = common.find_project( - identity_client_manager, - parsed_args.project, - parsed_args.project_domain, - ).id - kwargs['os_inherit_extension_inherited'] = parsed_args.inherited + kwargs['project'] = _find_project() + else: + msg = _( + "Role not added, incorrect set of arguments " + "provided. See openstack --help for more details" + ) + raise exceptions.CommandError(msg) + + kwargs['inherited'] = parsed_args.inherited return kwargs class AddRole(command.Command): - _description = _("Adds a role assignment to a user or group on the " - "system, a domain, or a project") + _description = _( + "Adds a role assignment to a user or group on the " + "system, a domain, or a project" + ) def get_parser(self, prog_name): - parser = super(AddRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role', metavar='', @@ -144,35 +192,94 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - - if (not parsed_args.user and not parsed_args.domain and - not parsed_args.group and not parsed_args.project): - msg = _("Role not added, incorrect set of arguments " - "provided. 
See openstack --help for more details") + identity_client = self.app.client_manager.sdk_connection.identity + + if ( + not parsed_args.user + and not parsed_args.domain + and not parsed_args.group + and not parsed_args.project + ): + msg = _( + "Role not added, incorrect set of arguments " + "provided. See openstack --help for more details" + ) raise exceptions.CommandError(msg) domain_id = None if parsed_args.role_domain: - domain_id = common.find_domain(identity_client, - parsed_args.role_domain).id - role = utils.find_resource( - identity_client.roles, - parsed_args.role, - domain_id=domain_id + domain_id = common._find_sdk_id( + identity_client.find_domain, name_or_id=parsed_args.role_domain + ) + role = common._find_sdk_id( + identity_client.find_role, + name_or_id=parsed_args.role, + domain_id=domain_id, ) - kwargs = _process_identity_and_resource_options( - parsed_args, self.app.client_manager.identity) + add_kwargs = _process_identity_and_resource_options( + parsed_args, identity_client + ) - identity_client.roles.grant(role.id, **kwargs) + if add_kwargs.get("domain"): + if add_kwargs.get("user"): + identity_client.assign_domain_role_to_user( + domain=add_kwargs["domain"], + user=add_kwargs["user"], + role=role, + inherited=add_kwargs["inherited"], + ) + if add_kwargs.get("group"): + identity_client.assign_domain_role_to_group( + domain=add_kwargs["domain"], + group=add_kwargs["group"], + role=role, + inherited=add_kwargs["inherited"], + ) + elif add_kwargs.get("project"): + if add_kwargs.get("user"): + identity_client.assign_project_role_to_user( + project=add_kwargs["project"], + user=add_kwargs["user"], + role=role, + inherited=add_kwargs["inherited"], + ) + if add_kwargs.get("group"): + identity_client.assign_project_role_to_group( + project=add_kwargs["project"], + group=add_kwargs["group"], + role=role, + inherited=add_kwargs["inherited"], + ) + elif add_kwargs.get("system"): + if add_kwargs["inherited"]: + LOG.warning( + _( + "'--inherited' was given, which is not supported " + "when adding a system role. This will be an error " + "in a future release." 
+ ) + ) + # TODO(0weng): This should be an error in a future release + if add_kwargs.get("user"): + identity_client.assign_system_role_to_user( + system=add_kwargs["system"], + user=add_kwargs["user"], + role=role, + ) + if add_kwargs.get("group"): + identity_client.assign_system_role_to_group( + system=add_kwargs["system"], + group=add_kwargs["group"], + role=role, + ) class CreateRole(command.ShowOne): _description = _("Create new role") def get_parser(self, prog_name): - parser = super(CreateRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -197,42 +304,49 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - domain_id = None + create_kwargs = {} if parsed_args.domain: - domain_id = common.find_domain(identity_client, - parsed_args.domain).id + create_kwargs['domain_id'] = common._find_sdk_id( + identity_client.find_domain, name_or_id=parsed_args.domain + ) + + if parsed_args.name: + create_kwargs['name'] = parsed_args.name - options = common.get_immutable_options(parsed_args) + if parsed_args.description: + create_kwargs['description'] = parsed_args.description + + if parsed_args.immutable is not None: + create_kwargs['options'] = {"immutable": parsed_args.immutable} try: - role = identity_client.roles.create( - name=parsed_args.name, domain=domain_id, - description=parsed_args.description, options=options) + role = identity_client.create_role(**create_kwargs) - except ks_exc.Conflict: + except sdk_exc.ConflictException: if parsed_args.or_show: - role = utils.find_resource(identity_client.roles, - parsed_args.name, - domain_id=domain_id) + role = identity_client.find_role( + name_or_id=parsed_args.name, + domain_id=parsed_args.domain, + ignore_missing=False, + ) LOG.info(_('Returning existing role %s'), role.name) else: raise - role._info.pop('links') - return zip(*sorted(role._info.items())) + return _format_role(role) class DeleteRole(command.Command): _description = _("Delete role(s)") def get_parser(self, prog_name): - parser = super(DeleteRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'roles', metavar='', - nargs="+", + nargs='+', help=_('Role(s) to delete (name or ID)'), ) parser.add_argument( @@ -243,31 +357,38 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity domain_id = None if parsed_args.domain: - domain_id = common.find_domain(identity_client, - parsed_args.domain).id + domain_id = common._find_sdk_id( + identity_client.find_domain, parsed_args.domain + ) errors = 0 for role in parsed_args.roles: try: - role_obj = utils.find_resource( - identity_client.roles, - role, - domain_id=domain_id + role_id = common._find_sdk_id( + identity_client.find_role, + name_or_id=role, + domain_id=domain_id, ) - identity_client.roles.delete(role_obj.id) + identity_client.delete_role(role=role_id, ignore_missing=False) except Exception as e: errors += 1 - LOG.error(_("Failed to delete role with " - "name or ID '%(role)s': %(e)s"), - {'role': role, 'e': e}) + LOG.error( + _( + "Failed to delete role with " + "name or ID '%(role)s': %(e)s" + ), + {'role': role, 'e': e}, + ) if errors > 0: total = len(parsed_args.roles) - msg = (_("%(errors)s of %(total)s roles failed " - 
"to delete.") % {'errors': errors, 'total': total}) + msg = _("%(errors)s of %(total)s roles failed to delete.") % { + 'errors': errors, + 'total': total, + } raise exceptions.CommandError(msg) @@ -275,7 +396,7 @@ class ListRole(command.Lister): _description = _("List roles") def get_parser(self, prog_name): - parser = super(ListRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--domain', metavar='', @@ -284,34 +405,38 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity if parsed_args.domain: - domain = common.find_domain( - identity_client, - parsed_args.domain, + domain = identity_client.find_domain( + name_or_id=parsed_args.domain, + ignore_missing=False, + ) + data = identity_client.roles(domain_id=domain.id) + return ( + ('ID', 'Name', 'Domain'), + ( + utils.get_item_properties(s, ('id', 'name')) + + (domain.name,) + for s in data + ), ) - columns = ('ID', 'Name', 'Domain') - data = identity_client.roles.list(domain_id=domain.id) - for role in data: - role.domain = domain.name - else: - columns = ('ID', 'Name') - data = identity_client.roles.list() - return (columns, - (utils.get_item_properties( - s, columns, - formatters={}, - ) for s in data)) + else: + data = identity_client.roles() + return ( + ('ID', 'Name'), + (utils.get_item_properties(s, ('id', 'name')) for s in data), + ) class RemoveRole(command.Command): - _description = _("Removes a role assignment from system/domain/project : " - "user/group") + _description = _( + "Removes a role assignment from system/domain/project : user/group" + ) def get_parser(self, prog_name): - parser = super(RemoveRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role', metavar='', @@ -323,36 +448,87 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - - if (not parsed_args.user and not parsed_args.domain and - not parsed_args.group and not parsed_args.project): - msg = _("Incorrect set of arguments provided. " - "See openstack --help for more details") + identity_client = self.app.client_manager.sdk_connection.identity + if ( + not parsed_args.user + and not parsed_args.domain + and not parsed_args.group + and not parsed_args.project + ): + msg = _( + "Incorrect set of arguments provided. 
" + "See openstack --help for more details" + ) raise exceptions.CommandError(msg) domain_id = None if parsed_args.role_domain: - domain_id = common.find_domain(identity_client, - parsed_args.role_domain).id - role = utils.find_resource( - identity_client.roles, - parsed_args.role, - domain_id=domain_id + domain_id = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.role_domain, + ) + role = common._find_sdk_id( + identity_client.find_role, + name_or_id=parsed_args.role, + domain_id=domain_id, ) - kwargs = _process_identity_and_resource_options( - parsed_args, self.app.client_manager.identity, - validate_actor_existence=False + remove_kwargs = _process_identity_and_resource_options( + parsed_args, + identity_client, + validate_actor_existence=False, ) - identity_client.roles.revoke(role.id, **kwargs) + + if remove_kwargs.get("domain"): + if remove_kwargs.get("user"): + identity_client.unassign_domain_role_from_user( + domain=remove_kwargs["domain"], + user=remove_kwargs["user"], + role=role, + inherited=remove_kwargs["inherited"], + ) + if remove_kwargs.get("group"): + identity_client.unassign_domain_role_from_group( + domain=remove_kwargs["domain"], + group=remove_kwargs["group"], + role=role, + inherited=remove_kwargs["inherited"], + ) + elif remove_kwargs.get("project"): + if remove_kwargs.get("user"): + identity_client.unassign_project_role_from_user( + project=remove_kwargs["project"], + user=remove_kwargs["user"], + role=role, + inherited=remove_kwargs["inherited"], + ) + if remove_kwargs.get("group"): + identity_client.unassign_project_role_from_group( + project=remove_kwargs["project"], + group=remove_kwargs["group"], + role=role, + inherited=remove_kwargs["inherited"], + ) + elif remove_kwargs.get("system"): + if remove_kwargs.get("user"): + identity_client.unassign_system_role_from_user( + system=remove_kwargs["system"], + user=remove_kwargs["user"], + role=role, + ) + if remove_kwargs.get("group"): + identity_client.unassign_system_role_from_group( + system=remove_kwargs["system"], + group=remove_kwargs["group"], + role=role, + ) class SetRole(command.Command): _description = _("Set role properties") def get_parser(self, prog_name): - parser = super(SetRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role', metavar='', @@ -377,28 +553,40 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity + + update_kwargs = {} + if parsed_args.description: + update_kwargs["description"] = parsed_args.description + if parsed_args.name: + update_kwargs["name"] = parsed_args.name domain_id = None if parsed_args.domain: - domain_id = common.find_domain(identity_client, - parsed_args.domain).id + domain_id = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.domain, + ) + update_kwargs["domain_id"] = domain_id - options = common.get_immutable_options(parsed_args) - role = utils.find_resource(identity_client.roles, - parsed_args.role, - domain_id=domain_id) + if parsed_args.immutable is not None: + update_kwargs["options"] = {"immutable": parsed_args.immutable} - identity_client.roles.update(role.id, name=parsed_args.name, - description=parsed_args.description, - options=options) + role = common._find_sdk_id( + identity_client.find_role, + name_or_id=parsed_args.role, + domain_id=domain_id, + ) + update_kwargs["role"] = role + + 
identity_client.update_role(**update_kwargs) class ShowRole(command.ShowOne): _description = _("Display role details") def get_parser(self, prog_name): - parser = super(ShowRole, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'role', metavar='', @@ -412,16 +600,19 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity domain_id = None if parsed_args.domain: - domain_id = common.find_domain(identity_client, - parsed_args.domain).id + domain_id = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.domain, + ) - role = utils.find_resource(identity_client.roles, - parsed_args.role, - domain_id=domain_id) + role = identity_client.find_role( + name_or_id=parsed_args.role, + domain_id=domain_id, + ignore_missing=False, + ) - role._info.pop('links') - return zip(*sorted(role._info.items())) + return _format_role(role) diff --git a/openstackclient/identity/v3/role_assignment.py b/openstackclient/identity/v3/role_assignment.py index 9c2f3d249e..78c010f0e1 100644 --- a/openstackclient/identity/v3/role_assignment.py +++ b/openstackclient/identity/v3/role_assignment.py @@ -13,22 +13,51 @@ """Identity v3 Assignment action implementations""" -from osc_lib.command import command -from osc_lib import utils - +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common +def _format_role_assignment_(assignment, include_names): + def _get_names(attr): + return ( + ( + attr['name'] + + ( + "@" + domain['name'] + if (domain := attr.get('domain')) + else '' + ) + ) + or '' + if attr + else '' + ) + + def _get_ids(attr): + return attr['id'] or '' if attr else '' + + func = _get_names if include_names else _get_ids + return ( + func(assignment.role), + func(assignment.user), + func(assignment.group), + func(assignment.scope.get('project')), + func(assignment.scope.get('domain')), + 'all' if assignment.scope.get("system") else '', + assignment.scope.get("OS-INHERIT:inherited_to") == 'projects', + ) + + class ListRoleAssignment(command.Lister): _description = _("List role assignments") def get_parser(self, prog_name): - parser = super(ListRoleAssignment, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--effective', action="store_true", - default=False, + default=None, help=_('Returns only effective role assignments'), ) parser.add_argument( @@ -84,171 +113,127 @@ def get_parser(self, prog_name): action="store_true", dest='authproject', help='Only list assignments for the project to which the ' - 'authenticated user\'s token is scoped', + 'authenticated user\'s token is scoped', ) return parser - def _as_tuple(self, assignment): - return (assignment.role, assignment.user, assignment.group, - assignment.project, assignment.domain, assignment.system, - assignment.inherited) - def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity auth_ref = self.app.client_manager.auth_ref - role = None + role_id = None role_domain_id = None if parsed_args.role_domain: - role_domain_id = common.find_domain(identity_client, - parsed_args.role_domain).id + role_domain_id = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.role_domain, + ) if parsed_args.role: - role = utils.find_resource( - 
identity_client.roles, - parsed_args.role, - domain_id=role_domain_id + role_id = common._find_sdk_id( + identity_client.find_role, + name_or_id=parsed_args.role, + domain_id=role_domain_id, ) - user = None + user_domain_id = None + if parsed_args.user_domain: + user_domain_id = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.user_domain, + ) + + user_id = None if parsed_args.user: - user = common.find_user( - identity_client, - parsed_args.user, - parsed_args.user_domain, + user_id = common._find_sdk_id( + identity_client.find_user, + name_or_id=parsed_args.user, + domain_id=user_domain_id, ) elif parsed_args.authuser: if auth_ref: - user = common.find_user( - identity_client, - auth_ref.user_id + user_id = common._find_sdk_id( + identity_client.find_user, + name_or_id=auth_ref.user_id, ) system = None if parsed_args.system: system = parsed_args.system - domain = None + domain_id = None if parsed_args.domain: - domain = common.find_domain( - identity_client, - parsed_args.domain, + domain_id = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.domain, ) - project = None + project_domain_id = None + if parsed_args.project_domain: + project_domain_id = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.project_domain, + ) + + project_id = None if parsed_args.project: - project = common.find_project( - identity_client, - common._get_token_resource(identity_client, 'project', - parsed_args.project), - parsed_args.project_domain, + project_id = common._find_sdk_id( + identity_client.find_project, + name_or_id=common._get_token_resource( + identity_client, 'project', parsed_args.project + ), + domain_id=project_domain_id, ) elif parsed_args.authproject: if auth_ref: - project = common.find_project( - identity_client, - auth_ref.project_id + project_id = common._find_sdk_id( + identity_client.find_project, + name_or_id=auth_ref.project_id, ) - group = None + group_domain_id = None + if parsed_args.group_domain: + group_domain_id = common._find_sdk_id( + identity_client.find_domain, + name_or_id=parsed_args.group_domain, + ) + + group_id = None if parsed_args.group: - group = common.find_group( - identity_client, - parsed_args.group, - parsed_args.group_domain, + group_id = common._find_sdk_id( + identity_client.find_group, + name_or_id=parsed_args.group, + domain_id=group_domain_id, ) - include_names = True if parsed_args.names else False - effective = True if parsed_args.effective else False + include_names = True if parsed_args.names else None columns = ( - 'Role', 'User', 'Group', 'Project', 'Domain', 'System', 'Inherited' + 'Role', + 'User', + 'Group', + 'Project', + 'Domain', + 'System', + 'Inherited', ) inherited_to = 'projects' if parsed_args.inherited else None - data = identity_client.role_assignments.list( - domain=domain, - user=user, - group=group, - project=project, - system=system, - role=role, - effective=effective, - os_inherit_extension_inherited_to=inherited_to, - include_names=include_names) + + data = identity_client.role_assignments( + role_id=role_id, + user_id=user_id, + group_id=group_id, + scope_project_id=project_id, + scope_domain_id=domain_id, + scope_system=system, + effective=parsed_args.effective, + include_names=include_names, + inherited_to=inherited_to, + ) data_parsed = [] for assignment in data: - # Removing the extra "scope" layer in the assignment json - scope = assignment.scope - if 'project' in scope: - if include_names: - prj = '@'.join([scope['project']['name'], - 
scope['project']['domain']['name']]) - setattr(assignment, 'project', prj) - else: - setattr(assignment, 'project', scope['project']['id']) - assignment.domain = '' - assignment.system = '' - elif 'domain' in scope: - if include_names: - setattr(assignment, 'domain', scope['domain']['name']) - else: - setattr(assignment, 'domain', scope['domain']['id']) - assignment.project = '' - assignment.system = '' - elif 'system' in scope: - # NOTE(lbragstad): If, or when, keystone supports role - # assignments on subsets of a system, this will have to evolve - # to handle that case instead of hardcoding to the entire - # system. - setattr(assignment, 'system', 'all') - assignment.domain = '' - assignment.project = '' - else: - assignment.system = '' - assignment.domain = '' - assignment.project = '' - - inherited = scope.get('OS-INHERIT:inherited_to') == 'projects' - assignment.inherited = inherited - - del assignment.scope - - if hasattr(assignment, 'user'): - if include_names: - usr = '@'.join([assignment.user['name'], - assignment.user['domain']['name']]) - setattr(assignment, 'user', usr) - else: - setattr(assignment, 'user', assignment.user['id']) - assignment.group = '' - elif hasattr(assignment, 'group'): - if include_names: - grp = '@'.join([assignment.group['name'], - assignment.group['domain']['name']]) - setattr(assignment, 'group', grp) - else: - setattr(assignment, 'group', assignment.group['id']) - assignment.user = '' - else: - assignment.user = '' - assignment.group = '' - - if hasattr(assignment, 'role'): - if include_names: - # TODO(henry-nash): If this is a domain specific role it - # would be good show this as role@domain, although this - # domain info is not yet included in the response from the - # server. Although we could get it by re-reading the role - # from the ID, let's wait until the server does the right - # thing. 
- setattr(assignment, 'role', assignment.role['name']) - else: - setattr(assignment, 'role', assignment.role['id']) - else: - assignment.role = '' - - # Creating a tuple from data object fields - # (including the blank ones) - data_parsed.append(self._as_tuple(assignment)) + data_parsed.append( + _format_role_assignment_(assignment, include_names) + ) return columns, tuple(data_parsed) diff --git a/openstackclient/identity/v3/service.py b/openstackclient/identity/v3/service.py index 9dc6696251..53a706299e 100644 --- a/openstackclient/identity/v3/service.py +++ b/openstackclient/identity/v3/service.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -28,11 +28,36 @@ LOG = logging.getLogger(__name__) +def _format_service(service): + columns = ( + 'id', + 'name', + 'type', + 'is_enabled', + 'description', + ) + column_headers = ( + 'id', + 'name', + 'type', + 'enabled', + 'description', + ) + + return ( + column_headers, + utils.get_item_properties( + service, + columns, + ), + ) + + class CreateService(command.ShowOne): _description = _("Create new service") def get_parser(self, prog_name): - parser = super(CreateService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'type', metavar='', @@ -52,38 +77,37 @@ def get_parser(self, prog_name): enable_group.add_argument( '--enable', action='store_true', + dest='is_enabled', + default=True, help=_('Enable service (default)'), ) enable_group.add_argument( '--disable', - action='store_true', + action='store_false', + dest='is_enabled', + default=True, help=_('Disable service'), ) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - - enabled = True - if parsed_args.disable: - enabled = False + identity_client = self.app.client_manager.sdk_connection.identity - service = identity_client.services.create( + service = identity_client.create_service( name=parsed_args.name, type=parsed_args.type, description=parsed_args.description, - enabled=enabled, + is_enabled=parsed_args.is_enabled, ) - service._info.pop('links') - return zip(*sorted(service._info.items())) + return _format_service(service) class DeleteService(command.Command): _description = _("Delete service(s)") def get_parser(self, prog_name): - parser = super(DeleteService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service', metavar='', @@ -93,22 +117,28 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity result = 0 for i in parsed_args.service: try: - service = common.find_service(identity_client, i) - identity_client.services.delete(service.id) + service = common.find_service_sdk(identity_client, i) + identity_client.delete_service(service.id) except Exception as e: result += 1 - LOG.error(_("Failed to delete consumer with type, " - "name or ID '%(service)s': %(e)s"), - {'service': i, 'e': e}) + LOG.error( + _( + "Failed to delete consumer with type, " + "name or ID '%(service)s': %(e)s" + ), + {'service': i, 'e': e}, + ) if result > 0: total = len(parsed_args.service) - msg = (_("%(result)s of %(total)s services failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of 
%(total)s services failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -116,7 +146,7 @@ class ListService(command.Lister): _description = _("List services") def get_parser(self, prog_name): - parser = super(ListService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', @@ -126,14 +156,18 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): + identity_client = self.app.client_manager.sdk_connection.identity + columns: tuple[str, ...] = ('id', 'name', 'type') + column_headers: tuple[str, ...] = ('ID', 'Name', 'Type') if parsed_args.long: - columns = ('ID', 'Name', 'Type', 'Description', 'Enabled') - else: - columns = ('ID', 'Name', 'Type') - data = self.app.client_manager.identity.services.list() + columns += ('description', 'is_enabled') + column_headers += ('Description', 'Enabled') + + data = identity_client.services() + return ( - columns, + column_headers, (utils.get_item_properties(s, columns) for s in data), ) @@ -142,7 +176,7 @@ class SetService(command.Command): _description = _("Set service properties") def get_parser(self, prog_name): - parser = super(SetService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service', metavar='', @@ -167,20 +201,23 @@ def get_parser(self, prog_name): enable_group.add_argument( '--enable', action='store_true', + dest='is_enabled', + default=None, help=_('Enable service'), ) enable_group.add_argument( '--disable', - action='store_true', + action='store_false', + dest='is_enabled', + default=None, help=_('Disable service'), ) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - service = common.find_service(identity_client, - parsed_args.service) + service = common.find_service_sdk(identity_client, parsed_args.service) kwargs = {} if parsed_args.type: kwargs['type'] = parsed_args.type @@ -188,22 +225,17 @@ def take_action(self, parsed_args): kwargs['name'] = parsed_args.name if parsed_args.description: kwargs['description'] = parsed_args.description - if parsed_args.enable: - kwargs['enabled'] = True - if parsed_args.disable: - kwargs['enabled'] = False + if parsed_args.is_enabled is not None: + kwargs['is_enabled'] = parsed_args.is_enabled - identity_client.services.update( - service.id, - **kwargs - ) + identity_client.update_service(service.id, **kwargs) class ShowService(command.ShowOne): _description = _("Display service details") def get_parser(self, prog_name): - parser = super(ShowService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service', metavar='', @@ -212,9 +244,8 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - service = common.find_service(identity_client, parsed_args.service) + service = common.find_service_sdk(identity_client, parsed_args.service) - service._info.pop('links') - return zip(*sorted(service._info.items())) + return _format_service(service) diff --git a/openstackclient/identity/v3/service_provider.py b/openstackclient/identity/v3/service_provider.py index e106c787b4..02aae66bf8 100644 --- a/openstackclient/identity/v3/service_provider.py +++ b/openstackclient/identity/v3/service_provider.py @@ -15,21 
+15,44 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) +def _format_service_provider(sp): + column_headers = ( + 'id', + 'enabled', + 'description', + 'auth_url', + 'sp_url', + 'relay_state_prefix', + ) + columns = ( + 'id', + 'is_enabled', + 'description', + 'auth_url', + 'sp_url', + 'relay_state_prefix', + ) + return ( + column_headers, + utils.get_item_properties(sp, columns), + ) + + class CreateServiceProvider(command.ShowOne): _description = _("Create new service provider") def get_parser(self, prog_name): - parser = super(CreateServiceProvider, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service_provider_id', metavar='', @@ -39,8 +62,10 @@ def get_parser(self, prog_name): '--auth-url', metavar='', required=True, - help=_('Authentication URL of remote federated service provider ' - '(required)'), + help=_( + 'Authentication URL of remote federated service provider ' + '(required)' + ), ) parser.add_argument( '--description', @@ -51,21 +76,22 @@ def get_parser(self, prog_name): '--service-provider-url', metavar='', required=True, - help=_('A service URL where SAML assertions are being sent ' - '(required)'), + help=_( + 'A service URL where SAML assertions are being sent (required)' + ), ) enable_service_provider = parser.add_mutually_exclusive_group() enable_service_provider.add_argument( '--enable', - dest='enabled', + dest='is_enabled', action='store_true', default=True, help=_('Enable the service provider (default)'), ) enable_service_provider.add_argument( '--disable', - dest='enabled', + dest='is_enabled', action='store_false', help=_('Disable the service provider'), ) @@ -73,23 +99,34 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - service_client = self.app.client_manager.identity - sp = service_client.federation.service_providers.create( - id=parsed_args.service_provider_id, - auth_url=parsed_args.auth_url, - description=parsed_args.description, - enabled=parsed_args.enabled, - sp_url=parsed_args.service_provider_url) + service_client = self.app.client_manager.sdk_connection.identity + + kwargs = {} + + kwargs = {'id': parsed_args.service_provider_id} + + if parsed_args.is_enabled is not None: + kwargs['is_enabled'] = parsed_args.is_enabled + + if parsed_args.description: + kwargs['description'] = parsed_args.description - sp._info.pop('links', None) - return zip(*sorted(sp._info.items())) + if parsed_args.auth_url: + kwargs['auth_url'] = parsed_args.auth_url + + if parsed_args.service_provider_url: + kwargs['sp_url'] = parsed_args.service_provider_url + + sp = service_client.create_service_provider(**kwargs) + + return _format_service_provider(sp) class DeleteServiceProvider(command.Command): _description = _("Delete service provider(s)") def get_parser(self, prog_name): - parser = super(DeleteServiceProvider, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service_provider', metavar='', @@ -99,21 +136,26 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - service_client = self.app.client_manager.identity + service_client = self.app.client_manager.sdk_connection.identity result = 0 for i in parsed_args.service_provider: try: - service_client.federation.service_providers.delete(i) + service_client.delete_service_provider(i) except Exception as e: 
result += 1 - LOG.error(_("Failed to delete service provider with " - "name or ID '%(provider)s': %(e)s"), - {'provider': i, 'e': e}) + LOG.error( + _( + "Failed to delete service provider with " + "name or ID '%(provider)s': %(e)s" + ), + {'provider': i, 'e': e}, + ) if result > 0: total = len(parsed_args.service_provider) - msg = (_("%(result)s of %(total)s service providers failed" - " to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s service providers failed to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -121,22 +163,36 @@ class ListServiceProvider(command.Lister): _description = _("List service providers") def take_action(self, parsed_args): - service_client = self.app.client_manager.identity - data = service_client.federation.service_providers.list() - - column_headers = ('ID', 'Enabled', 'Description', 'Auth URL') - return (column_headers, - (utils.get_item_properties( - s, column_headers, - formatters={}, - ) for s in data)) + service_client = self.app.client_manager.sdk_connection.identity + data = service_client.service_providers() + + column_headers = ( + 'ID', + 'Enabled', + 'Description', + 'Auth URL', + 'Service Provider URL', + 'Relay State Prefix', + ) + columns = ( + 'id', + 'is_enabled', + 'description', + 'auth_url', + 'sp_url', + 'relay_state_prefix', + ) + return ( + column_headers, + (utils.get_item_properties(s, columns) for s in data), + ) -class SetServiceProvider(command.Command): +class SetServiceProvider(command.ShowOne): _description = _("Set service provider properties") def get_parser(self, prog_name): - parser = super(SetServiceProvider, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service_provider', metavar='', @@ -145,8 +201,9 @@ def get_parser(self, prog_name): parser.add_argument( '--auth-url', metavar='', - help=_('New Authentication URL of remote ' - 'federated service provider'), + help=_( + 'New Authentication URL of remote federated service provider' + ), ) parser.add_argument( @@ -162,39 +219,50 @@ def get_parser(self, prog_name): enable_service_provider = parser.add_mutually_exclusive_group() enable_service_provider.add_argument( '--enable', + dest='is_enabled', action='store_true', + default=None, help=_('Enable the service provider'), ) enable_service_provider.add_argument( '--disable', - action='store_true', + dest='is_enabled', + action='store_false', + default=None, help=_('Disable the service provider'), ) return parser def take_action(self, parsed_args): - federation_client = self.app.client_manager.identity.federation + service_client = self.app.client_manager.sdk_connection.identity + + kwargs = {} - enabled = None - if parsed_args.enable is True: - enabled = True - elif parsed_args.disable is True: - enabled = False + if parsed_args.is_enabled is not None: + kwargs['is_enabled'] = parsed_args.is_enabled - federation_client.service_providers.update( + if parsed_args.description: + kwargs['description'] = parsed_args.description + + if parsed_args.auth_url: + kwargs['auth_url'] = parsed_args.auth_url + + if parsed_args.service_provider_url: + kwargs['sp_url'] = parsed_args.service_provider_url + + service_provider = service_client.update_service_provider( parsed_args.service_provider, - enabled=enabled, - description=parsed_args.description, - auth_url=parsed_args.auth_url, - sp_url=parsed_args.service_provider_url, + **kwargs, ) + return _format_service_provider(service_provider) + class 
ShowServiceProvider(command.ShowOne): _description = _("Display service provider details") def get_parser(self, prog_name): - parser = super(ShowServiceProvider, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'service_provider', metavar='', @@ -203,11 +271,10 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - service_client = self.app.client_manager.identity - service_provider = utils.find_resource( - service_client.federation.service_providers, + service_client = self.app.client_manager.sdk_connection.identity + service_provider = service_client.find_service_provider( parsed_args.service_provider, - id=parsed_args.service_provider) + ignore_missing=False, + ) - service_provider._info.pop('links', None) - return zip(*sorted(service_provider._info.items())) + return _format_service_provider(service_provider) diff --git a/openstackclient/identity/v3/tag.py b/openstackclient/identity/v3/tag.py index abf022d486..41493c9936 100644 --- a/openstackclient/identity/v3/tag.py +++ b/openstackclient/identity/v3/tag.py @@ -16,7 +16,6 @@ class _CommaListAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values.split(',')) @@ -26,29 +25,41 @@ def add_tag_filtering_option_to_parser(parser, collection_name): '--tags', metavar='[,,...]', action=_CommaListAction, - help=_('List %s which have all given tag(s) ' - '(Comma-separated list of tags)') % collection_name + help=_( + 'List %s which have all given tag(s) ' + '(Comma-separated list of tags)' + ) + % collection_name, ) parser.add_argument( '--tags-any', metavar='[,,...]', action=_CommaListAction, - help=_('List %s which have any given tag(s) ' - '(Comma-separated list of tags)') % collection_name + help=_( + 'List %s which have any given tag(s) ' + '(Comma-separated list of tags)' + ) + % collection_name, ) parser.add_argument( '--not-tags', metavar='[,,...]', action=_CommaListAction, - help=_('Exclude %s which have all given tag(s) ' - '(Comma-separated list of tags)') % collection_name + help=_( + 'Exclude %s which have all given tag(s) ' + '(Comma-separated list of tags)' + ) + % collection_name, ) parser.add_argument( '--not-tags-any', metavar='[,,...]', action=_CommaListAction, - help=_('Exclude %s which have any given tag(s) ' - '(Comma-separated list of tags)') % collection_name + help=_( + 'Exclude %s which have any given tag(s) ' + '(Comma-separated list of tags)' + ) + % collection_name, ) @@ -71,8 +82,10 @@ def add_tag_option_to_parser_for_create(parser, resource_name): dest='tags', metavar='', default=[], - help=_('Tag to be added to the %s ' - '(repeat option to set multiple tags)') % resource_name + help=_( + 'Tag to be added to the %s (repeat option to set multiple tags)' + ) + % resource_name, ) @@ -83,22 +96,32 @@ def add_tag_option_to_parser_for_set(parser, resource_name): dest='tags', metavar='', default=[], - help=_('Tag to be added to the %s ' - '(repeat option to set multiple tags)') % resource_name + help=_( + 'Tag to be added to the %s (repeat option to set multiple tags)' + ) + % resource_name, ) parser.add_argument( '--clear-tags', action='store_true', - help=_('Clear tags associated with the %s. Specify ' - 'both --tag and --clear-tags to overwrite ' - 'current tags') % resource_name + help=_( + 'Clear tags associated with the %s. 
Specify ' + 'both --tag and --clear-tags to overwrite ' + 'current tags' + ) + % resource_name, ) parser.add_argument( '--remove-tag', + action='append', + dest='remove_tags', metavar='', default=[], - help=_('Tag to be deleted from the %s ' - '(repeat option to delete multiple tags)') % resource_name + help=_( + 'Tag to be deleted from the %s ' + '(repeat option to delete multiple tags)' + ) + % resource_name, ) @@ -106,11 +129,8 @@ def update_tags_in_args(parsed_args, obj, args): if parsed_args.clear_tags: args['tags'] = [] obj.tags = [] - if parsed_args.remove_tag: - if parsed_args.remove_tag in obj.tags: - obj.tags.remove(parsed_args.remove_tag) - args['tags'] = list(set(obj.tags)) + if parsed_args.remove_tags: + args['tags'] = sorted(set(obj.tags) - set(parsed_args.remove_tags)) return if parsed_args.tags: - args['tags'] = list(set(obj.tags).union( - set(parsed_args.tags))) + args['tags'] = sorted(set(obj.tags).union(set(parsed_args.tags))) diff --git a/openstackclient/identity/v3/token.py b/openstackclient/identity/v3/token.py index f14dd8bc3e..05e374caf0 100644 --- a/openstackclient/identity/v3/token.py +++ b/openstackclient/identity/v3/token.py @@ -15,10 +15,10 @@ """Identity v3 Token action implementations""" -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -27,7 +27,7 @@ class AuthorizeRequestToken(command.ShowOne): _description = _("Authorize a request token") def get_parser(self, prog_name): - parser = super(AuthorizeRequestToken, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--request-key', metavar='', @@ -37,11 +37,14 @@ def get_parser(self, prog_name): parser.add_argument( '--role', metavar='', + dest='roles', action='append', default=[], required=True, - help=_('Roles to authorize (name or ID) ' - '(repeat option to set multiple values) (required)'), + help=_( + 'Roles to authorize (name or ID) ' + '(repeat option to set multiple values) (required)' + ), ) return parser @@ -50,7 +53,7 @@ def take_action(self, parsed_args): # NOTE(stevemar): We want a list of role ids roles = [] - for role in parsed_args.role: + for role in parsed_args.roles: role_id = utils.find_resource( identity_client.roles, role, @@ -58,8 +61,8 @@ def take_action(self, parsed_args): roles.append(role_id) verifier_pin = identity_client.oauth1.request_tokens.authorize( - parsed_args.request_key, - roles) + parsed_args.request_key, roles + ) return zip(*sorted(verifier_pin._info.items())) @@ -68,45 +71,48 @@ class CreateAccessToken(command.ShowOne): _description = _("Create an access token") def get_parser(self, prog_name): - parser = super(CreateAccessToken, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--consumer-key', metavar='', help=_('Consumer key (required)'), - required=True + required=True, ) parser.add_argument( '--consumer-secret', metavar='', help=_('Consumer secret (required)'), - required=True + required=True, ) parser.add_argument( '--request-key', metavar='', help=_('Request token to exchange for access token (required)'), - required=True + required=True, ) parser.add_argument( '--request-secret', metavar='', help=_('Secret associated with (required)'), - required=True + required=True, ) parser.add_argument( '--verifier', metavar='', help=_('Verifier associated with (required)'), - required=True + required=True, ) return parser def 
take_action(self, parsed_args): token_client = self.app.client_manager.identity.oauth1.access_tokens access_token = token_client.create( - parsed_args.consumer_key, parsed_args.consumer_secret, - parsed_args.request_key, parsed_args.request_secret, - parsed_args.verifier) + parsed_args.consumer_key, + parsed_args.consumer_secret, + parsed_args.request_key, + parsed_args.request_secret, + parsed_args.verifier, + ) return zip(*sorted(access_token._info.items())) @@ -114,25 +120,26 @@ class CreateRequestToken(command.ShowOne): _description = _("Create a request token") def get_parser(self, prog_name): - parser = super(CreateRequestToken, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--consumer-key', metavar='', help=_('Consumer key (required)'), - required=True + required=True, ) parser.add_argument( '--consumer-secret', metavar='', help=_('Consumer secret (required)'), - required=True + required=True, ) parser.add_argument( '--project', metavar='', - help=_('Project that consumer wants to access (name or ID)' - ' (required)'), - required=True + help=_( + 'Project that consumer wants to access (name or ID) (required)' + ), + required=True, ) parser.add_argument( '--domain', @@ -146,19 +153,21 @@ def take_action(self, parsed_args): if parsed_args.domain: domain = common.find_domain(identity_client, parsed_args.domain) - project = utils.find_resource(identity_client.projects, - parsed_args.project, - domain_id=domain.id) + project = utils.find_resource( + identity_client.projects, + parsed_args.project, + domain_id=domain.id, + ) else: - project = utils.find_resource(identity_client.projects, - parsed_args.project) + project = utils.find_resource( + identity_client.projects, parsed_args.project + ) token_client = identity_client.oauth1.request_tokens request_token = token_client.create( - parsed_args.consumer_key, - parsed_args.consumer_secret, - project.id) + parsed_args.consumer_key, parsed_args.consumer_secret, project.id + ) return zip(*sorted(request_token._info.items())) @@ -169,14 +178,15 @@ class IssueToken(command.ShowOne): required_scope = False def get_parser(self, prog_name): - parser = super(IssueToken, self).get_parser(prog_name) + parser = super().get_parser(prog_name) return parser def take_action(self, parsed_args): auth_ref = self.app.client_manager.auth_ref if not auth_ref: raise exceptions.AuthorizationFailure( - _("Only an authorized user may issue a new token.")) + _("Only an authorized user may issue a new token.") + ) data = {} if auth_ref.auth_token: @@ -204,7 +214,7 @@ class RevokeToken(command.Command): _description = _("Revoke existing token") def get_parser(self, prog_name): - parser = super(RevokeToken, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'token', metavar='', diff --git a/openstackclient/identity/v3/trust.py b/openstackclient/identity/v3/trust.py index 61273f410d..80808aa12b 100644 --- a/openstackclient/identity/v3/trust.py +++ b/openstackclient/identity/v3/trust.py @@ -14,13 +14,14 @@ """Identity v3 Trust action implementations""" import datetime +import itertools import logging -from keystoneclient import exceptions as identity_exc -from osc_lib.command import command +from openstack import exceptions as sdk_exceptions from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -28,11 +29,30 @@ LOG = logging.getLogger(__name__) +def 
_format_trust(trust): + columns = ( + 'expires_at', + 'id', + 'is_impersonation', + 'project_id', + 'redelegated_trust_id', + 'redelegation_count', + 'remaining_uses', + 'roles', + 'trustee_user_id', + 'trustor_user_id', + ) + return ( + columns, + utils.get_item_properties(trust, columns), + ) + + class CreateTrust(command.ShowOne): _description = _("Create new trust") def get_parser(self, prog_name): - parser = super(CreateTrust, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'trustor', metavar='', @@ -52,25 +72,32 @@ def get_parser(self, prog_name): parser.add_argument( '--role', metavar='', + dest='roles', action='append', default=[], - help=_('Roles to authorize (name or ID) ' - '(repeat option to set multiple values, required)'), - required=True + help=_( + 'Roles to authorize (name or ID) ' + '(repeat option to set multiple values, required)' + ), + required=True, ) parser.add_argument( '--impersonate', - dest='impersonate', + dest='is_impersonation', action='store_true', default=False, - help=_('Tokens generated from the trust will represent ' - ' (defaults to False)'), + help=_( + 'Tokens generated from the trust will represent ' + ' (defaults to False)' + ), ) parser.add_argument( '--expiration', metavar='', - help=_('Sets an expiration date for the trust' - ' (format of YYYY-mm-ddTHH:MM:SS)'), + help=_( + 'Sets an expiration date for the trust' + ' (format of YYYY-mm-ddTHH:MM:SS)' + ), ) common.add_project_domain_option_to_parser(parser) parser.add_argument( @@ -86,63 +113,98 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity + + kwargs = {} # NOTE(stevemar): Find the two users, project and roles that # are necessary for making a trust usable, the API dictates that # trustee, project and role are optional, but that makes the trust # pointless, and trusts are immutable, so let's enforce it at the # client level. 
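Editor's aside, not part of the patch: the CreateTrust hunks that follow repeatedly resolve a name or ID through the SDK and fall back to the raw value when the lookup is forbidden, since an unprivileged caller may not be allowed to list users or projects. A minimal sketch of that pattern against an openstacksdk identity proxy (the function name and the cloud name in the usage comment are illustrative assumptions):

from openstack import exceptions as sdk_exceptions


def resolve_user_id(identity, name_or_id, domain=None):
    """Return a user ID for a name or ID, tolerating forbidden lookups."""
    try:
        domain_id = None
        if domain:
            # Scope the user lookup to a domain when one was given
            domain_id = identity.find_domain(domain, ignore_missing=False).id
        return identity.find_user(
            name_or_id, ignore_missing=False, domain_id=domain_id
        ).id
    except sdk_exceptions.ForbiddenException:
        # The caller may not be allowed to look up users; assume an ID was given
        return name_or_id


# Usage sketch:
#   conn = openstack.connect(cloud='devstack')
#   trustor_id = resolve_user_id(conn.identity, 'alice', domain='Default')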
- trustor_id = common.find_user(identity_client, - parsed_args.trustor, - parsed_args.trustor_domain).id - trustee_id = common.find_user(identity_client, - parsed_args.trustee, - parsed_args.trustee_domain).id - project_id = common.find_project(identity_client, - parsed_args.project, - parsed_args.project_domain).id - - role_ids = [] - for role in parsed_args.role: + try: + if parsed_args.trustor_domain: + trustor_domain_id = identity_client.find_domain( + parsed_args.trustor_domain, ignore_missing=False + ).id + trustor_id = identity_client.find_user( + parsed_args.trustor, + ignore_missing=False, + domain_id=trustor_domain_id, + ).id + else: + trustor_id = identity_client.find_user( + parsed_args.trustor, ignore_missing=False + ).id + kwargs['trustor_user_id'] = trustor_id + except sdk_exceptions.ForbiddenException: + kwargs['trustor_user_id'] = parsed_args.trustor + + try: + if parsed_args.trustee_domain: + trustee_domain_id = identity_client.find_domain( + parsed_args.trustee_domain, ignore_missing=False + ).id + trustee_id = identity_client.find_user( + parsed_args.trustee, + ignore_missing=False, + domain_id=trustee_domain_id, + ).id + else: + trustee_id = identity_client.find_user( + parsed_args.trustee, ignore_missing=False + ).id + kwargs['trustee_user_id'] = trustee_id + except sdk_exceptions.ForbiddenException: + kwargs['trustee_user_id'] = parsed_args.trustee + + try: + if parsed_args.project_domain: + project_domain_id = identity_client.find_domain( + parsed_args.project_domain, ignore_missing=False + ).id + project_id = identity_client.find_project( + parsed_args.project, + ignore_missing=False, + domain_id=project_domain_id, + ).id + else: + project_id = identity_client.find_project( + parsed_args.project, ignore_missing=False + ).id + kwargs['project_id'] = project_id + except sdk_exceptions.ForbiddenException: + kwargs['project_id'] = parsed_args.project + + roles = [] + for role in parsed_args.roles: try: - role_id = utils.find_resource( - identity_client.roles, - role, + role_id = identity_client.find_role( + role, ignore_missing=False ).id - except identity_exc.Forbidden: + except sdk_exceptions.ForbiddenException: role_id = role - role_ids.append(role_id) + roles.append({"id": role_id}) + kwargs['roles'] = roles - expires_at = None if parsed_args.expiration: - expires_at = datetime.datetime.strptime(parsed_args.expiration, - '%Y-%m-%dT%H:%M:%S') - - trust = identity_client.trusts.create( - trustee_id, trustor_id, - impersonation=parsed_args.impersonate, - project=project_id, - role_ids=role_ids, - expires_at=expires_at, - ) + expires_at = datetime.datetime.strptime( + parsed_args.expiration, '%Y-%m-%dT%H:%M:%S' + ) + kwargs['expires_at'] = expires_at - trust._info.pop('roles_links', None) - trust._info.pop('links', None) + kwargs['impersonation'] = bool(parsed_args.is_impersonation) - # Format roles into something sensible - roles = trust._info.pop('roles') - msg = ' '.join(r['name'] for r in roles) - trust._info['roles'] = msg + trust = identity_client.create_trust(**kwargs) - return zip(*sorted(trust._info.items())) + return _format_trust(trust) class DeleteTrust(command.Command): _description = _("Delete trust(s)") def get_parser(self, prog_name): - parser = super(DeleteTrust, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'trust', metavar='', @@ -152,24 +214,31 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = 
self.app.client_manager.sdk_connection.identity errors = 0 for trust in parsed_args.trust: try: - trust_obj = utils.find_resource(identity_client.trusts, - trust) - identity_client.trusts.delete(trust_obj.id) + trust_obj = identity_client.find_trust( + trust, ignore_missing=False + ) + identity_client.delete_trust(trust_obj.id) except Exception as e: errors += 1 - LOG.error(_("Failed to delete trust with " - "name or ID '%(trust)s': %(e)s"), - {'trust': trust, 'e': e}) + LOG.error( + _( + "Failed to delete trust with " + "name or ID '%(trust)s': %(e)s" + ), + {'trust': trust, 'e': e}, + ) if errors > 0: total = len(parsed_args.trust) - msg = (_("%(errors)s of %(total)s trusts failed " - "to delete.") % {'errors': errors, 'total': total}) + msg = _("%(errors)s of %(total)s trusts failed to delete.") % { + 'errors': errors, + 'total': total, + } raise exceptions.CommandError(msg) @@ -207,15 +276,17 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity auth_ref = self.app.client_manager.auth_ref - if parsed_args.authuser and any([ - parsed_args.trustor, - parsed_args.trustor_domain, - parsed_args.trustee, - parsed_args.trustee_domain, - ]): + if parsed_args.authuser and any( + [ + parsed_args.trustor, + parsed_args.trustor_domain, + parsed_args.trustee, + parsed_args.trustee_domain, + ] + ): msg = _("--authuser cannot be used with --trustee or --trustor") raise exceptions.CommandError(msg) @@ -228,55 +299,104 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) if parsed_args.authuser: - if auth_ref: - user = common.find_user( - identity_client, - auth_ref.user_id - ) - # We need two calls here as we want trusts with - # either the trustor or the trustee set to current user - # using a single call would give us trusts with both - # trustee and trustor set to current user - data1 = identity_client.trusts.list(trustor_user=user) - data2 = identity_client.trusts.list(trustee_user=user) - data = set(data1 + data2) + # We need two calls here as we want trusts with + # either the trustor or the trustee set to current user + # using a single call would give us trusts with both + # trustee and trustor set to current user + data = list( + { + x.id: x + for x in itertools.chain( + identity_client.trusts( + trustor_user_id=auth_ref.user_id + ), + identity_client.trusts( + trustee_user_id=auth_ref.user_id + ), + ) + }.values() + ) else: trustor = None if parsed_args.trustor: - trustor = common.find_user( - identity_client, - parsed_args.trustor, - parsed_args.trustor_domain, - ) + try: + if parsed_args.trustor_domain: + trustor_domain_id = identity_client.find_domain( + parsed_args.trustor_domain, ignore_missing=False + ).id + trustor_id = identity_client.find_user( + parsed_args.trustor, + ignore_missing=False, + domain_id=trustor_domain_id, + ).id + else: + trustor_id = identity_client.find_user( + parsed_args.trustor, ignore_missing=False + ).id + trustor = trustor_id + except sdk_exceptions.ForbiddenException: + trustor = parsed_args.trustor trustee = None if parsed_args.trustee: - trustee = common.find_user( - identity_client, - parsed_args.trustor, - parsed_args.trustor_domain, - ) - - data = self.app.client_manager.identity.trusts.list( - trustor_user=trustor, - trustee_user=trustee, + try: + if parsed_args.trustee_domain: + trustee_domain_id = identity_client.find_domain( + parsed_args.trustee_domain, ignore_missing=False + 
).id + trustee_id = identity_client.find_user( + parsed_args.trustee, + ignore_missing=False, + domain_id=trustee_domain_id, + ).id + else: + trustee_id = identity_client.find_user( + parsed_args.trustee, ignore_missing=False + ).id + trustee = trustee_id + except sdk_exceptions.ForbiddenException: + trustee = parsed_args.trustee + + data = identity_client.trusts( + trustor_user_id=trustor, + trustee_user_id=trustee, ) - columns = ('ID', 'Expires At', 'Impersonation', 'Project ID', - 'Trustee User ID', 'Trustor User ID') + column_headers = ( + 'ID', + 'Expires At', + 'Impersonation', + 'Project ID', + 'Trustee User ID', + 'Trustor User ID', + ) + columns = ( + 'id', + 'expires_at', + 'is_impersonation', + 'project_id', + 'trustee_user_id', + 'trustor_user_id', + ) - return (columns, - (utils.get_item_properties( - s, columns, + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class ShowTrust(command.ShowOne): _description = _("Display trust details") def get_parser(self, prog_name): - parser = super(ShowTrust, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'trust', metavar='', @@ -285,16 +405,9 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity - trust = utils.find_resource(identity_client.trusts, - parsed_args.trust) - - trust._info.pop('roles_links', None) - trust._info.pop('links', None) - - # Format roles into something sensible - roles = trust._info.pop('roles') - msg = ' '.join(r['name'] for r in roles) - trust._info['roles'] = msg + identity_client = self.app.client_manager.sdk_connection.identity + trust = identity_client.find_trust( + parsed_args.trust, ignore_missing=False + ) - return zip(*sorted(trust._info.items())) + return _format_trust(trust) diff --git a/openstackclient/identity/v3/unscoped_saml.py b/openstackclient/identity/v3/unscoped_saml.py index f7598f178f..e1efc15cfa 100644 --- a/openstackclient/identity/v3/unscoped_saml.py +++ b/openstackclient/identity/v3/unscoped_saml.py @@ -17,9 +17,9 @@ the user can list domains and projects they are allowed to access, and request a scoped token.""" -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -30,11 +30,17 @@ def take_action(self, parsed_args): columns = ('ID', 'Enabled', 'Name', 'Description') identity_client = self.app.client_manager.identity data = identity_client.federation.domains.list() - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class ListAccessibleProjects(command.Lister): @@ -44,8 +50,14 @@ def take_action(self, parsed_args): columns = ('ID', 'Domain ID', 'Enabled', 'Name') identity_client = self.app.client_manager.identity data = identity_client.federation.projects.list() - return (columns, - (utils.get_item_properties( - s, columns, + return ( + columns, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) diff --git a/openstackclient/identity/v3/user.py b/openstackclient/identity/v3/user.py index cbc112a058..bec1c620c5 100644 --- a/openstackclient/identity/v3/user.py +++ b/openstackclient/identity/v3/user.py @@ -17,12 +17,13 @@ import copy import logging +import typing as ty -from keystoneauth1 
import exceptions as ks_exc -from osc_lib.command import command +from openstack import exceptions as sdk_exc from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common @@ -30,8 +31,37 @@ LOG = logging.getLogger(__name__) +def _format_user(user): + columns = ( + 'default_project_id', + 'domain_id', + 'email', + 'is_enabled', + 'id', + 'name', + 'description', + 'password_expires_at', + 'options', + ) + column_headers = ( + 'default_project_id', + 'domain_id', + 'email', + 'enabled', + 'id', + 'name', + 'description', + 'password_expires_at', + 'options', + ) + return ( + column_headers, + utils.get_item_properties(user, columns), + ) + + def _get_options_for_user(identity_client, parsed_args): - options = {} + options: dict[str, ty.Any] = {} if parsed_args.ignore_lockout_failure_attempts: options['ignore_lockout_failure_attempts'] = True if parsed_args.no_ignore_lockout_failure_attempts: @@ -52,9 +82,10 @@ def _get_options_for_user(identity_client, parsed_args): options['multi_factor_auth_enabled'] = True if parsed_args.disable_multi_factor_auth: options['multi_factor_auth_enabled'] = False - if parsed_args.multi_factor_auth_rule: - auth_rules = [rule.split(",") for rule in - parsed_args.multi_factor_auth_rule] + if parsed_args.multi_factor_auth_rules: + auth_rules = [ + rule.split(",") for rule in parsed_args.multi_factor_auth_rules + ] if auth_rules: options['multi_factor_auth_rules'] = auth_rules return options @@ -66,54 +97,70 @@ def _add_user_options(parser): parser.add_argument( '--ignore-lockout-failure-attempts', action="store_true", - help=_('Opt into ignoring the number of times a user has ' - 'authenticated and locking out the user as a result'), + help=_( + 'Opt into ignoring the number of times a user has ' + 'authenticated and locking out the user as a result' + ), ) parser.add_argument( '--no-ignore-lockout-failure-attempts', action="store_true", - help=_('Opt out of ignoring the number of times a user has ' - 'authenticated and locking out the user as a result'), + help=_( + 'Opt out of ignoring the number of times a user has ' + 'authenticated and locking out the user as a result' + ), ) parser.add_argument( '--ignore-password-expiry', action="store_true", - help=_('Opt into allowing user to continue using passwords that ' - 'may be expired'), + help=_( + 'Opt into allowing user to continue using passwords that ' + 'may be expired' + ), ) parser.add_argument( '--no-ignore-password-expiry', action="store_true", - help=_('Opt out of allowing user to continue using passwords ' - 'that may be expired'), + help=_( + 'Opt out of allowing user to continue using passwords ' + 'that may be expired' + ), ) parser.add_argument( '--ignore-change-password-upon-first-use', action="store_true", - help=_('Control if a user should be forced to change their password ' - 'immediately after they log into keystone for the first time. ' - 'Opt into ignoring the user to change their password during ' - 'first time login in keystone'), + help=_( + 'Control if a user should be forced to change their password ' + 'immediately after they log into keystone for the first time. 
' + 'Opt into ignoring the user to change their password during ' + 'first time login in keystone' + ), ) parser.add_argument( '--no-ignore-change-password-upon-first-use', action="store_true", - help=_('Control if a user should be forced to change their password ' - 'immediately after they log into keystone for the first time. ' - 'Opt out of ignoring the user to change their password during ' - 'first time login in keystone'), + help=_( + 'Control if a user should be forced to change their password ' + 'immediately after they log into keystone for the first time. ' + 'Opt out of ignoring the user to change their password during ' + 'first time login in keystone' + ), ) parser.add_argument( '--enable-lock-password', action="store_true", - help=_('Disables the ability for a user to change its password ' - 'through self-service APIs'), + help=_( + 'Disables the ability for a user to change its password ' + 'through self-service APIs' + ), ) parser.add_argument( '--disable-lock-password', action="store_true", - help=_('Enables the ability for a user to change its password ' - 'through self-service APIs'), + help=_( + 'Enables the ability for a user to change its password ' + 'through self-service APIs' + ), ) parser.add_argument( '--enable-multi-factor-auth', @@ -128,13 +175,16 @@ def _add_user_options(parser): parser.add_argument( '--multi-factor-auth-rule', metavar='', - action="append", + dest='multi_factor_auth_rules', + action='append', default=[], - help=_('Set multi-factor auth rules. For example, to set a rule ' - 'requiring the "password" and "totp" auth methods to be ' - 'provided, use: "--multi-factor-auth-rule password,totp". ' - 'May be provided multiple times to set different rule ' - 'combinations.') + help=_( + 'Set multi-factor auth rules. For example, to set a rule ' + 'requiring the "password" and "totp" auth methods to be ' + 'provided, use: "--multi-factor-auth-rule password,totp". ' + 'May be provided multiple times to set different rule ' + 'combinations.' 
+ ), ) @@ -142,7 +192,7 @@ class CreateUser(command.ShowOne): _description = _("Create new user") def get_parser(self, prog_name): - parser = super(CreateUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -201,59 +251,90 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - project_id = None - if parsed_args.project: - project_id = common.find_project(identity_client, - parsed_args.project, - parsed_args.project_domain).id + kwargs = {} domain_id = None if parsed_args.domain: - domain_id = common.find_domain(identity_client, - parsed_args.domain).id + domain_id = identity_client.find_domain( + parsed_args.domain, + ignore_missing=False, + ).id + kwargs['domain_id'] = domain_id + + if parsed_args.project: + project_domain_id = None + if parsed_args.project_domain: + project_domain_id = identity_client.find_domain( + parsed_args.project_domain, + ignore_missing=False, + ).id + kwargs['default_project_id'] = identity_client.find_project( + parsed_args.project, + ignore_missing=False, + domain_id=project_domain_id, + ).id + + if parsed_args.description: + kwargs['description'] = parsed_args.description - enabled = True + if parsed_args.email: + kwargs['email'] = parsed_args.email + + is_enabled = True if parsed_args.disable: - enabled = False - if parsed_args.password_prompt: - parsed_args.password = utils.get_password(self.app.stdin) + is_enabled = False + + password = None + if parsed_args.password: + password = parsed_args.password + elif parsed_args.password_prompt: + password = utils.get_password(self.app.stdin) + + if not password: + LOG.warning( + _( + "No password was supplied, authentication will fail " + "when a user does not have a password." 
+ ) + ) + else: + kwargs['password'] = password - if not parsed_args.password: - LOG.warning(_("No password was supplied, authentication will fail " - "when a user does not have a password.")) options = _get_options_for_user(identity_client, parsed_args) + if options: + kwargs['options'] = options try: - user = identity_client.users.create( + user = identity_client.create_user( + is_enabled=is_enabled, name=parsed_args.name, - domain=domain_id, - default_project=project_id, - password=parsed_args.password, - email=parsed_args.email, - description=parsed_args.description, - enabled=enabled, - options=options, + **kwargs, ) - except ks_exc.Conflict: + except sdk_exc.ConflictException: if parsed_args.or_show: - user = utils.find_resource(identity_client.users, - parsed_args.name, - domain_id=domain_id) + kwargs = {} + if domain_id: + kwargs['domain_id'] = domain_id + + user = identity_client.find_user( + parsed_args.name, + ignore_missing=False, + **kwargs, + ) LOG.info(_('Returning existing user %s'), user.name) else: raise - user._info.pop('links') - return zip(*sorted(user._info.items())) + return _format_user(user) class DeleteUser(command.Command): _description = _("Delete user(s)") def get_parser(self, prog_name): - parser = super(DeleteUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'users', metavar='', @@ -268,32 +349,44 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity domain = None if parsed_args.domain: - domain = common.find_domain(identity_client, parsed_args.domain) + domain = identity_client.find_domain( + name_or_id=parsed_args.domain, + ignore_missing=True, + ) errors = 0 for user in parsed_args.users: try: if domain is not None: - user_obj = utils.find_resource(identity_client.users, - user, - domain_id=domain.id) + user_obj = identity_client.find_user( + name_or_id=user, + domain_id=domain.id, + ignore_missing=False, + ) else: - user_obj = utils.find_resource(identity_client.users, - user) - identity_client.users.delete(user_obj.id) + user_obj = identity_client.find_user( + name_or_id=user, ignore_missing=False + ) + identity_client.delete_user(user_obj.id, ignore_missing=False) except Exception as e: errors += 1 - LOG.error(_("Failed to delete user with " - "name or ID '%(user)s': %(e)s"), - {'user': user, 'e': e}) + LOG.error( + _( + "Failed to delete user with " + "name or ID '%(user)s': %(e)s" + ), + {'user': user, 'e': e}, + ) if errors > 0: total = len(parsed_args.users) - msg = (_("%(errors)s of %(total)s users failed " - "to delete.") % {'errors': errors, 'total': total}) + msg = _("%(errors)s of %(total)s users failed to delete.") % { + 'errors': errors, + 'total': total, + } raise exceptions.CommandError(msg) @@ -301,7 +394,7 @@ class ListUser(command.Lister): _description = _("List users") def get_parser(self, prog_name): - parser = super(ListUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--domain', metavar='', @@ -324,76 +417,124 @@ def get_parser(self, prog_name): default=False, help=_('List additional fields in output'), ) + parser.add_argument( + '--enabled', + action='store_true', + dest='is_enabled', + default=None, + help=_( + 'List only enabled users, does nothing with ' + '--project and --group' + ), + ) + parser.add_argument( + '--disabled', + action='store_false', + dest='is_enabled', + default=None, + 
help=_( + 'List only disabled users, does nothing with ' + '--project and --group' + ), + ) return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity domain = None if parsed_args.domain: - domain = common.find_domain(identity_client, - parsed_args.domain).id + domain = identity_client.find_domain( + name_or_id=parsed_args.domain, + ignore_missing=False, + ).id group = None if parsed_args.group: - group = common.find_group(identity_client, - parsed_args.group, - parsed_args.domain).id + group = identity_client.find_group( + name_or_id=parsed_args.group, + domain_id=parsed_args.domain, + ignore_missing=False, + ).id + + if parsed_args.is_enabled is not None: + enabled = parsed_args.is_enabled if parsed_args.project: if domain is not None: - project = utils.find_resource( - identity_client.projects, - parsed_args.project, - domain_id=domain + project = identity_client.find_project( + name_or_id=parsed_args.project, + domain_id=domain, + ignore_missing=False, ).id else: - project = utils.find_resource( - identity_client.projects, - parsed_args.project, + project = identity_client.find_project( + name_or_id=parsed_args.project, + ignore_missing=False, ).id - assignments = identity_client.role_assignments.list( - project=project) - # NOTE(stevemar): If a user has more than one role on a project # then they will have two entries in the returned data. Since we # are looking for any role, let's just track unique user IDs. user_ids = set() - for assignment in assignments: - if hasattr(assignment, 'user'): + for assignment in identity_client.role_assignments( + scope_project_id=project + ): + if assignment.user: user_ids.add(assignment.user['id']) # NOTE(stevemar): Call find_resource once we have unique IDs, so # it's fewer trips to the Identity API, then collect the data. data = [] for user_id in user_ids: - user = utils.find_resource(identity_client.users, user_id) + user = identity_client.find_user(user_id, ignore_missing=False) data.append(user) - else: - data = identity_client.users.list( - domain=domain, + elif parsed_args.group: + data = identity_client.group_users( + domain_id=domain, group=group, ) + else: + if parsed_args.is_enabled is not None: + data = identity_client.users( + domain_id=domain, + is_enabled=enabled, + ) + else: + data = identity_client.users( + domain_id=domain, + ) # Column handling if parsed_args.long: - columns = ['ID', 'Name', 'Default Project Id', 'Domain Id', - 'Description', 'Email', 'Enabled'] + columns = [ + 'ID', + 'Name', + 'Default Project Id', + 'Domain Id', + 'Description', + 'Email', + 'Is Enabled', + ] column_headers = copy.deepcopy(columns) column_headers[2] = 'Project' column_headers[3] = 'Domain' + column_headers[6] = 'Enabled' else: columns = ['ID', 'Name'] column_headers = columns return ( column_headers, - (utils.get_item_properties( - s, columns, - formatters={}, - ) for s in data) + ( + utils.get_item_properties( + s, + columns, + formatters={}, + ) + for s in data + ), ) @@ -401,7 +542,7 @@ class SetUser(command.Command): _description = _("Set user properties") def get_parser(self, prog_name): - parser = super(SetUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'user', metavar='', @@ -415,8 +556,10 @@ def get_parser(self, prog_name): parser.add_argument( '--domain', metavar='', - help=_('Domain the user belongs to (name or ID). 
This can be ' - 'used in case collisions between user names exist.'), + help=_( + 'Domain the user belongs to (name or ID). This can be ' + 'used in case collisions between user names exist.' + ), ) parser.add_argument( '--project', @@ -461,27 +604,36 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity if parsed_args.password_prompt: parsed_args.password = utils.get_password(self.app.stdin) if '' == parsed_args.password: - LOG.warning(_("No password was supplied, authentication will fail " - "when a user does not have a password.")) + LOG.warning( + _( + "No password was supplied, authentication will fail " + "when a user does not have a password." + ) + ) - user_str = common._get_token_resource(identity_client, 'user', - parsed_args.user, - parsed_args.domain) + user_str = common._get_token_resource( + identity_client, 'user', parsed_args.user, parsed_args.domain + ) if parsed_args.domain: - domain = common.find_domain(identity_client, parsed_args.domain) - user = utils.find_resource(identity_client.users, - user_str, - domain_id=domain.id) + domain = identity_client.find_domain( + name_or_id=parsed_args.domain, + ignore_missing=False, + ) + user = identity_client.find_user( + name_or_id=user_str, + domain_id=domain.id, + ignore_missing=False, + ) else: - user = utils.find_resource( - identity_client.users, - parsed_args.user, + user = identity_client.find_user( + name_or_id=parsed_args.user, + ignore_missing=False, ) kwargs = {} @@ -494,21 +646,29 @@ def take_action(self, parsed_args): if parsed_args.description: kwargs['description'] = parsed_args.description if parsed_args.project: - project_id = common.find_project(identity_client, - parsed_args.project, - parsed_args.project_domain).id - kwargs['default_project'] = project_id - kwargs['enabled'] = user.enabled + project_domain_id = None + if parsed_args.project_domain: + project_domain_id = identity_client.find_domain( + name_or_id=parsed_args.project_domain, + ignore_missing=False, + ).id + project_id = identity_client.find_project( + name_or_id=parsed_args.project, + ignore_missing=False, + domain_id=project_domain_id, + ).id + kwargs['default_project_id'] = project_id + kwargs['is_enabled'] = user.is_enabled if parsed_args.enable: - kwargs['enabled'] = True + kwargs['is_enabled'] = True if parsed_args.disable: - kwargs['enabled'] = False + kwargs['is_enabled'] = False options = _get_options_for_user(identity_client, parsed_args) if options: kwargs['options'] = options - identity_client.users.update(user.id, **kwargs) + identity_client.update_user(user=user, **kwargs) class SetPasswordUser(command.Command): @@ -517,7 +677,7 @@ class SetPasswordUser(command.Command): required_scope = False def get_parser(self, prog_name): - parser = super(SetPasswordUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--password', metavar='', @@ -531,7 +691,14 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity + conn = self.app.client_manager.sdk_connection + auth = conn.config.get_auth() + if auth is None: + # this will never happen + raise exceptions.CommandError('invalid authentication info') + + user_id = auth.get_user_id(conn.identity) # FIXME(gyee): there are two scenarios: # @@ -556,25 +723,35 
@@ def take_action(self, parsed_args): current_password = parsed_args.original_password if current_password is None: current_password = utils.get_password( - self.app.stdin, prompt="Current Password:", confirm=False) + self.app.stdin, prompt="Current Password:", confirm=False + ) password = parsed_args.password if password is None: password = utils.get_password( - self.app.stdin, prompt="New Password:") + self.app.stdin, prompt="New Password:" + ) if '' == password: - LOG.warning(_("No password was supplied, authentication will fail " - "when a user does not have a password.")) + LOG.warning( + _( + "No password was supplied, authentication will fail " + "when a user does not have a password." + ) + ) - identity_client.users.update_password(current_password, password) + identity_client.update_user( + user=user_id, + current_password=current_password, + password=password, + ) class ShowUser(command.ShowOne): _description = _("Display user details") def get_parser(self, prog_name): - parser = super(ShowUser, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'user', metavar='', @@ -588,19 +765,28 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - identity_client = self.app.client_manager.identity + identity_client = self.app.client_manager.sdk_connection.identity - user_str = common._get_token_resource(identity_client, 'user', - parsed_args.user, - parsed_args.domain) + user_str = common._get_token_resource( + identity_client, 'user', parsed_args.user, parsed_args.domain + ) + + domain = None if parsed_args.domain: - domain = common.find_domain(identity_client, parsed_args.domain) - user = utils.find_resource(identity_client.users, - user_str, - domain_id=domain.id) + domain = identity_client.find_domain( + name_or_id=parsed_args.domain, + ignore_missing=True, + ) + if domain: + user = identity_client.find_user( + name_or_id=user_str, + domain_id=domain.id, + ignore_missing=False, + ) else: - user = utils.find_resource(identity_client.users, - user_str) + user = identity_client.find_user( + name_or_id=user_str, + ignore_missing=False, + ) - user._info.pop('links') - return zip(*sorted(user._info.items())) + return _format_user(user) diff --git a/openstackclient/image/client.py b/openstackclient/image/client.py index 9a0d7bacb1..f75b3e6820 100644 --- a/openstackclient/image/client.py +++ b/openstackclient/image/client.py @@ -11,7 +11,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
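Editor's aside, not part of the patch: the SetPasswordUser hunk above derives the caller's own user ID from the auth plugin and then updates the password through the SDK identity proxy. A self-contained sketch mirroring those calls (the cloud name, prompts, and function name are assumptions, not taken from the patch):

import getpass

import openstack


def change_own_password(cloud_name: str = 'devstack') -> None:
    conn = openstack.connect(cloud=cloud_name)
    # The authenticated user's ID comes from the auth plugin rather than a
    # user lookup, which an unprivileged user may not be allowed to perform.
    user_id = conn.config.get_auth().get_user_id(conn.identity)
    current = getpass.getpass('Current Password: ')
    new = getpass.getpass('New Password: ')
    # Mirrors the identity_client.update_user(...) call used in the hunk above
    conn.identity.update_user(
        user=user_id, current_password=current, password=new
    )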
-# import logging @@ -19,20 +18,17 @@ from openstackclient.i18n import _ - LOG = logging.getLogger(__name__) +# global variables used when building the shell DEFAULT_API_VERSION = '2' API_VERSION_OPTION = 'os_image_api_version' -API_NAME = "image" -API_VERSIONS = { - "1": "openstack.connection.Connection", - "2": "openstack.connection.Connection", -} +API_NAME = 'image' +API_VERSIONS = ('1', '2') def make_client(instance): - + """Returns an image service client.""" LOG.debug( 'Image client initialized using OpenStack SDK: %s', instance.sdk_connection.image, @@ -46,7 +42,12 @@ def build_option_parser(parser): '--os-image-api-version', metavar='', default=utils.env('OS_IMAGE_API_VERSION'), - help=_('Image API version, default=%s (Env: OS_IMAGE_API_VERSION)') % - DEFAULT_API_VERSION, + help=_('Image API version, default=%s (Env: OS_IMAGE_API_VERSION)') + % DEFAULT_API_VERSION, ) return parser + + +def check_api_version(check_version): + # SDK supports auto-negotiation for us: always return True + return True diff --git a/openstackclient/image/v1/image.py b/openstackclient/image/v1/image.py index 43ccf5d212..0ea7eca710 100644 --- a/openstackclient/image/v1/image.py +++ b/openstackclient/image/v1/image.py @@ -16,55 +16,59 @@ """Image V1 Action Implementations""" import argparse -import io import logging import os import sys +import typing as ty from cliff import columns as cliff_columns from osc_lib.api import utils as api_utils from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command +from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ -if os.name == "nt": - import msvcrt -else: - msvcrt = None - - CONTAINER_CHOICES = ["ami", "ari", "aki", "bare", "docker", "ova", "ovf"] DEFAULT_CONTAINER_FORMAT = 'bare' DEFAULT_DISK_FORMAT = 'raw' -DISK_CHOICES = ["ami", "ari", "aki", "vhd", "vmdk", "raw", "qcow2", "vhdx", - "vdi", "iso", "ploop"] - +DISK_CHOICES = [ + "ami", + "ari", + "aki", + "vhd", + "vmdk", + "raw", + "qcow2", + "vhdx", + "vdi", + "iso", + "ploop", +] LOG = logging.getLogger(__name__) def _get_columns(item): - column_map = { - 'is_protected': 'protected', - 'owner_id': 'owner' - } + column_map = {'is_protected': 'protected', 'owner_id': 'owner'} hidden_columns = [ - 'location', 'checksum', 'copy_from', 'created_at', 'status', + 'location', + 'checksum', + 'copy_from', + 'created_at', + 'status', 'updated_at', ] return utils.get_osc_show_columns_for_sdk_resource( - item.to_dict(), column_map, hidden_columns, + item.to_dict(), + column_map, + hidden_columns, ) -_formatters = { -} - - -class HumanReadableSizeColumn(cliff_columns.FormattableColumn): +class HumanReadableSizeColumn(cliff_columns.FormattableColumn[int]): def human_readable(self): """Return a formatted visibility string @@ -78,7 +82,7 @@ def human_readable(self): return '' -class VisibilityColumn(cliff_columns.FormattableColumn): +class VisibilityColumn(cliff_columns.FormattableColumn[bool]): def human_readable(self): """Return a formatted visibility string @@ -96,7 +100,7 @@ class CreateImage(command.ShowOne): _description = _("Create/upload an image") def get_parser(self, prog_name): - parser = super(CreateImage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "name", metavar="", @@ -117,25 +121,36 @@ def get_parser(self, prog_name): default=DEFAULT_CONTAINER_FORMAT, metavar="", choices=CONTAINER_CHOICES, - help=(_("Image container format. 
" + help=( + _( + "Image container format. " "The supported options are: %(option_list)s. " - "The default format is: %(default_opt)s") % - {'option_list': ', '.join(CONTAINER_CHOICES), - 'default_opt': DEFAULT_CONTAINER_FORMAT}) + "The default format is: %(default_opt)s" + ) + % { + 'option_list': ', '.join(CONTAINER_CHOICES), + 'default_opt': DEFAULT_CONTAINER_FORMAT, + } + ), ) parser.add_argument( "--disk-format", default=DEFAULT_DISK_FORMAT, metavar="", choices=DISK_CHOICES, - help=_("Image disk format. The supported options are: %s. " - "The default format is: raw") % ', '.join(DISK_CHOICES) + help=_( + "Image disk format. The supported options are: %s. " + "The default format is: raw" + ) + % ', '.join(DISK_CHOICES), ) parser.add_argument( "--size", metavar="", - help=_("Image size, in bytes (only used with --location and" - " --copy-from)"), + help=_( + "Image size, in bytes (only used with --location and" + " --copy-from)" + ), ) parser.add_argument( "--min-disk", @@ -175,8 +190,10 @@ def get_parser(self, prog_name): dest='force', action='store_true', default=False, - help=_("Force image creation if volume is in use " - "(only meaningful with --volume)"), + help=_( + "Force image creation if volume is in use " + "(only meaningful with --volume)" + ), ) parser.add_argument( "--checksum", @@ -210,8 +227,10 @@ def get_parser(self, prog_name): dest="properties", metavar="", action=parseractions.KeyValueAction, - help=_("Set a property on this image " - "(repeat option to set multiple properties)"), + help=_( + "Set a property on this image " + "(repeat option to set multiple properties)" + ), ) parser.add_argument( "--project", @@ -226,10 +245,23 @@ def take_action(self, parsed_args): # Build an attribute dict from the parsed args, only include # attributes that were actually set on the command line kwargs = {} - copy_attrs = ('name', 'id', 'store', 'container_format', - 'disk_format', 'owner', 'size', 'min_disk', 'min_ram', - 'location', 'copy_from', 'volume', 'force', - 'checksum', 'properties') + copy_attrs = ( + 'name', + 'id', + 'store', + 'container_format', + 'disk_format', + 'owner', + 'size', + 'min_disk', + 'min_ram', + 'location', + 'copy_from', + 'volume', + 'force', + 'checksum', + 'properties', + ) for attr in copy_attrs: if attr in parsed_args: val = getattr(parsed_args, attr, None) @@ -278,12 +310,14 @@ def take_action(self, parsed_args): elif parsed_args.file: # Send an open file handle to glanceclient so it will # do a chunked transfer - kwargs["data"] = io.open(parsed_args.file, "rb") + kwargs["data"] = open(parsed_args.file, "rb") else: # Read file from stdin if not sys.stdin.isatty(): - if msvcrt: - msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) + if os.name == "nt": + import msvcrt + + msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) # type: ignore if hasattr(sys.stdin, 'buffer'): kwargs['data'] = sys.stdin.buffer else: @@ -295,20 +329,28 @@ def take_action(self, parsed_args): image = image_client.create_image(**kwargs) finally: # Clean up open files - make sure data isn't a string - if ('data' in kwargs and hasattr(kwargs['data'], 'close') and - kwargs['data'] != sys.stdin): + if ( + 'data' in kwargs + and hasattr(kwargs['data'], 'close') + and kwargs['data'] != sys.stdin + ): kwargs['data'].close() if image: display_columns, columns = _get_columns(image) - _formatters['properties'] = format_columns.DictColumn - data = utils.get_item_properties(image, columns, - formatters=_formatters) + data = utils.get_item_properties( + image, + columns, + formatters={ + 
'properties': format_columns.DictColumn, + }, + ) return (display_columns, data) elif info: info.update(image._info) info['properties'] = format_columns.DictColumn( - info.get('properties', {})) + info.get('properties', {}) + ) return zip(*sorted(info.items())) @@ -316,7 +358,7 @@ class DeleteImage(command.Command): _description = _("Delete image(s)") def get_parser(self, prog_name): - parser = super(DeleteImage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "images", metavar="", @@ -326,17 +368,36 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): + result = 0 image_client = self.app.client_manager.image for image in parsed_args.images: - image_obj = image_client.find_image(image) - image_client.delete_image(image_obj.id) + try: + image_obj = image_client.find_image( + image, + ignore_missing=False, + ) + image_client.delete_image(image_obj.id) + except Exception as e: + result += 1 + msg = _( + "Failed to delete image with name or ID '%(image)s': %(e)s" + ) + LOG.error(msg, {'image': image, 'e': e}) + + total = len(parsed_args.images) + if result > 0: + msg = _("Failed to delete %(result)s of %(total)s images.") % { + 'result': result, + 'total': total, + } + raise exceptions.CommandError(msg) class ListImage(command.Lister): _description = _("List available images") def get_parser(self, prog_name): - parser = super(ListImage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) public_group = parser.add_mutually_exclusive_group() public_group.add_argument( "--public", @@ -384,9 +445,11 @@ def get_parser(self, prog_name): '--sort', metavar="[:]", default='name:asc', - help=_("Sort output by selected keys and directions(asc or desc) " - "(default: name:asc), multiple keys and directions can be " - "specified separated by comma"), + help=_( + "Sort output by selected keys and directions(asc or desc) " + "(default: name:asc), multiple keys and directions can be " + "specified separated by comma" + ), ) return parser @@ -400,7 +463,7 @@ def take_action(self, parsed_args): kwargs['is_private'] = True if parsed_args.long: - columns = ( + columns: tuple[str, ...] = ( 'ID', 'Name', 'Disk Format', @@ -413,7 +476,7 @@ def take_action(self, parsed_args): 'owner_id', 'properties', ) - column_headers = ( + column_headers: tuple[str, ...] 
= ( 'ID', 'Name', 'Disk Format', @@ -431,30 +494,33 @@ def take_action(self, parsed_args): column_headers = columns # List of image data received - data = list(image_client.images(**kwargs)) + images = list(image_client.images(**kwargs)) if parsed_args.property: # NOTE(dtroyer): coerce to a list to subscript it in py3 attr, value = list(parsed_args.property.items())[0] api_utils.simple_filter( - data, + images, attr=attr, value=value, property_field='properties', ) - data = utils.sort_items(data, parsed_args.sort) + data = utils.sort_items(images, parsed_args.sort) return ( column_headers, - (utils.get_item_properties( - s, - columns, - formatters={ - 'is_public': VisibilityColumn, - 'properties': format_columns.DictColumn, - }, - ) for s in data) + ( + utils.get_item_properties( + s, + columns, + formatters={ + 'is_public': VisibilityColumn, + 'properties': format_columns.DictColumn, + }, + ) + for s in data + ), ) @@ -462,7 +528,17 @@ class SaveImage(command.Command): _description = _("Save an image locally") def get_parser(self, prog_name): - parser = super(SaveImage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) + parser.add_argument( + "--chunk-size", + type=int, + default=1024, + metavar="", + help=_( + "Size in bytes to read from the wire and buffer at one " + "time (default: 1024)" + ), + ) parser.add_argument( "--file", metavar="", @@ -477,20 +553,27 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): image_client = self.app.client_manager.image - image = image_client.find_image(parsed_args.image) + image = image_client.find_image( + parsed_args.image, ignore_missing=False + ) output_file = parsed_args.file if output_file is None: output_file = getattr(sys.stdout, "buffer", sys.stdout) - image_client.download_image(image.id, stream=True, output=output_file) + image_client.download_image( + image.id, + stream=True, + output=output_file, + chunk_size=parsed_args.chunk_size, + ) class SetImage(command.Command): _description = _("Set image properties") def get_parser(self, prog_name): - parser = super(SetImage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "image", metavar="", @@ -517,21 +600,21 @@ def get_parser(self, prog_name): "--container-format", metavar="", choices=CONTAINER_CHOICES, - help=_("Image container format. The supported options are: %s") % - ', '.join(CONTAINER_CHOICES) + help=_("Image container format. The supported options are: %s") + % ', '.join(CONTAINER_CHOICES), ) parser.add_argument( "--disk-format", metavar="", choices=DISK_CHOICES, - help=_("Image disk format. The supported options are: %s.") % - ', '.join(DISK_CHOICES) + help=_("Image disk format. 
The supported options are: %s.") + % ', '.join(DISK_CHOICES), ) parser.add_argument( "--size", metavar="", type=int, - help=_("Size of image data (in bytes)") + help=_("Size of image data (in bytes)"), ) protected_group = parser.add_mutually_exclusive_group() protected_group.add_argument( @@ -560,8 +643,10 @@ def get_parser(self, prog_name): dest="properties", metavar="", action=parseractions.KeyValueAction, - help=_("Set a property on this image " - "(repeat option to set multiple properties)"), + help=_( + "Set a property on this image " + "(repeat option to set multiple properties)" + ), ) parser.add_argument( "--store", @@ -593,8 +678,10 @@ def get_parser(self, prog_name): dest='force', action='store_true', default=False, - help=_("Force image change if volume is in use " - "(only meaningful with --volume)"), + help=_( + "Force image change if volume is in use " + "(only meaningful with --volume)" + ), ) parser.add_argument( "--stdin", @@ -619,9 +706,21 @@ def take_action(self, parsed_args): image_client = self.app.client_manager.image kwargs = {} - copy_attrs = ('name', 'owner', 'min_disk', 'min_ram', 'properties', - 'container_format', 'disk_format', 'size', 'store', - 'location', 'copy_from', 'volume', 'checksum') + copy_attrs = ( + 'name', + 'owner', + 'min_disk', + 'min_ram', + 'properties', + 'container_format', + 'disk_format', + 'size', + 'store', + 'location', + 'copy_from', + 'volume', + 'checksum', + ) for attr in copy_attrs: if attr in parsed_args: val = getattr(parsed_args, attr, None) @@ -652,7 +751,9 @@ def take_action(self, parsed_args): # Wrap the call to catch exceptions in order to close files try: - image = image_client.find_image(parsed_args.image) + image = image_client.find_image( + parsed_args.image, ignore_missing=False + ) if not parsed_args.location and not parsed_args.copy_from: if parsed_args.volume: @@ -665,30 +766,40 @@ def take_action(self, parsed_args): source_volume.id, parsed_args.force, parsed_args.image, - (parsed_args.container_format - if parsed_args.container_format - else image.container_format), - (parsed_args.disk_format - if parsed_args.disk_format - else image.disk_format), + ( + parsed_args.container_format + if parsed_args.container_format + else image.container_format + ), + ( + parsed_args.disk_format + if parsed_args.disk_format + else image.disk_format + ), ) elif parsed_args.file: # Send an open file handle to glanceclient so it will # do a chunked transfer - kwargs["data"] = io.open(parsed_args.file, "rb") + kwargs["data"] = open(parsed_args.file, "rb") else: # Read file from stdin if sys.stdin.isatty() is not True: if parsed_args.stdin: - if msvcrt: - msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) + if os.name == "nt": + import msvcrt + + msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) # type: ignore if hasattr(sys.stdin, 'buffer'): kwargs['data'] = sys.stdin.buffer else: kwargs["data"] = sys.stdin else: - LOG.warning(_('Use --stdin to enable read image ' - 'data from standard input')) + LOG.warning( + _( + 'Use --stdin to enable read image ' + 'data from standard input' + ) + ) if image.properties and parsed_args.properties: image.properties.update(kwargs['properties']) @@ -697,8 +808,11 @@ def take_action(self, parsed_args): image = image_client.update_image(image.id, **kwargs) finally: # Clean up open files - make sure data isn't a string - if ('data' in kwargs and hasattr(kwargs['data'], 'close') and - kwargs['data'] != sys.stdin): + if ( + 'data' in kwargs + and hasattr(kwargs['data'], 'close') + and kwargs['data'] != 
sys.stdin + ): kwargs['data'].close() @@ -706,7 +820,7 @@ class ShowImage(command.ShowOne): _description = _("Display image details") def get_parser(self, prog_name): - parser = super(ShowImage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--human-readable", default=False, @@ -722,12 +836,17 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): image_client = self.app.client_manager.image - image = image_client.find_image(parsed_args.image) + image = image_client.find_image( + parsed_args.image, ignore_missing=False + ) + formatters: dict[ + str, type[cliff_columns.FormattableColumn[ty.Any]] + ] = { + 'properties': format_columns.DictColumn, + } if parsed_args.human_readable: - _formatters['size'] = HumanReadableSizeColumn + formatters['size'] = HumanReadableSizeColumn display_columns, columns = _get_columns(image) - _formatters['properties'] = format_columns.DictColumn - data = utils.get_item_properties(image, columns, - formatters=_formatters) + data = utils.get_item_properties(image, columns, formatters=formatters) return (display_columns, data) diff --git a/openstackclient/image/v2/cache.py b/openstackclient/image/v2/cache.py new file mode 100644 index 0000000000..952d9ed01b --- /dev/null +++ b/openstackclient/image/v2/cache.py @@ -0,0 +1,222 @@ +# Copyright 2023 Red Hat. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
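The image commands above now pass ignore_missing=False to find_image(), so a bad name or ID surfaces as an exception instead of a silent None. A minimal sketch of the difference, assuming an openstacksdk Connection named conn (created as in the earlier identity sketch) and a placeholder image name:

    from openstack import exceptions as sdk_exceptions

    # Default behaviour: returns None when nothing matches.
    image = conn.image.find_image('no-such-image')

    # Behaviour used by the commands above: raise instead of returning None.
    try:
        image = conn.image.find_image('no-such-image', ignore_missing=False)
    except sdk_exceptions.ResourceNotFound as exc:
        print(f"lookup failed: {exc}")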
+ +import copy +import datetime +import logging + +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient import command +from openstackclient.i18n import _ + + +LOG = logging.getLogger(__name__) + + +def _format_image_cache(cached_images): + """Format image cache to make it more consistent with OSC operations.""" + + image_list = [] + for item in cached_images: + if item == "cached_images": + for image in cached_images[item]: + image_obj = copy.deepcopy(image) + image_obj['state'] = 'cached' + image_obj['last_accessed'] = ( + datetime.datetime.fromtimestamp( + image['last_accessed'], tz=datetime.timezone.utc + ) + .replace(tzinfo=None) + .isoformat() + ) + image_obj['last_modified'] = ( + datetime.datetime.fromtimestamp( + image['last_modified'], tz=datetime.timezone.utc + ) + .replace(tzinfo=None) + .isoformat() + ) + image_list.append(image_obj) + elif item == "queued_images": + for image in cached_images[item]: + image = {'image_id': image} + image.update( + { + 'state': 'queued', + 'last_accessed': 'N/A', + 'last_modified': 'N/A', + 'size': 'N/A', + 'hits': 'N/A', + } + ) + image_list.append(image) + return image_list + + +class ListCachedImage(command.Lister): + _description = _("Get Cache State") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + # List of Cache data received + data = _format_image_cache(dict(image_client.get_image_cache())) + columns = [ + 'image_id', + 'state', + 'last_accessed', + 'last_modified', + 'size', + 'hits', + ] + column_headers = [ + "ID", + "State", + "Last Accessed (UTC)", + "Last Modified (UTC)", + "Size", + "Hits", + ] + + return ( + column_headers, + ( + utils.get_dict_properties( + image, + columns, + ) + for image in data + ), + ) + + +class QueueCachedImage(command.Command): + _description = _("Queue image(s) for caching.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "images", + metavar="", + nargs="+", + help=_("Image to display (name or ID)"), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + failures = 0 + for image in parsed_args.images: + try: + image_obj = image_client.find_image( + image, + ignore_missing=False, + ) + image_client.queue_image(image_obj.id) + except Exception as e: + failures += 1 + msg = _( + "Failed to queue image with name or ID '%(image)s': %(e)s" + ) + LOG.error(msg, {'image': image, 'e': e}) + + if failures > 0: + total = len(parsed_args.images) + msg = _("Failed to queue %(failures)s of %(total)s images") % { + 'failures': failures, + 'total': total, + } + raise exceptions.CommandError(msg) + + +class DeleteCachedImage(command.Command): + _description = _("Delete image(s) from cache") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "images", + metavar="", + nargs="+", + help=_("Image(s) to delete (name or ID)"), + ) + return parser + + def take_action(self, parsed_args): + failures = 0 + image_client = self.app.client_manager.image + for image in parsed_args.images: + try: + image_obj = image_client.find_image( + image, + ignore_missing=False, + ) + image_client.cache_delete_image(image_obj.id) + except Exception as e: + failures += 1 + msg = _( + "Failed to delete image with name or ID '%(image)s': %(e)s" + ) + LOG.error(msg, {'image': image, 'e': e}) + + if failures > 0: + total = 
len(parsed_args.images) + msg = _("Failed to delete %(failures)s of %(total)s images.") % { + 'failures': failures, + 'total': total, + } + raise exceptions.CommandError(msg) + + +class ClearCachedImage(command.Command): + _description = _("Clear all images from cache, queue or both") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "--cache", + action="store_const", + const="cache", + default="both", + dest="target", + help=_("Clears all the cached images"), + ) + parser.add_argument( + "--queue", + action="store_const", + const="queue", + default="both", + dest="target", + help=_("Clears all the queued images"), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + target = parsed_args.target + try: + image_client.clear_cache(target) + except Exception: + msg = _("Failed to clear image cache") + LOG.error(msg) + raise exceptions.CommandError(msg) diff --git a/openstackclient/image/v2/image.py b/openstackclient/image/v2/image.py index 71dcc73120..cbb6d874de 100644 --- a/openstackclient/image/v2/image.py +++ b/openstackclient/image/v2/image.py @@ -17,30 +17,28 @@ import argparse from base64 import b64encode +import copy import logging import os import sys +import typing as ty +import urllib.parse -from cinderclient import api_versions from openstack import exceptions as sdk_exceptions from openstack.image import image_signer +from openstack import utils as sdk_utils from osc_lib.api import utils as api_utils from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.common import progressbar from openstackclient.i18n import _ from openstackclient.identity import common as identity_common -if os.name == "nt": - import msvcrt -else: - msvcrt = None - - CONTAINER_CHOICES = ["ami", "ari", "aki", "bare", "docker", "ova", "ovf"] DEFAULT_CONTAINER_FORMAT = 'bare' DEFAULT_DISK_FORMAT = 'raw' @@ -57,9 +55,21 @@ "iso", "ploop", ] +# A list of openstacksdk Image object attributes (values) that named +# differently from actual properties stored by Glance (keys). 
+IMAGE_ATTRIBUTES_CUSTOM_NAMES = { + 'os_hidden': 'is_hidden', + 'protected': 'is_protected', + 'os_hash_algo': 'hash_algo', + 'os_hash_value': 'hash_value', + 'img_config_drive': 'needs_config_drive', + 'os_secure_boot': 'needs_secure_boot', + 'hw_vif_multiqueue_enabled': 'is_hw_vif_multiqueue_enabled', + 'hw_boot_menu': 'is_hw_boot_menu_enabled', + 'auto_disk_config': 'has_auto_disk_config', +} MEMBER_STATUS_CHOICES = ["accepted", "pending", "rejected", "all"] - LOG = logging.getLogger(__name__) @@ -88,6 +98,9 @@ def _format_image(image, human_readable=False): 'virtual_size', 'min_ram', 'schema', + 'is_hidden', + 'hash_algo', + 'hash_value', ] # TODO(gtema/anybody): actually it should be possible to drop this method, @@ -153,8 +166,10 @@ def get_data_from_stdin(): image = sys.stdin if hasattr(sys.stdin, 'buffer'): image = sys.stdin.buffer - if msvcrt: - msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) + if os.name == "nt": + import msvcrt + + msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) # type: ignore return image else: @@ -162,6 +177,67 @@ def get_data_from_stdin(): return None +def _add_is_protected_args(parser): + protected_group = parser.add_mutually_exclusive_group() + protected_group.add_argument( + "--protected", + action="store_true", + dest="is_protected", + default=None, + help=_("Prevent image from being deleted"), + ) + protected_group.add_argument( + "--unprotected", + action="store_false", + dest="is_protected", + default=None, + help=_("Allow image to be deleted (default)"), + ) + + +def _add_visibility_args(parser): + public_group = parser.add_mutually_exclusive_group() + public_group.add_argument( + "--public", + action="store_const", + const="public", + dest="visibility", + help=_("Image is accessible and visible to all users"), + ) + public_group.add_argument( + "--private", + action="store_const", + const="private", + dest="visibility", + help=_( + "Image is only accessible by the owner " + "(default until --os-image-api-version 2.5)" + ), + ) + public_group.add_argument( + "--community", + action="store_const", + const="community", + dest="visibility", + help=_( + "Image is accessible by all users but does not appear in the " + "default image list of any user except the owner " + "(requires --os-image-api-version 2.5 or later)" + ), + ) + public_group.add_argument( + "--shared", + action="store_const", + const="shared", + dest="visibility", + help=_( + "Image is only accessible by the owner and image members " + "(requires --os-image-api-version 2.5 or later) " + "(default since --os-image-api-version 2.5)" + ), + ) + + class AddProjectToImage(command.ShowOne): _description = _("Associate project with image") @@ -297,8 +373,7 @@ def get_parser(self, prog_name): action="store_true", default=False, help=_( - "Show upload progress bar " - "(ignored if passing data via stdin)" + "Show upload progress bar (ignored if passing data via stdin)" ), ) parser.add_argument( @@ -321,50 +396,8 @@ def get_parser(self, prog_name): "Only use in combination with --sign-key-path" ), ) - protected_group = parser.add_mutually_exclusive_group() - protected_group.add_argument( - "--protected", - action="store_true", - dest="is_protected", - default=None, - help=_("Prevent image from being deleted"), - ) - protected_group.add_argument( - "--unprotected", - action="store_false", - dest="is_protected", - default=None, - help=_("Allow image to be deleted (default)"), - ) - public_group = parser.add_mutually_exclusive_group() - public_group.add_argument( - "--public", - action="store_const", 
- const="public", - dest="visibility", - help=_("Image is accessible to the public"), - ) - public_group.add_argument( - "--private", - action="store_const", - const="private", - dest="visibility", - help=_("Image is inaccessible to the public (default)"), - ) - public_group.add_argument( - "--community", - action="store_const", - const="community", - dest="visibility", - help=_("Image is accessible to the community"), - ) - public_group.add_argument( - "--shared", - action="store_const", - const="shared", - dest="visibility", - help=_("Image can be shared"), - ) + _add_is_protected_args(parser) + _add_visibility_args(parser) parser.add_argument( "--property", dest="properties", @@ -381,8 +414,7 @@ def get_parser(self, prog_name): metavar="", action='append', help=_( - "Set a tag on this image " - "(repeat option to set multiple tags)" + "Set a tag on this image (repeat option to set multiple tags)" ), ) parser.add_argument( @@ -401,8 +433,8 @@ def get_parser(self, prog_name): identity_common.add_project_domain_option_to_parser(parser) for deadopt in self.deadopts: parser.add_argument( - "--%s" % deadopt, - metavar="<%s>" % deadopt, + f"--{deadopt}", + metavar=f"<{deadopt}>", dest=deadopt.replace('-', '_'), help=argparse.SUPPRESS, ) @@ -414,7 +446,7 @@ def _take_action_image(self, parsed_args): # Build an attribute dict from the parsed args, only include # attributes that were actually set on the command line - kwargs = {'allow_duplicates': True} + kwargs: dict[str, ty.Any] = {'allow_duplicates': True} copy_attrs = ( 'name', 'id', @@ -468,7 +500,7 @@ def _take_action_image(self, parsed_args): fp = open(parsed_args.filename, 'rb') except FileNotFoundError: raise exceptions.CommandError( - '%r is not a valid file' % parsed_args.filename, + f'{parsed_args.filename!r} is not a valid file', ) else: fp = get_data_from_stdin() @@ -506,8 +538,8 @@ def _take_action_image(self, parsed_args): raise exceptions.CommandError(msg) if ( - len(parsed_args.sign_key_path) < 1 or - len(parsed_args.sign_cert_id) < 1 + len(parsed_args.sign_key_path) < 1 + or len(parsed_args.sign_cert_id) < 1 ): msg = _( "'sign-key-path' and 'sign-cert-id' must both be " @@ -519,7 +551,7 @@ def _take_action_image(self, parsed_args): sign_cert_id = parsed_args.sign_cert_id signer = image_signer.ImageSigner() try: - pw = utils.get_password( + pw: str | None = utils.get_password( self.app.stdin, prompt=( "Please enter private key password, leave " @@ -530,12 +562,11 @@ def _take_action_image(self, parsed_args): if not pw or len(pw) < 1: pw = None - else: - # load_private_key() requires the password to be - # passed as bytes - pw = pw.encode() - signer.load_private_key(sign_key_path, password=pw) + signer.load_private_key( + sign_key_path, + password=pw.encode() if pw else None, + ) except Exception: msg = _( "Error during sign operation: private key " @@ -556,10 +587,13 @@ def _take_action_image(self, parsed_args): if parsed_args.filename: fp.close() + # NOTE(pas-ha): create_image returns the image object as it was created + # before the data was uploaded, need a refresh to show the final state + image = image_client.get_image(image) return _format_image(image) def _take_action_volume(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume unsupported_opts = { # 'name', # 'name' is a positional argument and will always exist @@ -590,16 +624,15 @@ def _take_action_volume(self, parsed_args): # version LOG.warning(msg % opt_name) - source_volume = 
utils.find_resource( - volume_client.volumes, - parsed_args.volume, + source_volume = volume_client.find_volume( + parsed_args.volume, ignore_missing=False ) - kwargs = {} - if volume_client.api_version < api_versions.APIVersion('3.1'): - if ( - parsed_args.visibility or - parsed_args.is_protected is not None - ): + kwargs: dict[str, ty.Any] = { + 'visibility': None, + 'protected': None, + } + if not sdk_utils.supports_microversion(volume_client, '3.1'): + if parsed_args.visibility or parsed_args.is_protected is not None: msg = _( '--os-volume-api-version 3.1 or greater is required ' 'to support the --public, --private, --community, ' @@ -607,20 +640,18 @@ def _take_action_volume(self, parsed_args): ) raise exceptions.CommandError(msg) else: - kwargs.update( - visibility=parsed_args.visibility or 'private', - protected=parsed_args.is_protected or False, - ) + kwargs['visibility'] = parsed_args.visibility or 'private' + kwargs['protected'] = parsed_args.is_protected or False - response, body = volume_client.volumes.upload_to_image( + response = volume_client.upload_volume_to_image( source_volume.id, - parsed_args.force, parsed_args.name, - parsed_args.container_format, - parsed_args.disk_format, - **kwargs + force=parsed_args.force, + disk_format=parsed_args.disk_format, + container_format=parsed_args.container_format, + **kwargs, ) - info = body['os-volume_upload_image'] + info = copy.deepcopy(response) try: info['volume_type'] = info['volume_type']['name'] except TypeError: @@ -656,11 +687,17 @@ def get_parser(self, prog_name): nargs="+", help=_("Image(s) to delete (name or ID)"), ) + parser.add_argument( + '--store', + metavar='', + # default=None, + dest='store', + help=_('Store to delete image(s) from.'), + ) return parser def take_action(self, parsed_args): - - del_result = 0 + result = 0 image_client = self.app.client_manager.image for image in parsed_args.images: try: @@ -668,19 +705,25 @@ def take_action(self, parsed_args): image, ignore_missing=False, ) - image_client.delete_image(image_obj.id) + image_client.delete_image( + image_obj.id, + store=parsed_args.store, + ignore_missing=False, + ) + except sdk_exceptions.ResourceNotFound: + msg = _("Multi Backend support not enabled.") + raise exceptions.CommandError(msg) except Exception as e: - del_result += 1 + result += 1 msg = _( - "Failed to delete image with name or " - "ID '%(image)s': %(e)s" + "Failed to delete image with name or ID '%(image)s': %(e)s" ) LOG.error(msg, {'image': image, 'e': e}) total = len(parsed_args.images) - if del_result > 0: - msg = _("Failed to delete %(dresult)s of %(total)s images.") % { - 'dresult': del_result, + if result > 0: + msg = _("Failed to delete %(result)s of %(total)s images.") % { + 'result': result, 'total': total, } raise exceptions.CommandError(msg) @@ -711,14 +754,20 @@ def get_parser(self, prog_name): action="store_const", const="community", dest="visibility", - help=_("List only community images"), + help=_( + "List only community images " + "(requires --os-image-api-version 2.5 or later)" + ), ) public_group.add_argument( "--shared", action="store_const", const="shared", dest="visibility", - help=_("List only shared images"), + help=_( + "List only shared images " + "(requires --os-image-api-version 2.5 or later)" + ), ) public_group.add_argument( "--all", @@ -791,9 +840,9 @@ def get_parser(self, prog_name): default=False, help=_('List additional fields in output'), ) - # --page-size has never worked, leave here for silent compatibility # We'll implement limit/marker differently 
later + # TODO(stephenfin): Remove this in the next major version bump parser.add_argument( "--page-size", metavar="", @@ -809,22 +858,7 @@ def get_parser(self, prog_name): "specified separated by comma" ), ) - parser.add_argument( - "--limit", - metavar="", - type=int, - help=_("Maximum number of images to display."), - ) - parser.add_argument( - '--marker', - metavar='', - default=None, - help=_( - "The last image of the previous page. Display " - "list of images after marker. Display all images if not " - "specified. (name or ID)" - ), - ) + pagination.add_marker_pagination_option_to_parser(parser) return parser def take_action(self, parsed_args): @@ -860,7 +894,7 @@ def take_action(self, parsed_args): if parsed_args.is_hidden: kwargs['is_hidden'] = parsed_args.is_hidden if parsed_args.long: - columns = ( + columns: tuple[str, ...] = ( 'ID', 'Name', 'Disk Format', @@ -871,9 +905,11 @@ def take_action(self, parsed_args): 'visibility', 'is_protected', 'owner_id', + 'hash_algo', + 'hash_value', 'tags', ) - column_headers = ( + column_headers: tuple[str, ...] = ( 'ID', 'Name', 'Disk Format', @@ -884,6 +920,8 @@ def take_action(self, parsed_args): 'Visibility', 'Protected', 'Project', + 'Hash Algorithm', + 'Hash Value', 'Tags', ) else: @@ -894,18 +932,19 @@ def take_action(self, parsed_args): if 'limit' in kwargs: # Disable automatic pagination in SDK kwargs['paginated'] = False - data = list(image_client.images(**kwargs)) + + images = list(image_client.images(**kwargs)) if parsed_args.property: for attr, value in parsed_args.property.items(): api_utils.simple_filter( - data, + images, attr=attr, value=value, property_field='properties', ) - data = utils.sort_items(data, parsed_args.sort, str) + data = utils.sort_items(images, parsed_args.sort, str) return ( column_headers, @@ -935,7 +974,7 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): image_client = self.app.client_manager.image - columns = ("Image ID", "Member ID", "Status") + columns: tuple[str, ...] 
= ("Image ID", "Member ID", "Status") image_id = image_client.find_image( parsed_args.image, @@ -992,11 +1031,58 @@ def take_action(self, parsed_args): image_client.remove_member(member=project_id, image=image.id) +class ShowProjectImage(command.ShowOne): + _description = _("Show a particular project associated with image") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "image", + metavar="", + help=_("Image (name or ID)"), + ) + parser.add_argument( + "member", + metavar="", + help=_("Project to show (name or ID)"), + ) + identity_common.add_project_domain_option_to_parser(parser) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + image = image_client.find_image( + parsed_args.image, + ignore_missing=False, + ) + + obj = image_client.get_member( + image=image.id, + member=parsed_args.member, + ) + + display_columns, columns = _get_member_columns(obj) + data = utils.get_item_properties(obj, columns, formatters={}) + + return (display_columns, data) + + class SaveImage(command.Command): _description = _("Save an image locally") def get_parser(self, prog_name): parser = super().get_parser(prog_name) + parser.add_argument( + "--chunk-size", + type=int, + default=1024, + metavar="", + help=_( + "Size in bytes to read from the wire and buffer at one " + "time (default: 1024)" + ), + ) parser.add_argument( "--file", metavar="", @@ -1021,7 +1107,12 @@ def take_action(self, parsed_args): if output_file is None: output_file = getattr(sys.stdout, "buffer", sys.stdout) - image_client.download_image(image.id, stream=True, output=output_file) + image_client.download_image( + image.id, + stream=True, + output=output_file, + chunk_size=parsed_args.chunk_size, + ) class SetImage(command.Command): @@ -1073,50 +1164,8 @@ def get_parser(self, prog_name): help=_("Image disk format. 
The supported options are: %s") % ', '.join(DISK_CHOICES), ) - protected_group = parser.add_mutually_exclusive_group() - protected_group.add_argument( - "--protected", - action="store_true", - dest="is_protected", - default=None, - help=_("Prevent image from being deleted"), - ) - protected_group.add_argument( - "--unprotected", - action="store_false", - dest="is_protected", - default=None, - help=_("Allow image to be deleted (default)"), - ) - public_group = parser.add_mutually_exclusive_group() - public_group.add_argument( - "--public", - action="store_const", - const="public", - dest="visibility", - help=_("Image is accessible to the public"), - ) - public_group.add_argument( - "--private", - action="store_const", - const="private", - dest="visibility", - help=_("Image is inaccessible to the public (default)"), - ) - public_group.add_argument( - "--community", - action="store_const", - const="community", - dest="visibility", - help=_("Image is accessible to the community"), - ) - public_group.add_argument( - "--shared", - action="store_const", - const="shared", - dest="visibility", - help=_("Image can be shared"), - ) + _add_is_protected_args(parser) + _add_visibility_args(parser) parser.add_argument( "--property", dest="properties", @@ -1134,8 +1183,7 @@ def get_parser(self, prog_name): default=None, action='append', help=_( - "Set a tag on this image " - "(repeat option to set multiple tags)" + "Set a tag on this image (repeat option to set multiple tags)" ), ) parser.add_argument( @@ -1193,8 +1241,8 @@ def get_parser(self, prog_name): identity_common.add_project_domain_option_to_parser(parser) for deadopt in self.deadopts: parser.add_argument( - "--%s" % deadopt, - metavar="<%s>" % deadopt, + f"--{deadopt}", + metavar=f"<{deadopt}>", dest=f"dead_{deadopt.replace('-', '_')}", help=argparse.SUPPRESS, ) @@ -1345,7 +1393,10 @@ def take_action(self, parsed_args): if parsed_args.visibility is not None: kwargs['visibility'] = parsed_args.visibility - if parsed_args.project: + # Only set owner_id if --project is used WITHOUT membership flags + # When --project is used with --accept/--reject/--pending, it should + # only identify which member's status to update, not change ownership + if parsed_args.project and not parsed_args.membership: # We already did the project lookup above kwargs['owner_id'] = project_id @@ -1438,7 +1489,6 @@ def take_action(self, parsed_args): ignore_missing=False, ) - kwargs = {} tagret = 0 propret = 0 if parsed_args.tags: @@ -1447,10 +1497,11 @@ def take_action(self, parsed_args): image_client.remove_tag(image.id, k) except Exception: LOG.error( - _("tag unset failed, '%s' is a " "nonexistent tag "), k + _("tag unset failed, '%s' is a nonexistent tag "), k ) tagret += 1 + kwargs: dict[str, ty.Any] = {} if parsed_args.properties: for k in parsed_args.properties: if k in image: @@ -1465,6 +1516,11 @@ def take_action(self, parsed_args): ) new_props.pop(k, None) kwargs['properties'] = new_props + elif ( + k in IMAGE_ATTRIBUTES_CUSTOM_NAMES + and IMAGE_ATTRIBUTES_CUSTOM_NAMES[k] in image + ): + delattr(image, IMAGE_ATTRIBUTES_CUSTOM_NAMES[k]) else: LOG.error( _( @@ -1500,7 +1556,7 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) elif propret > 0: msg = _( - "Failed to unset %(propret)s of %(proptotal)s" " properties." + "Failed to unset %(propret)s of %(proptotal)s properties." 
) % {'propret': propret, 'proptotal': proptotal} raise exceptions.CommandError(msg) @@ -1532,8 +1588,7 @@ def get_parser(self, prog_name): action='store_true', default=False, help=_( - 'Show upload progress bar ' - '(ignored if passing data via stdin)' + 'Show upload progress bar (ignored if passing data via stdin)' ), ) parser.add_argument( @@ -1559,12 +1614,12 @@ def take_action(self, parsed_args): fp = open(parsed_args.filename, 'rb') except FileNotFoundError: raise exceptions.CommandError( - '%r is not a valid file' % parsed_args.filename, + f'{parsed_args.filename!r} is not a valid file', ) else: fp = get_data_from_stdin() - kwargs = {} + kwargs: dict[str, ty.Any] = {} if parsed_args.progress and parsed_args.filename: # NOTE(stephenfin): we only show a progress bar if the user @@ -1598,8 +1653,6 @@ def get_parser(self, prog_name): metavar='', help=_('Image to initiate import process for (name or ID)'), ) - # TODO(stephenfin): Uncomment help text when we have this command - # implemented parser.add_argument( '--method', metavar='', @@ -1614,8 +1667,6 @@ def get_parser(self, prog_name): help=_( "Import method used for image import process. " "Not all deployments will support all methods. " - # "Valid values can be retrieved with the 'image import " - # "methods' command. " "The 'glance-direct' method (default) requires images be " "first staged using the 'image-stage' command." ), @@ -1674,7 +1725,8 @@ def get_parser(self, prog_name): "'copy-image' import method)" ), ) - parser.add_argument( + allow_failure_group = parser.add_mutually_exclusive_group() + allow_failure_group.add_argument( '--allow-failure', action='store_true', dest='allow_failure', @@ -1685,9 +1737,9 @@ def get_parser(self, prog_name): 'Only usable with --stores or --all-stores' ), ) - parser.add_argument( + allow_failure_group.add_argument( '--disallow-failure', - action='store_true', + action='store_false', dest='allow_failure', default=True, help=_( @@ -1718,11 +1770,15 @@ def take_action(self, parsed_args): if parsed_args.import_method not in import_methods: msg = _( - "The '%s' import method is not supported by this deployment. " - "Supported: %s" + "The '%(method)s' import method is not supported by this " + "deployment. 
Supported: %(supported)s" ) raise exceptions.CommandError( - msg % (parsed_args.import_method, ', '.join(import_methods)), + msg + % { + 'method': parsed_args.import_method, + 'supported': ', '.join(import_methods), + }, ) if parsed_args.import_method == 'web-download': @@ -1732,6 +1788,12 @@ def take_action(self, parsed_args): "'--method=web-download'" ) raise exceptions.CommandError(msg) + _parsed = urllib.parse.urlparse(parsed_args.uri) + if not all({_parsed.scheme, _parsed.netloc}): + msg = _("'%(uri)s' is not a valid url") + raise exceptions.CommandError( + msg % {'uri': parsed_args.uri}, + ) else: if parsed_args.uri: msg = _( @@ -1777,7 +1839,9 @@ def take_action(self, parsed_args): ) raise exceptions.CommandError(msg) - image = image_client.find_image(parsed_args.image) + image = image_client.find_image( + parsed_args.image, ignore_missing=False + ) if not image.container_format and not image.disk_format: msg = _( @@ -1813,7 +1877,7 @@ def take_action(self, parsed_args): method=parsed_args.import_method, uri=parsed_args.uri, remote_region=parsed_args.remote_region, - remote_image=parsed_args.remote_image, + remote_image_id=parsed_args.remote_image, remote_service_interface=parsed_args.remote_service_interface, stores=parsed_args.stores, all_stores=parsed_args.all_stores, @@ -1822,3 +1886,48 @@ def take_action(self, parsed_args): info = _format_image(image) return zip(*sorted(info.items())) + + +class StoresInfo(command.Lister): + _description = _( + "Get available backends (only valid with Multi-Backend support)" + ) + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "--detail", + action='store_true', + default=None, + help=_( + 'Shows details of stores (admin only) ' + '(requires --os-image-api-version 2.15 or later)' + ), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + try: + columns: tuple[str, ...] = ("id", "description", "is_default") + column_headers: tuple[str, ...] = ("ID", "Description", "Default") + if parsed_args.detail: + columns += ("properties",) + column_headers += ("Properties",) + + data = list(image_client.stores(details=parsed_args.detail)) + except sdk_exceptions.ResourceNotFound: + msg = _('Multi Backend support not enabled') + raise exceptions.CommandError(msg) + else: + return ( + column_headers, + ( + utils.get_item_properties( + store, + columns, + formatters=_formatters, + ) + for store in data + ), + ) diff --git a/openstackclient/image/v2/info.py b/openstackclient/image/v2/info.py new file mode 100644 index 0000000000..68848136ea --- /dev/null +++ b/openstackclient/image/v2/info.py @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
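The new web-download validation in ImportImage above rejects URIs that lack a scheme or a host before calling the import API. A small self-contained sketch of that check; _is_valid_import_uri() is a hypothetical helper name used only for illustration:

    import urllib.parse


    def _is_valid_import_uri(uri: str) -> bool:
        # Mirrors the check above: both a scheme and a network location
        # must be present for web-download to make sense.
        parsed = urllib.parse.urlparse(uri)
        return all({parsed.scheme, parsed.netloc})


    assert _is_valid_import_uri('https://example.com/cirros.qcow2')
    assert not _is_valid_import_uri('example.com/cirros.qcow2')   # no scheme
    assert not _is_valid_import_uri('file:///tmp/cirros.qcow2')   # no host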
+ + +from osc_lib.cli import format_columns + +from openstackclient import command +from openstackclient.i18n import _ + + +class ImportInfo(command.ShowOne): + _description = _("Show available import methods") + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + import_info = image_client.get_import_info() + import_methods = import_info.import_methods or {} + return ( + ('import-methods',), + (format_columns.ListColumn(import_methods.get('value', [])),), + ) diff --git a/openstackclient/image/v2/metadef_namespaces.py b/openstackclient/image/v2/metadef_namespaces.py index f09f200249..af30f718f9 100644 --- a/openstackclient/image/v2/metadef_namespaces.py +++ b/openstackclient/image/v2/metadef_namespaces.py @@ -18,10 +18,10 @@ import logging from osc_lib.cli import format_columns -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ _formatters = { @@ -42,6 +42,7 @@ def _format_namespace(namespace): 'owner', 'protected', 'schema', + 'updated_at', 'visibility', ] @@ -52,15 +53,16 @@ def _format_namespace(namespace): if key in fields_to_show: info[key] = namespace.get(key) elif key == "resource_type_associations": - info[key] = [resource_type['name'] - for resource_type in namespace.get(key)] + info[key] = [ + resource_type['name'] for resource_type in namespace.get(key) + ] elif key == 'properties': info['properties'] = list(namespace.get(key).keys()) return info -class CreateMetadefNameSpace(command.ShowOne): +class CreateMetadefNamespace(command.ShowOne): _description = _("Create a metadef namespace") def get_parser(self, prog_name): @@ -114,11 +116,7 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): image_client = self.app.client_manager.image - filter_keys = [ - 'namespace', - 'display_name', - 'description' - ] + filter_keys = ['namespace', 'display_name', 'description'] kwargs = {} for key in filter_keys: @@ -133,20 +131,21 @@ def take_action(self, parsed_args): kwargs['visibility'] = parsed_args.visibility data = image_client.create_metadef_namespace(**kwargs) + info = _format_namespace(data) - return zip(*sorted(data.items())) + return zip(*sorted(info.items())) -class DeleteMetadefNameSpace(command.Command): +class DeleteMetadefNamespace(command.Command): _description = _("Delete metadef namespace") def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( - "namespace_name", - metavar="", + "namespace", + metavar="", nargs="+", - help=_("An identifier (a name) for the namespace"), + help=_("Metadef namespace(s) to delete (name)"), ) return parser @@ -154,25 +153,30 @@ def take_action(self, parsed_args): image_client = self.app.client_manager.image result = 0 - for i in parsed_args.namespace_name: + for ns in parsed_args.namespace: try: - namespace = image_client.get_metadef_namespace(i) + namespace = image_client.get_metadef_namespace(ns) image_client.delete_metadef_namespace(namespace.id) except Exception as e: result += 1 - LOG.error(_("Failed to delete namespace with name or " - "ID '%(namespace)s': %(e)s"), - {'namespace': i, 'e': e} - ) + LOG.error( + _( + "Failed to delete namespace with name or " + "ID '%(namespace)s': %(e)s" + ), + {'namespace': ns, 'e': e}, + ) if result > 0: - total = len(parsed_args.namespace_name) - msg = (_("%(result)s of %(total)s namespace failed " - "to delete.") % {'result': result, 'total': total}) + total = len(parsed_args.namespace) + msg = 
_("%(result)s of %(total)s namespace failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) -class ListMetadefNameSpaces(command.Lister): +class ListMetadefNamespace(command.Lister): _description = _("List metadef namespaces") def get_parser(self, prog_name): @@ -203,15 +207,18 @@ def take_action(self, parsed_args): column_headers = columns return ( column_headers, - (utils.get_item_properties( - s, - columns, - formatters=_formatters, - ) for s in data) + ( + utils.get_item_properties( + s, + columns, + formatters=_formatters, + ) + for s in data + ), ) -class SetMetadefNameSpace(command.Command): +class SetMetadefNamespace(command.Command): _description = _("Set metadef namespace properties") def get_parser(self, prog_name): @@ -219,7 +226,7 @@ def get_parser(self, prog_name): parser.add_argument( "namespace", metavar="", - help=_("Namespace (name) for the namespace"), + help=_("Metadef namespace to modify (name)"), ) parser.add_argument( "--display-name", @@ -237,14 +244,16 @@ def get_parser(self, prog_name): action="store_const", const="public", dest="visibility", - help=_("Set namespace visibility 'public'"), + help=_("Metadef namespace is accessible to the public"), ) visibility_group.add_argument( "--private", action="store_const", const="private", dest="visibility", - help=_("Set namespace visibility 'private'"), + help=_( + "Metadef namespace is inaccessible to the public (default)" + ), ) protected_group = parser.add_mutually_exclusive_group() protected_group.add_argument( @@ -268,11 +277,7 @@ def take_action(self, parsed_args): namespace = parsed_args.namespace - filter_keys = [ - 'namespace', - 'display_name', - 'description' - ] + filter_keys = ['namespace', 'display_name', 'description'] kwargs = {} for key in filter_keys: @@ -289,24 +294,24 @@ def take_action(self, parsed_args): image_client.update_metadef_namespace(namespace, **kwargs) -class ShowMetadefNameSpace(command.ShowOne): +class ShowMetadefNamespace(command.ShowOne): _description = _("Show a metadef namespace") def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( - "namespace_name", - metavar="", - help=_("Namespace (name) for the namespace"), + "namespace", + metavar="", + help=_("Metadef namespace to show (name)"), ) return parser def take_action(self, parsed_args): image_client = self.app.client_manager.image - namespace_name = parsed_args.namespace_name + namespace = parsed_args.namespace - data = image_client.get_metadef_namespace(namespace_name) + data = image_client.get_metadef_namespace(namespace) info = _format_namespace(data) return zip(*sorted(info.items())) diff --git a/openstackclient/image/v2/metadef_objects.py b/openstackclient/image/v2/metadef_objects.py new file mode 100644 index 0000000000..d5cbec1cd9 --- /dev/null +++ b/openstackclient/image/v2/metadef_objects.py @@ -0,0 +1,277 @@ +# Copyright 2023 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""Image V2 Action Implementations""" + +import logging + +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient import command +from openstackclient.i18n import _ + + +LOG = logging.getLogger(__name__) + + +def _format_object(md_object): + fields_to_show = ( + 'created_at', + 'description', + 'name', + 'namespace_name', + 'properties', + 'required', + 'updated_at', + ) + + return ( + fields_to_show, + utils.get_item_properties( + md_object, + fields_to_show, + ), + ) + + +class CreateMetadefObjects(command.ShowOne): + _description = _("Create a metadef object") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "--namespace", + metavar="", + help=_("Metadef namespace to create the object in (name)"), + ) + parser.add_argument( + "name", + metavar='', + help=_('New metadef object name'), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + namespace = image_client.get_metadef_namespace( + parsed_args.namespace, + ) + data = image_client.create_metadef_object( + namespace=namespace.namespace, + name=parsed_args.name, + ) + + fields, value = _format_object(data) + + return fields, value + + +class ShowMetadefObjects(command.ShowOne): + _description = _("Show a particular metadef object") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "namespace", + metavar="", + help=_("Metadef namespace of the object (name)"), + ) + parser.add_argument( + "object", + metavar="", + help=_("Metadef object to show"), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + namespace = parsed_args.namespace + object = parsed_args.object + + data = image_client.get_metadef_object(object, namespace) + + fields, value = _format_object(data) + + return fields, value + + +class DeleteMetadefObject(command.Command): + _description = _("Delete metadata definitions object(s)") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "namespace", + metavar="", + help=_("Metadef namespace of the object (name)"), + ) + parser.add_argument( + "objects", + metavar="", + nargs="*", + help=_( + "Metadef object(s) to delete (name) " + "(omit this argument to delete all objects in the namespace)" + ), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + namespace = parsed_args.namespace + + if not parsed_args.objects: + return image_client.delete_all_metadef_objects(namespace) + + result = 0 + for obj in parsed_args.objects: + try: + object = image_client.get_metadef_object(obj, namespace) + image_client.delete_metadef_object(object, namespace) + except Exception as e: + result += 1 + LOG.error( + _( + "Failed to delete object with name or " + "ID '%(object)s': %(e)s" + ), + {'object': obj, 'e': e}, + ) + + if result > 0: + total = len(parsed_args.namespace) + msg = _("%(result)s of %(total)s object failed to delete.") % { + 'result': result, + 'total': total, + } + raise exceptions.CommandError(msg) + + +class ListMetadefObjects(command.Lister): + _description = _("List metadef objects inside a specific namespace.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "namespace", + metavar="", + help=_("Namespace (name) for the namespace"), + ) + return parser + + def take_action(self, parsed_args): + image_client = 
self.app.client_manager.image + + namespace = parsed_args.namespace + columns = ['name', 'description'] + + md_objects = list(image_client.metadef_objects(namespace)) + column_headers = columns + return ( + column_headers, + ( + utils.get_item_properties( + md_object, + columns, + ) + for md_object in md_objects + ), + ) + + +class SetMetadefObject(command.Command): + _description = _("Update a metadef object") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "namespace", + metavar="", + help=_("Metadef namespace name"), + ) + parser.add_argument( + "object", + metavar="", + help=_('Metadef object to be updated'), + ) + parser.add_argument( + "--name", + help=_("New name of the object"), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + object = image_client.get_metadef_object( + parsed_args.object, parsed_args.namespace + ) + kwargs = {} + if parsed_args.name: + kwargs['name'] = parsed_args.name + + image_client.update_metadef_object( + object, parsed_args.namespace, **kwargs + ) + + +class ShowMetadefObjectProperty(command.ShowOne): + _description = _( + "Describe a specific metadata definitions property inside an object." + ) + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "namespace", + metavar="", + help=_("Namespace (name) for the namespace"), + ) + parser.add_argument( + "object", + metavar="", + help=_("Name of an object."), + ) + parser.add_argument( + "property", + help=_("Name of the property."), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + namespace_name = parsed_args.namespace + object_name = parsed_args.object + + obj = image_client.get_metadef_object(object_name, namespace_name) + try: + prop = obj['properties'][parsed_args.property] + prop['name'] = parsed_args.property + + except KeyError: + msg = _( + 'Property %(property)s not found in object %(object)s.' + ) % { + 'property': parsed_args.property, + 'object': parsed_args.object, + } + raise exceptions.CommandError(msg) + + return zip(*sorted(prop.items())) diff --git a/openstackclient/image/v2/metadef_properties.py b/openstackclient/image/v2/metadef_properties.py new file mode 100644 index 0000000000..3a923c5226 --- /dev/null +++ b/openstackclient/image/v2/metadef_properties.py @@ -0,0 +1,292 @@ +# Copyright 2023 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
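Similarly, the metadef object commands above wrap a handful of proxy calls, and every lookup needs both the object name and its namespace. A minimal sketch, again assuming the conn Connection and the placeholder namespace from the previous sketch:

    # 'demo-object' and 'OS::Example::Demo' are placeholders.
    obj = conn.image.create_metadef_object(
        namespace='OS::Example::Demo',
        name='demo-object',
    )

    for o in conn.image.metadef_objects('OS::Example::Demo'):
        print(o.name)

    obj = conn.image.get_metadef_object('demo-object', 'OS::Example::Demo')
    conn.image.delete_metadef_object(obj, 'OS::Example::Demo')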
+ +import json +import logging + +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient import command +from openstackclient.i18n import _ + + +LOG = logging.getLogger(__name__) + + +def _format_property(prop): + prop = prop.to_dict(ignore_none=True, original_names=True) + return { + key: prop[key] + for key in [ + 'namespace_name', + 'name', + 'type', + 'title', + 'description', + 'operators', + 'default', + 'is_readonly', + 'minimum', + 'maximum', + 'enum', + 'pattern', + 'min_length', + 'max_length', + 'items', + 'require_unique_items', + 'min_items', + 'max_items', + 'allow_additional_items', + ] + if key in prop + } + + +class CreateMetadefProperty(command.ShowOne): + _description = _("Create a metadef property") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "--name", + required=True, + help=_("Internal name of the property"), + ) + parser.add_argument( + "--title", + required=True, + help=_("Property name displayed to the user"), + ) + parser.add_argument( + "--type", + required=True, + help=_("Property type"), + ) + parser.add_argument( + "--schema", + required=True, + help=_("Valid JSON schema of the property"), + ) + parser.add_argument( + "namespace", + help=_("Name of namespace the property will belong."), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + kwargs = { + 'name': parsed_args.name, + 'title': parsed_args.title, + 'type': parsed_args.type, + } + try: + kwargs.update(json.loads(parsed_args.schema)) + except json.JSONDecodeError as e: + raise exceptions.CommandError( + _("Failed to load JSON schema: %(e)s") + % { + 'e': e, + } + ) + + data = image_client.create_metadef_property( + parsed_args.namespace, **kwargs + ) + info = _format_property(data) + + return zip(*sorted(info.items())) + + +class DeleteMetadefProperty(command.Command): + _description = _("Delete metadef propert(ies)") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "namespace", + metavar="", + help=_("Metadef namespace of the property (name)"), + ) + parser.add_argument( + "properties", + metavar="", + nargs="*", + help=_( + "Metadef properties to delete (name) " + "(omit this argument to delete all properties " + "in the namespace)" + ), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + if not parsed_args.properties: + image_client.delete_all_metadef_properties(parsed_args.namespace) + return + + result = 0 + for prop in parsed_args.properties: + try: + image_client.delete_metadef_property( + prop, + parsed_args.namespace, + ignore_missing=False, + ) + except Exception as e: + result += 1 + LOG.error( + _( + "Failed to delete property with name or ID " + "'%(property)s' from namespace '%(namespace)s': %(e)s" + ), + { + 'property': prop, + 'namespace': parsed_args.namespace, + 'e': e, + }, + ) + + if result > 0: + total = len(parsed_args.namespace) + msg = _("%(result)s of %(total)s properties failed to delete.") % { + 'result': result, + 'total': total, + } + raise exceptions.CommandError(msg) + + +class ListMetadefProperties(command.Lister): + _description = _("List metadef properties") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "namespace", + metavar="", + help=_("An identifier (a name) for the namespace"), + ) + return parser + + def take_action(self, parsed_args): + image_client = 
self.app.client_manager.image + props = image_client.metadef_properties(parsed_args.namespace) + columns = ['name', 'title', 'type'] + return ( + columns, + ( + utils.get_item_properties( + prop, + columns, + ) + for prop in props + ), + ) + + +class SetMetadefProperty(command.Command): + _description = _("Update metadef namespace property") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "--name", + help=_("Internal name of the property"), + ) + parser.add_argument( + "--title", + help=_("Property name displayed to the user"), + ) + parser.add_argument( + "--type", + help=_("Property type"), + ) + parser.add_argument( + "--schema", + help=_("Valid JSON schema of the property"), + ) + parser.add_argument( + "namespace", + help=_("Namespace of the namespace to which the property belongs"), + ) + parser.add_argument( + "property", + help=_("Property to update"), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + # We need to pass the values for *all* attributes as kwargs to + # update_metadef_property(), otherwise the attributes that are not + # listed will be reset. + data = image_client.get_metadef_property( + parsed_args.property, + parsed_args.namespace, + ) + kwargs = _format_property(data) + for key in ['name', 'title', 'type']: + argument = getattr(parsed_args, key, None) + if argument is not None: + kwargs[key] = argument + + if parsed_args.schema: + try: + kwargs.update(json.loads(parsed_args.schema)) + except json.JSONDecodeError as e: + raise exceptions.CommandError( + _("Failed to load JSON schema: %(e)s") + % { + 'e': e, + } + ) + + image_client.update_metadef_property( + parsed_args.property, + parsed_args.namespace, + **kwargs, + ) + + +class ShowMetadefProperty(command.ShowOne): + _description = _("Show a particular metadef property") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "namespace", + metavar="", + help=_("Metadef namespace of the property (name)"), + ) + parser.add_argument( + "property", + metavar="", + help=_("Property to show"), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + data = image_client.get_metadef_property( + parsed_args.property, + parsed_args.namespace, + ) + info = _format_property(data) + + return zip(*sorted(info.items())) diff --git a/openstackclient/image/v2/metadef_resource_type_association.py b/openstackclient/image/v2/metadef_resource_type_association.py new file mode 100644 index 0000000000..4d3ee46689 --- /dev/null +++ b/openstackclient/image/v2/metadef_resource_type_association.py @@ -0,0 +1,188 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
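+
+# Usage sketch (illustrative only): the command group name "image metadef
+# resource type association" is an assumption (the setup.cfg wiring is not
+# part of this module), and the namespace and resource type names are
+# made-up examples.
+#
+#   openstack image metadef resource type association create \
+#       OS::Compute::Libvirt OS::Glance::Image
+#
+# For deletion, passing --force on a protected namespace temporarily clears
+# the namespace's is_protected flag, deletes the association(s), and then
+# restores the flag in a finally block.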
+ +import logging + +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient import command +from openstackclient.i18n import _ + +LOG = logging.getLogger(__name__) + + +def _get_columns(item): + hidden_columns = ['location'] + return utils.get_osc_show_columns_for_sdk_resource( + item, {}, hidden_columns + ) + + +class CreateMetadefResourceTypeAssociation(command.ShowOne): + _description = _("Create metadef resource type association") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "namespace", + metavar="", + help=_( + "The name of the namespace you want to create the " + "resource type association in" + ), + ) + parser.add_argument( + "name", + metavar="", + help=_("A name of the new resource type"), + ) + parser.add_argument( + "--properties-target", + metavar="", + help=_( + "Some resource types allow more than one " + "key/value pair per instance." + ), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + kwargs = {} + + kwargs['namespace'] = parsed_args.namespace + kwargs['name'] = parsed_args.name + + if parsed_args.properties_target: + kwargs['properties_target'] = parsed_args.properties_target + + obj = image_client.create_metadef_resource_type_association( + parsed_args.namespace, **kwargs + ) + + display_columns, columns = _get_columns(obj) + data = utils.get_item_properties(obj, columns, formatters={}) + + return (display_columns, data) + + +class DeleteMetadefResourceTypeAssociation(command.Command): + _description = _("Delete metadef resource type association") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "metadef_namespace", + metavar="", + help=_("The name of the namespace whose details you want to see"), + ) + parser.add_argument( + "name", + metavar="", + nargs="+", + help=_( + "The name of the resource type(s) (repeat option to delete" + "multiple metadef resource type associations)" + ), + ) + parser.add_argument( + "--force", + dest='force', + action='store_true', + default=False, + help=_( + "Force delete the resource type association if the" + "namespace is protected" + ), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + + result = 0 + for resource_type in parsed_args.name: + try: + metadef_namespace = image_client.get_metadef_namespace( + parsed_args.metadef_namespace + ) + + kwargs = {} + is_initially_protected = ( + True if metadef_namespace.is_protected else False + ) + if is_initially_protected and parsed_args.force: + kwargs['is_protected'] = False + + image_client.update_metadef_namespace( + metadef_namespace.namespace, **kwargs + ) + + try: + image_client.delete_metadef_resource_type_association( + resource_type, metadef_namespace, ignore_missing=False + ) + finally: + if is_initially_protected: + kwargs['is_protected'] = True + image_client.update_metadef_namespace( + metadef_namespace.namespace, **kwargs + ) + + except Exception as e: + result += 1 + LOG.error( + _( + "Failed to delete resource type with name or " + "ID '%(resource_type)s': %(e)s" + ), + {'resource_type': resource_type, 'e': e}, + ) + + if result > 0: + total = len(parsed_args.metadef_namespace) + msg = _( + "%(result)s of %(total)s resource type failed to delete." 
+ ) % {'result': result, 'total': total} + raise exceptions.CommandError(msg) + + +class ListMetadefResourceTypeAssociations(command.Lister): + _description = _("List metadef resource type associations") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "metadef_namespace", + metavar="", + help=_("The name of the namespace whose details you want to see"), + ) + return parser + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + data = image_client.metadef_resource_type_associations( + parsed_args.metadef_namespace, + ) + columns = ['Name'] + column_headers = columns + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) diff --git a/openstackclient/image/v2/metadef_resource_types.py b/openstackclient/image/v2/metadef_resource_types.py new file mode 100644 index 0000000000..88001b7e1c --- /dev/null +++ b/openstackclient/image/v2/metadef_resource_types.py @@ -0,0 +1,38 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Image V2 Action Implementations""" + +from osc_lib import utils + +from openstackclient import command +from openstackclient.i18n import _ + + +class ListMetadefResourceTypes(command.Lister): + _description = _("List metadef resource types") + + def take_action(self, parsed_args): + image_client = self.app.client_manager.image + data = image_client.metadef_resource_types() + columns = ['Name'] + column_headers = columns + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) diff --git a/openstackclient/image/v2/task.py b/openstackclient/image/v2/task.py index 924eaaf13d..a0f1d35ded 100644 --- a/openstackclient/image/v2/task.py +++ b/openstackclient/image/v2/task.py @@ -11,9 +11,9 @@ # under the License. from osc_lib.cli import format_columns -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ _formatters = { @@ -64,7 +64,7 @@ class ShowTask(command.ShowOne): _description = _('Display task details') def get_parser(self, prog_name): - parser = super(ShowTask, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'task', diff --git a/openstackclient/locale/tr_TR/LC_MESSAGES/openstackclient.po b/openstackclient/locale/tr_TR/LC_MESSAGES/openstackclient.po index b44baf889b..f49640ecc2 100644 --- a/openstackclient/locale/tr_TR/LC_MESSAGES/openstackclient.po +++ b/openstackclient/locale/tr_TR/LC_MESSAGES/openstackclient.po @@ -1,32 +1,20 @@ -# Andreas Jaeger , 2017. #zanata # işbaran akçayır , 2017. 
#zanata msgid "" msgstr "" "Project-Id-Version: python-openstackclient VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-02-25 01:10+0000\n" +"POT-Creation-Date: 2025-04-01 18:07+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-08-15 12:09+0000\n" -"Last-Translator: Andreas Jaeger \n" +"PO-Revision-Date: 2017-08-14 07:58+0000\n" +"Last-Translator: Copied by Zanata \n" "Language-Team: Turkish (Turkey)\n" "Language: tr_TR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "X-Generator: Zanata 4.3.3\n" "X-POOTLE-MTIME: 1502656444.000000\n" -#, python-format -msgid "" -"\"Create\" rule command for type \"%(rule_type)s\" requires arguments " -"%(args)s" -msgstr "" -"\"%(rule_type)s\" türü için \"create\" kural komutu %(args)s argümanlarını " -"gerektirir" - -msgid "\"Create\" rule command requires argument \"type\"" -msgstr "\"Create\" kural komutu için \"type\" argümanı zorunludur" - #, python-format msgid "%(errors)s of %(total)s groups failed to delete." msgstr "%(total)s gruptan %(errors)s grup silinirken hata oluştu." @@ -51,10 +39,6 @@ msgstr "%(total)s kullanıcıdan %(errors)s kullanıcıyı silme işlemi başar msgid "%(num)s of %(total)s %(resource)ss failed to delete." msgstr "%(total)s %(resource)s'tan %(num)s tanesi silinirken hata oluştu." -#, python-format -msgid "%(result)s of %(total)s %(resource)ss failed to delete." -msgstr "%(total)s'ın %(result)s %(resource)s'ların silinmesi başarısız." - #, python-format msgid "%(result)s of %(total)s EC2 keys failed to delete." msgstr "" @@ -165,10 +149,6 @@ msgstr "%(total)s ağ ajanından %(result)s tanesi silinirken hata oluştu." msgid "%(result)s of %(total)s network segments failed to delete." msgstr "%(total)s ağ dilimlerinin %(result)s tanesi silinirken hata oluştu." -#, python-format -msgid "%(result)s of %(total)s policys failed to delete." -msgstr "%(total)s politikadan %(result)s tanesi silinirken hata oluştu." - #, python-format msgid "%(result)s of %(total)s ports failed to delete." msgstr "" @@ -279,24 +259,12 @@ msgstr "" "Varolan uzak disk bölümünden yeni görüntüsünden yeni disk bölümü anlık " "görüntüsü oluştururken 'force' seçeneği çalışmaz" -msgid "'--retype-policy' option will not work without '--type' option" -msgstr "'--retype-policy' seçeneği '--type' seçeneği olmadan çalışmaz" - msgid "--project is only allowed with --private" msgstr "--project sadece --private ile kullanılabilir" -msgid "" -"--size is a required option if snapshot or source volume is not specified." -msgstr "" -"Anlık görüntü veya kaynak disk bölümü belirtilmezse --size gerekli bir " -"seçenektir." - msgid "A service URL where SAML assertions are being sent (required)" msgstr "SAML bildirimlerinin gönderildiği bir hizmet URL'i (gerekli)" -msgid "Accept the image membership" -msgstr "İmaj üyeliğini kabul et" - msgid "Accept volume transfer request." msgstr "Disk bölümü aktarım isteğini kabul et." 
@@ -358,27 +326,12 @@ msgstr "L3 aracısına yönlendirici ekleyin" msgid "Add router to an agent" msgstr "Bir ajana yönlendirici ekle" -msgid "Add security group to server" -msgstr "Sunucuya güvenlik grubu ekle" - msgid "Add user to group" msgstr "Gruba kullanıcı ekle" -msgid "Add volume to server" -msgstr "Disk bölümünü sunucuya ekle" - msgid "Add volume(s) to consistency group" msgstr "Uyum grubuna disk bölümleri ekle" -msgid "" -"Additional route for this subnet e.g.: destination=10.10.0.0/16," -"gateway=192.168.71.254 destination: destination subnet (in CIDR notation) " -"gateway: nexthop IP address (repeat option to add multiple routes)" -msgstr "" -"Bu alt ağ için ek yönlendirici örn: hedef=10.10.0.0/16,geçit=192.168.71.254 " -"hedef: hedef alt ağ (CIDR gösteriminde) geçit: bir sonraki durak IP adresi " -"(birden fazla yölendirici eklemek için tekrarlanacak seçenek)" - msgid "Address scope to display (name or ID)" msgstr "Gösterilecek adres kapsamı (isim veya ID)" @@ -388,9 +341,6 @@ msgstr "Değiştirilecek adres kapsamı (ad veya kimlik)" msgid "Address scope(s) to delete (name or ID)" msgstr "Silinecek adres kapsam(lar)ı (isim veya ID)" -msgid "Adds a role assignment to a user or group on a domain or project" -msgstr "Bir alandaki veya projedeki bir kullanıcıya veya gruba rol atama ekler" - msgid "Agent from which router will be removed (ID only)" msgstr "Yönlendiricinin kaldırılacağı ajan (yalnızca ID)" @@ -418,22 +368,6 @@ msgstr "Silinecek küme(ler) (isim veya ID)" msgid "Allocate port on host (ID only)" msgstr " ana bilgisayarında bağlantı noktası ayır (sadece ID)" -msgid "" -"Allocation pool IP addresses for this subnet e.g.: start=192.168.199.2," -"end=192.168.199.254 (repeat option to add multiple IP addresses)" -msgstr "" -"Bu alt ağ için ayırma havuzu IP adresleri örn: başlangıç=192.168.199.2," -"bitiş=192.168.199.254 (birden fazla IP adresi eklemek için seçeneği tekrarla)" - -msgid "" -"Allocation pool IP addresses to be removed from this subnet e.g.: " -"start=192.168.199.2,end=192.168.199.254 (repeat option to unset multiple " -"allocation pools)" -msgstr "" -"Bu altağdan silinecek IP adres tahsis havuzu örn: başlangıç=192.168.199.2," -"bitiş=192.168.199.254 (birden fazla tahsis havuzu ayarını kaldırmak için " -"seçeneği tekrarla)" - msgid "" "Allow to access private flavor (name or ID) (Must be used with --" "private option)" @@ -441,19 +375,9 @@ msgstr "" "'nin özel flavor'a erişmesine izin verin (isim veya ID) (--private " "seçeneği ile beraber kullanılmalı)" -msgid "" -"Allow to access private type (name or ID) (Must be used with --" -"private option)" -msgstr "" -"Özel türe erişimek için 'ye izin ver (isim veya ID) (--private " -"seçeneği ile kullanılması zorunludur)" - msgid "Allow delete in state other than error or available" msgstr "Hata veya kullanılabilirden başka durumda silinmesine izin ver" -msgid "Allow disk over-commit on the destination host" -msgstr "Hedef ana bilgisayarda disk aşırı-işlemeye izin ver" - msgid "Allow image to be deleted (default)" msgstr "İmajın silinmesine izin ver (varsayılan)" @@ -463,29 +387,16 @@ msgstr "Kullanımdaki birimi yedeklemeye izin ver" msgid "Allow to delete in-use QoS specification(s)" msgstr "Kullanımdaki QoS özelliklerini silmeye izin ver" -msgid "Allow volume to be attached more than once (default to False)" -msgstr "Diskin birden fazla eklenmesine izin ver (varsayılan olarak False)" - #, python-format msgid "An error occurred when reading rules from file %(path)s: %(error)s" msgstr "%(path)s dosaysından kurallar 
okunurken hata oluştu: %(error)s" -msgid "Anchor for paging" -msgstr "Sayfalama için sabitleyici" - msgid "Apply rule to incoming network traffic (default)" msgstr "Kuralı gelen trafiğe uygula (varsayılan)" msgid "Apply rule to outgoing network traffic" msgstr "Giden ağ trafiğine kural uygula" -msgid "" -"Arbitrary scheduler hint key-value pairs to help boot an instance (repeat " -"option to set multiple hints)" -msgstr "" -"İsteğe bağlı bir önyüklemeye yardımcı olmak için keyfi zamanlayıcı ipucu " -"anahtar-değer çiftleri (birden fazla ipucu ayarlamak için seçeneği tekrarla)" - msgid "" "Argument --dst-port not allowed with arguments --icmp-type and --icmp-code" msgstr "" @@ -494,9 +405,6 @@ msgstr "" msgid "Argument --icmp-type required with argument --icmp-code" msgstr "--icmp-type argümanı --icmp-code ile kullanılması zorunlu" -msgid "Assocaite the floating IP with port (name or ID)" -msgstr "Yüzen IP'yi bağlantı noktasıyla ilişkilendirin (ad veya kimlik)" - msgid "Associate a QoS specification to a volume type" msgstr "Bir disk bölümü türüyle QoS özelliklerini ilişkilendir" @@ -532,9 +440,6 @@ msgstr "" msgid "Authentication URL of remote federated service provider (required)" msgstr "Uzak federe servis sağlayıcının kimlik doğrulama URL'si (gerekli)" -msgid "Authentication token to use" -msgstr "Kullanılacak yetkilendirme jetonu" - msgid "Authorize a request token" msgstr "Bir istek jetonu yetkilendir" @@ -615,16 +520,9 @@ msgstr "Mevcut kullanıcının parolasını değiştir" msgid "Check user membership in group" msgstr "Kullanıcının grup üyeliğini kontrol et" -msgid "Clean project resources, but don't delete the project" -msgstr "Projenin kaynaklarını temizle ama projeyi silme" - msgid "Clean resources associated with a project" msgstr "Bir proje ile alakalı kaynakları temizle" -#, python-format -msgid "Clear all tags associated with the %s" -msgstr "%s ile ilişkili tüm etiketleri sil" - msgid "" "Clear associated allocation-pools from the subnet. Specify both --allocation-" "pool and --no-allocation-pool to overwrite the current allocation pool " @@ -642,15 +540,6 @@ msgstr "" "yönlendirme bilgisinin üzerine yazmak için --host-route ve --no-host-route " "seçeneklerinin her ikisini de belirtin." -msgid "" -"Clear existing allowed-address pairs associatedwith this port.(Specify both " -"--allowed-address and --no-allowed-addressto overwrite the current allowed-" -"address pairs)" -msgstr "" -"Bu bağlantı noktasıyla ilişkili mevcut izinli adres çiftlerini temizleyin." -"(Mevcut izinli adres çiftinin üzerinde yazmak için --allowed-address ve --" -"no-allowed-addressto seçeneklerinin her ikisini de belirtiniz)" - msgid "" "Clear existing information of DNS Nameservers. Specify both --dns-nameserver " "and --no-dns-nameserver to overwrite the current DNS Nameserver information." @@ -659,26 +548,6 @@ msgstr "" "bilgisinin üzerine yazmak için --dns-nameserver ve --no-dns-nameserver " "özelliklerini belirle." -msgid "" -"Clear existing information of binding:profile.Specify both --binding-profile " -"and --no-binding-profile to overwrite the current binding:profile " -"information." -msgstr "" -"binding:profile'in mevcut bilgilerini temizle. Mevcut binding:profile " -"bilgisinin üzerine yazmak için --binding-profile ve --no-binding-profile her " -"ikisini de belirtin." 
- -msgid "Clear existing information of data plane status" -msgstr "Mevcut veri düzlemi durumu bilgilerini temizle" - -msgid "" -"Clear existing information of fixed IP addresses.Specify both --fixed-ip and " -"--no-fixed-ip to overwrite the current fixed IP addresses." -msgstr "" -"Sabit IP adresleri için mevcut bilgileri silin. Geçerli sabit IP " -"adreslerinin üzerine yazmak için hem --fixed-ip hem de --no-fixed-ip " -"belirtin." - msgid "Clear existing security groups associated with this port" msgstr "Bu bağlantı noktasıyla ilişkili mevcut güvenlik gruplarını temizle" @@ -687,21 +556,6 @@ msgstr "" "Yönlendiricinin yüksek kullanılabilirlik özelliğini temizle (sadece devre " "dışı bırakılmış yönlendirici)" -msgid "" -"Clear routes associated with the router. Specify both --route and --no-route " -"to overwrite current value of route." -msgstr "" -"Yönlendirici ile ilişkili yönleri temizle. Mevcut yön değerinin üzerine " -"yazmak için hem --route hem de --no-route seçeneklerini belirtin." - -#, python-format -msgid "" -"Clear tags associated with the %s. Specify both --tag and --no-tag to " -"overwrite current tags" -msgstr "" -"%s ile ilgili etiketleri temizle. Mevcut etiketlerin üzerine yazmak için hem " -"--tag hem de --no-tag seçeneğini belirtin" - msgid "Command Failed: One or more of the operations failed" msgstr "Komut başarısız: Bir veya birden fazla işlem başarısız" @@ -716,12 +570,6 @@ msgstr "Hesaplama API sürümü, varsayılan=%s (Env: OS_COMPUTE_API_VERSION)" msgid "Compute service %(service)s of host %(host)s failed to set." msgstr "Ana bilgisayar %(host)s'ın hesap hizmeti %(service)s ayarlanamadı." -msgid "Compute service(s) to delete (ID only)" -msgstr "Hesaplama servis(ler)ini sil" - -msgid "Confirm server resize is complete" -msgstr "Sunucu yeniden boyutlandırmasının tamamlandığını doğrula" - msgid "Consistency group containing (name or ID)" msgstr "'ü içeren tutarlılık grubu (isim veya ID)" @@ -775,10 +623,6 @@ msgstr "Silinecek alıcı(lar)" msgid "Container for new object" msgstr "Yeni nesne için kap" -#, python-format -msgid "Container name is %s characters long, the default limit is 256" -msgstr "Kap ismi %s karakter uzunluğunda, varsayılan sınır 256'dır" - msgid "Container to display" msgstr "Gösterilecek kap" @@ -817,63 +661,9 @@ msgstr "" "net-id=' parametresi için bir sarmalayıcıdır. Daha gelişmiş " "kullanım durumları için, '--nic' parametresine bakın." -msgid "" -"Create a NIC on the server and connect it to port. Specify option multiple " -"times to create multiple NICs. This is a wrapper for the '--nic port-" -"id=' parameter that provides simple syntax for the standard use case " -"of connecting a new server to a given port. For more advanced use cases, " -"refer to the '--nic' parameter." -msgstr "" -"Sunucuda bir NIC oluşturun ve bağlantı noktasına bağlayın. Birden çok NIC " -"oluşturmak için seçeneği birden çok kez belirtin. Bu, belirli bir bağlantı " -"noktasına yeni bir sunucu bağlamak için standart kullanım örneği için basit " -"sözdizimi sağlayan '--nic port-id=' parametresi için bir " -"sarmalayıcıdır. Daha gelişmiş kullanım durumları için, '--nic' parametresine " -"bakın." - -msgid "" -"Create a NIC on the server. Specify option multiple times to create multiple " -"NICs. Either net-id or port-id must be provided, but not both. 
net-id: " -"attach NIC to network with this UUID, port-id: attach NIC to port with this " -"UUID, v4-fixed-ip: IPv4 fixed address for NIC (optional), v6-fixed-ip: IPv6 " -"fixed address for NIC (optional), none: (v2.37+) no network is attached, " -"auto: (v2.37+) the compute service will automatically allocate a network. " -"Specifying a --nic of auto or none cannot be used with any other --nic value." -msgstr "" -"Sunucuda bir NIC oluştur. Birden fazla NIC oluşturmak için seçeneği birden " -"fazla kere belirtin. Ya net-id ya da port-id sağlanmalı, ikisi bir arada " -"değil. net-id: NIC'nin ağa ekleneceği UUID, port-id: NIC'nin bağlantı " -"noktasına takılacağı UUID, v4-fixed-ip: NIC için sabit IPv4 adresi " -"(seçimli), v6-fixed-ip: NIC için sabit IPv6 adresi (seçimli), none: (v2.37+) " -"hiç ağ takılmaz, auto: (v2.37+) hesaplama servisi otomatik olarak bir ağ " -"ayırır. Auto veya none'ı bir --nic ile belirtmek başka bir --nic değeri ile " -"kullanılamaz." - msgid "Create a QoS policy" msgstr "QoS politikası oluştur" -msgid "" -"Create a block device on the server.\n" -"Block device mapping in the format\n" -"=:::\n" -": block device name, like: vdb, xvdc (required)\n" -": UUID of the volume or snapshot (required)\n" -": volume or snapshot; default: volume (optional)\n" -": volume size if create from snapshot (optional)\n" -": true or false; default: false (optional)\n" -"(optional extension)" -msgstr "" -"Sunucu üzerinde blok aygıtı oluştur.\n" -"Blok aygıtı eşleşme formatı\n" -"=:::\n" -": blok aygıt ismi, örn: vdb, xvdc (gerekli)\n" -": disk bölümünün veya anlık görüntünün UUID'si (gerekli)\n" -": disk bölümü veya anlık görüntü; varsayılan: disk bölümü (seçimli)\n" -": eğer anlık görüntüden oluşturulduysa disk bölümü boyutu " -"(seçimli)\n" -": true veya false; varsayılan: false (seçimli)\n" -"(seçimli uzantı)" - msgid "Create a centralized router" msgstr "Merkezi bir yönlendirici oluştur" @@ -931,9 +721,6 @@ msgstr "Bir altağ oluşturun" msgid "Create an access token" msgstr "Erişim jetonu oluştur" -msgid "Create compute agent" -msgstr "Hesaplama ajanı oluştur" - msgid "" "Create credentials for user (name or ID; default: current authenticated user)" msgstr "" @@ -947,9 +734,6 @@ msgstr "" "Projede kimlik bilgileri oluştur (isim veya ID; varsayılan: mevcut kimlik " "doğrulama yapılmış proje)" -msgid "Create description for meter" -msgstr "Sayaç için açıklama oluştur" - msgid "Create floating IP" msgstr "Yüzen IP oluştur" @@ -968,9 +752,6 @@ msgstr "Yeni Ağ QoS kuralı oluştur" msgid "Create new QoS specification" msgstr "Yeni QoS özelliği oluştur" -msgid "Create new backup" -msgstr "Yeni yedek oluştur" - msgid "Create new consistency group snapshot." msgstr "Yeni tutarlılık grubu anlık görüntüsü oluştur." @@ -1040,9 +821,6 @@ msgstr "Yeni servis oluştur" msgid "Create new service provider" msgstr "Yeni servis sağlayıcı oluştur" -msgid "Create new snapshot" -msgstr "Yeni anlık görüntü oluştur" - msgid "Create new trust" msgstr "Yeni güven oluştur" @@ -1067,20 +845,6 @@ msgstr "Bu güvenlik grubunda kural oluştur (isim veya ID)" msgid "Create server boot disk from this image (name or ID)" msgstr "Bu imajdan sunucu ön yükleme diski oluştur (isim veya ID)" -msgid "" -"Create server using this volume as the boot disk (name or ID).\n" -"This option automatically creates a block device mapping with a boot index " -"of 0. On many hypervisors (libvirt/kvm for example) this will be device vda. " -"Do not create a duplicate mapping using --block-device-mapping for this " -"volume." 
-msgstr "" -"Bu disk bölümünü ön yüklenebilir disk olarak kullanarak sunucu oluştur (isim " -"veya ID).\n" -"Bu seçenek otomatik olarak 0 ön yükleme diziniyle bir blok aygıt eşleşmesi " -"oluşturur. Bir çok yönetici arakatman (örneğin libvirt/kvm) üzerinde bu " -"aygıt vda'dir. Bu disk bölümü için --block-device-mapping kullanarak birden " -"fazla eşleşme oluşturmayın." - msgid "Create server with this flavor (name or ID)" msgstr "Bu flavor ile sunucu oluştur (isim veya ID)" @@ -1105,14 +869,6 @@ msgstr "Kimlik bilgilerine erişim anahtarı" msgid "Credentials access key(s)" msgstr "Kimlik bilgilerine erişim anahtar(lar)ı" -msgid "" -"Custom data to be passed as binding:profile. Data may be passed as " -"= or JSON. (repeat option to set multiple binding:profile data)" -msgstr "" -"binding:profile olarak verilecek özel veri. Veri = şeklinde veya " -"JSON olarak verilebilir. (birden fazla binding:profile verisi ayarlamak için " -"seçeneği tekrarlayın)" - msgid "DNS server for this subnet (repeat option to set multiple DNS servers)" msgstr "" "Bu alt ağ için DNS sunucu (birden fazla DNS sunucusu ayarlamak için seçeneği " @@ -1156,22 +912,9 @@ msgstr "Qos Politika(lar/s)ını sil" msgid "Delete address scope(s)" msgstr "Adres kapsam(lar)ını sil" -msgid "" -"Delete auto allocated topology for a given project. Default is the current " -"project" -msgstr "" -"Belirli bir proje için otomatik ayrılan topolojiyi silin. Varsayılan geçerli " -"projedir" - msgid "Delete auto allocated topology for project" msgstr "Projeye otomatik ayrılan topolojiyi sil" -msgid "Delete backup(s)" -msgstr "Yedek(ler)i sil" - -msgid "Delete compute agent(s)" -msgstr "Hesaplama ajan(lar)ını sil" - msgid "Delete compute service(s)" msgstr "Hesaplama servis(ler)ini sil" @@ -1295,9 +1038,6 @@ msgstr "Servis sağlayıcı(ları/yı) sil" msgid "Delete service(s)" msgstr "Servis(ler)i sil" -msgid "Delete snapshot(s)" -msgstr "Anlık görüntü(yü/leri) sil" - msgid "Delete subnet pool(s)" msgstr "Altağ havuzunu sil" @@ -1325,14 +1065,6 @@ msgstr "Disk bölümü türlerini sil" msgid "Delete volume(s)" msgstr "Disk bölümlerini sil" -#, python-format -msgid "Deleting %(resource)s : %(id)s" -msgstr "%(resource)s siliniyor: %(id)s" - -#, python-format -msgid "Deleting project: %s" -msgstr "Proje siliniyor: %s" - msgid "Description for the flavor" msgstr "Flavor için açıklama" @@ -1357,22 +1089,6 @@ msgstr "Bu tutarlılık grubu anlık görüntüsünün açıklaması" msgid "Description of this port" msgstr "Bu bağlantı noktasının açıklaması" -msgid "" -"Desired IP and/or subnet (name or ID)on external gateway: subnet=,ip-" -"address= (repeat option to set multiple fixed IP addresses)" -msgstr "" -"Harici geçit üzerinde istenen IP ve/veya altağ: subnet=,ip-" -"address= (birden fazla sabit IP adres ayarlamak için seçeneği " -"tekrarla)" - -msgid "" -"Desired IP and/or subnet for filtering ports (name or ID): subnet=," -"ip-address= (repeat option to set multiple fixed IP addresses)" -msgstr "" -"Bağlantı noktalarını filtrelemek için tasarlanan IP ve/veya alt ağ (isim " -"veya ID): subnet=,ip-address= (birden fazla sabit IP " -"adresi ayarlamak için seçeneği tekrarlayın)" - msgid "" "Desired IP and/or subnet for this port (name or ID): subnet=,ip-" "address= (repeat option to set multiple fixed IP addresses)" @@ -1390,22 +1106,6 @@ msgstr "" "veya ID): subnet=,ip-address= (birden fazla sabit IP " "adresini kaldırmak için seçeneği tekrarlayın)" -msgid "" -"Desired allowed-address pair which should be removed from this port: ip-" -"address= [,mac-address=] (repeat 
option to set " -"multiple allowed-address pairs)" -msgstr "" -"Bu bağlantıdan kaldırılması gereken tasarlanan erişilebilir adres çifti: ip-" -"address= [,mac-address=] (birden fazla izin verilen " -"adres çifti ayarlamak için seçeneği tekrarlayın)" - -msgid "" -"Desired key which should be removed from binding:profile(repeat option to " -"unset multiple binding:profile data)" -msgstr "" -"binding:profile'den çıkarılması gereken istenen anahtar (çoklu binding:" -"profile verisinin ayarını kaldırmak için seçeneği tekrarlayın)" - msgid "" "Destination filename (defaults to object name); using '-' as the filename " "will print the file to stdout" @@ -1416,9 +1116,6 @@ msgstr "" msgid "Destination host (takes the form: host@backend-name#pool)" msgstr "Hedef ana bilgisayar (biçemi: anabilgisayar@artalanismi#havuz)" -msgid "Destination port (ssh -p option)" -msgstr "Hedef bağlantı noktası (ssh -p seçeneği)" - msgid "" "Destination port, may be a single port or a starting and ending port range: " "137:139. Required for IP protocols TCP and UDP. Ignored for ICMP IP " @@ -1539,9 +1236,6 @@ msgstr "Adres kapsam detaylarını göster" msgid "Display aggregate details" msgstr "Küme detaylarını göster" -msgid "Display backup details" -msgstr "Yedek detaylarını göster" - msgid "Display configuration details" msgstr "Yapılandırma detaylarını göster" @@ -1584,9 +1278,6 @@ msgstr "Yüzen IP ayrıntılarını görüntüle" msgid "Display group details" msgstr "Grup detaylarını göster" -msgid "Display host details" -msgstr "Sunucu detaylarını göster" - msgid "Display hypervisor details" msgstr "Yönetici ara katman detaylarını göster" @@ -1671,9 +1362,6 @@ msgstr "Servis detaylarını göster" msgid "Display service provider details" msgstr "Servis sağlayıcı detaylarını göster" -msgid "Display snapshot details" -msgstr "Anlık görüntü detaylarını göster" - msgid "Display subnet details" msgstr "Altağ detaylarını göster" @@ -1701,9 +1389,6 @@ msgstr "Disk bölümü türü detaylarını göster" msgid "Do not make the network VLAN transparent" msgstr "Ağ VLAN'ını transparan yapma" -msgid "Do not over-commit disk on the destination host (default)" -msgstr "Hedef ana bilgisayarda disk aşırı işleme yapma (varsayılan)" - msgid "Do not share meter between projects" msgstr "Ölçümleri projeler arasında paylaşma" @@ -1750,10 +1435,6 @@ msgstr "" "Grubun ait olduğu alan (isim veya ID). Grup isimlerinde bir çatışma olması " "durumunda kullanılabilir." -msgid "Domain the project belongs to (name or ID) [only valid with --absolute]" -msgstr "" -"Projenin ait olduğu alan (isim veya ID) [sadece --absolute ile geçerli]" - msgid "" "Domain the project belongs to (name or ID). This can be used in case " "collisions between project names exist." 
@@ -1898,11 +1579,6 @@ msgstr "" msgid "Enable port security for this port" msgstr "Bu bağlantı noktası için bağlantı noktası güvenliğini etkinleştir" -msgid "Enable port security for this port (Default)" -msgstr "" -"Bu bağlantı noktası için bağlantı noktası güvenliğini etkinleştir " -"(Varsayılan)" - msgid "Enable project" msgstr "Projeyi etkinleştir" @@ -1962,9 +1638,6 @@ msgstr "Silinecek uç nokta(lar) (sadece ID)" msgid "Ephemeral disk size in GB (default 0G)" msgstr "GB cinsinden geçici disk boyutu (varsayılan 0G)" -msgid "Error creating server\n" -msgstr "Sunucu oluşturulurken hata\n" - #, python-format msgid "Error creating server backup: %s" msgstr "Sunucu yedeği oluşturulurken hata: %s" @@ -1977,55 +1650,30 @@ msgstr "Sunucu imajı oluşturma hatası: %s" msgid "Error creating server: %s" msgstr "Sunucu oluşturma başarısız: %s" -msgid "Error deleting server\n" -msgstr "Sunucu silinirken hata\n" - #, python-format msgid "Error deleting server: %s" msgstr "Sunucu silinirken hata: %s" -msgid "Error migrating server\n" -msgstr "Sunucu göçü sırasında hata\n" - #, python-format msgid "Error migrating server: %s" msgstr "Sunucu göçü sırasında hata: %s" -msgid "Error rebooting server\n" -msgstr "Sunucu yeniden başlatma hatası\n" - #, python-format msgid "Error rebooting server: %s" msgstr "Sunucu yeniden başlatma hatası: %s" -msgid "Error rebuilding server\n" -msgstr "Sunucu yeniden yapılandırması sırasında hata\n" - #, python-format msgid "Error rebuilding server: %s" msgstr "Sunucu yeniden yapılandırması sırasında hata: %s" -msgid "Error resizing server\n" -msgstr "Sunucunun yeniden boyutlandırması sırasında hata\n" - #, python-format msgid "Error resizing server: %s" msgstr "Sunucu yeniden boyutlandırma hatası: %s" -msgid "Error retrieving diagnostics data\n" -msgstr "Teşhis verisi alınırken hata oluştu\n" - #, python-format msgid "Error while executing command: %s" msgstr "Komut çalıştırılırken hata oluştu: %s" -msgid "" -"Error: If a user or group is specified, either --domain or --project must " -"also be specified to list role grants." -msgstr "" -"Hata: Bir kullanıcı veya grup belirtilmişse, rol izinlerini listelemek için " -"--domain veya --project de belirtilmelidir." - msgid "" "Ethertype of network traffic (IPv4, IPv6; default: based on IP protocol)" msgstr "" @@ -2079,9 +1727,6 @@ msgstr "Hesaplama API'ı uzantıların listelemeyi desteklemiyor" msgid "Extensions list not supported by Identity API" msgstr "Kimlik API için uzantıların listlenmesi desteklenmiyor" -msgid "External Network used as router's gateway (name or ID)" -msgstr "Yönlendirici geçidi olarak kullanılan Harici Ağ (isim veya ID)" - #, python-format msgid "Failed to add project %(project)s access to flavor: %(e)s" msgstr "%(project)s projesinin flavor'a erişimi başarısız: %(e)s" @@ -2098,22 +1743,10 @@ msgstr "Anlık görüntü özellikleri silinirken hata oluştu: %s" msgid "Failed to clean volume properties: %s" msgstr "Disk bölümü özelliklerini silme başarısız: %s" -#, python-format -msgid "Failed to clear flavor property: %s" -msgstr "Flavor özelliğinin silinmesi başarısız: %s" - #, python-format msgid "Failed to create Network QoS rule: %(e)s" msgstr "Ağ QoS kuralı oluşturulurken hata oluştu: %(e)s" -#, python-format -msgid "Failed to delete %(dresult)s of %(total)s images." -msgstr "%(total)s imajdan %(dresult)s tanesi silinirken hata oluştu." 
- -#, python-format -msgid "Failed to delete %(resource)s with ID '%(id)s': %(e)s" -msgstr "'%(id)s' ID'li %(resource)s silinemedi: %(e)s" - #, python-format msgid "Failed to delete %(resource)s with name or ID '%(name_or_id)s': %(e)s" msgstr "" @@ -2355,9 +1988,6 @@ msgstr "" "'%(flavor)s' flavor için erişen projelerin listesinin alınma işlemi " "başarısız: %(e)s" -msgid "Failed to get an image file." -msgstr "İmaj dosyası alma başarısız." - #, python-format msgid "Failed to remove flavor access from project: %s" msgstr "Projeden flavor erişiminin kaldırılması başarısız: %s" @@ -2389,10 +2019,6 @@ msgstr "Bu disk bölümü türü için şifreleme bilgisini ayarlama başarısı msgid "Failed to set flavor access to project: %s" msgstr "Projeye flavor erişiminin ayarlanması başarısız: %s" -#, python-format -msgid "Failed to set flavor property: %s" -msgstr "Flavor özelliğinin ayarlanması başarısız: %s" - #, python-format msgid "Failed to set image property: %s" msgstr "İmaj özelliği ayarlarken hata oluştu: %s" @@ -2429,10 +2055,6 @@ msgstr "Disk bölümü durumunu ayarlama başarısız: %s" msgid "Failed to set volume type access to project: %s" msgstr "Projeye erişim için disk bölümü türü ayarlama başarısız: %s" -#, python-format -msgid "Failed to set volume type property: %s" -msgstr "Disk bölümü türü özelliğini ayarlarken hata oluştu: %s" - #, python-format msgid "Failed to set volume type: %s" msgstr "Disk bölümü türü ayarlanırken hata oluştu: %s" @@ -2469,20 +2091,6 @@ msgstr "İmaj özelliği ayarını kaldırma başarısız: %s" msgid "Failed to unset volume property: %s" msgstr "Disk bölümü özelliği ayarlarını kaldırma başarısız: %s" -#, python-format -msgid "Failed to unset volume type property: %s" -msgstr "Disk bölümü türü özelliğini kaldırma işlemi başarısız: %s" - -#, python-format -msgid "Failed to update backup name or description: %s" -msgstr "Yedek ismi ve açıklaması güncellenirken hata oluştu: %s" - -#, python-format -msgid "Failed to update snapshot display name or display description: %s" -msgstr "" -"Anlık görüntü görünür ismi veya görünür açıklamasını güncelleme işlemi " -"başarısız: %s" - #, python-format msgid "Failed to update snapshot name or description: %s" msgstr "Anlık görüntü adı veya açıklaması güncellenemedi: %s" @@ -2508,23 +2116,12 @@ msgstr "Düzenlenecek federasyon protokolü (isim veya ID)" msgid "Federation protocol(s) to delete (name or ID)" msgstr "Silinecek federasyon protokol(ü/leri) (isim veya ID)" -msgid "" -"File to inject into image before boot (repeat option to set multiple files)" -msgstr "" -"Önyüklemeden önce imaja enjekte etmek için dosya (birden çok dosyayı " -"ayarlamak için seçeneği tekrar edin)" - msgid "" "Filename for private key to save. If not used, print private key in console." msgstr "" "Kapalı anahtarın kaydedileceği dosyanın ismi. Kullanılmazsa, kapalı anahtar " "konsola basılır." -msgid "Filename for public key to add. If not used, creates a private key." -msgstr "" -"Eklenecek açık anahtarın dosya ismi. Kullanılmazsa, bir kapalı anahtar " -"oluşturur." - msgid "Filename that contains a new set of mapping rules" msgstr "Eşleşme kurallarının yeni bir kümesini içeren dosya adı" @@ -2552,10 +2149,6 @@ msgstr "Grup listesini 'e göre filtrele (isim veya ID)" msgid "Filter group list by (name or ID)" msgstr "'ya göre grup listesini filtrele (isim veya ID)" -msgid "Filter hypervisors using substring" -msgstr "" -" alt karakter dizisini kullanarak yönetici arakatmanları filtrele" - msgid "Filter images based on name." msgstr "İmajları isme göre filtrele." 
@@ -2589,12 +2182,6 @@ msgstr "Sonuçları kullanıcıya göre filtrele (isim veya ID) (sadece yönetic msgid "Filter results by volume name" msgstr "Sonuçları disk bölümü ismine göre filtrele" -msgid "Filter roles by (name or ID)" -msgstr "Rolleri 'ye göre filtrele (isim veya ID)" - -msgid "Filter roles by (name or ID)" -msgstr "Rolleri 'ya göre filtrele (isim veya ID)" - msgid "Filter users by (name or ID)" msgstr "Kullanıcıları 'e göre filtrele (isim veya ID)" @@ -2620,35 +2207,18 @@ msgstr "" "Sonuçları bir duruma göre filtrele (\"available\", \"error\", \"creating\", " "\"deleting\" veya \"error_deleting\")" -msgid "" -"Filters results by a status. ('available', 'error', 'creating', 'deleting' " -"or 'error-deleting')" -msgstr "" -"Bir duruma göre sonuçları filtrele. ('kullanılabilir', 'hata', " -"'oluşturuluyor', 'siliniyor' veya 'silinirken hata')" - msgid "Filters results by a volume (name or ID)." msgstr "Bir disk bölümüne göre sonuçları filtrele (isim veya ID)." msgid "Filters results by the backup name" msgstr "Yedek ismine göre sonuçları listele" -msgid "" -"Filters results by the backup status ('creating', 'available', 'deleting', " -"'error', 'restoring' or 'error_restoring')" -msgstr "" -"Yedek durumuna göre sonuçları filtrele ('oluşturuluyor', 'kullanılabilir', " -"'siliniyor', 'hata' veya 'geri yüklenirken hata')" - msgid "Filters results by the volume which they backup (name or ID)" msgstr "Yedeklenen disk bölümüne göre sonuçları filtrele (isim veya ID)" msgid "Fixed IP address mapped to the floating IP" msgstr "Sabit IP adres yüzen IP adresi ile eşleşti" -msgid "Fixed IP address to associate with this floating IP address" -msgstr "Bu kayan IP adresi ile ilişkili sabit IP adresi" - msgid "Fixed IP address to remove from the server (IP only)" msgstr "Sunucudan kaldırılacak sabit IP adresi (sadece IP)" @@ -2687,15 +2257,9 @@ msgstr "Silinecek flavor(lar) (isim veya ID)" msgid "Floating IP address" msgstr "Yüzen IP adresi" -msgid "Floating IP address to assign to server (IP only)" -msgstr "Sunucuya atanacak kayan IP adresi (sadece IP)" - msgid "Floating IP address to remove from server (IP only)" msgstr "Sunucudan kaldırılacak kayan IP adresi (sadece IP)" -msgid "Floating IP to associate (IP address or ID)" -msgstr "İlişkilendirilecek yüzen IP adresi (IP adres veya ID)" - msgid "Floating IP to disassociate (IP address or ID)" msgstr "Bağlantıyı kesmek için kayan IP (IP adresi veya kimliği)" @@ -2709,9 +2273,6 @@ msgid "Floating ip pool operations are only available for Compute v2 network." msgstr "" "Yüzen IP havuz işlemleri sadece Hesaplama v2 ağı için kullanılabilirdir." 
-msgid "Force down service" -msgstr "Servisi durmaya zorla" - msgid "Force image change if volume is in use (only meaningful with --volume)" msgstr "" "Disk bölümü kullanımda ise imajı değişmeye zorla (sadece --volume ile " @@ -2723,9 +2284,6 @@ msgstr "" "Disk bölümü kullanımda ise imaj oluşturmaya zorla (sadece --volume ile " "anlamlıdır)" -msgid "Force up service" -msgstr "Servisi açılmaya zorla" - msgid "Freeze and disable the specified volume host" msgstr "Belirtilen disk bölümü sunucusunu dondur ve devre dışı bırak" @@ -2753,9 +2311,6 @@ msgstr "Silinecek grup(lar) (isim veya ID)" msgid "Helper class capable of reading rules from files" msgstr "Yardımcı sınıf kuralları dosyadan okuyabilir" -msgid "Hints for the scheduler (optional extension)" -msgstr "Zamanlayıcı için ipuçları (isteğe bağlı eklenti)" - msgid "Host to add to " msgstr " için eklenecek sunucu" @@ -2802,25 +2357,12 @@ msgstr "Bu imajı oluşturmak için kullanılan sunucu örneği kimliği" msgid "ID of the agent" msgstr "Ajanın ID'si" -msgid "IP address to add to server (name only)" -msgstr "Sunucuya eklenecek IP adresi (sadece isim)" - -msgid "IP address to remove from server (name only)" -msgstr "Sunucudan kaldırılacak IP adresi (sadece isim)" - msgid "IP protocol (icmp, tcp, udp; default: tcp)" msgstr "IP protokolü (icmp, tcp, udp; varsayılan: tcp)" msgid "IP version (default is 4)" msgstr "IP sürümü (varsayılan 4)" -msgid "" -"IP version (default is 4). Note that when subnet pool is specified, IP " -"version is determined from the subnet pool and this option is ignored." -msgstr "" -"IP sürümü (varsayılan 4). Alt ağ havuzu belirtildiğinde, IP sürümü alt ağ " -"havuzundan belirlenir ve bu seçenek göz ardı edilir." - msgid "IPv4 subnet for fixed IPs (in CIDR notation)" msgstr "Sabit IP'ler için IPv4 alt ağı (CIDR gösteriminde)" @@ -2871,16 +2413,6 @@ msgstr "" "Belirtilirse, disk bölümü durumu kilitlenir ve geçiş işleminin iptal " "edilmesine izin vermeyecektir (muhtemelen başka bir işlemle)" -msgid "" -"If specified, the volume state will not be locked and the a migration can be " -"aborted (default) (possibly by another operation)" -msgstr "" -"Belirtilirse, disk bölümü durumu kilitlenmez ve bir taşıma işlemi iptal " -"edilebilir (varsayılan) (muhtemelen başka bir işlemle)" - -msgid "If topology exists returns the topology's information (Default)" -msgstr "Topoloji var ise, topolojinin bilgisini döndürür (Varsayılan)" - #, python-format msgid "Image %(id)s was %(status)s." msgstr "%(id)s imaj %(status)s idi." @@ -2892,9 +2424,6 @@ msgstr "İmaj API sürümü, öntanımlı=%s (Env: OS_IMAGE_API_VERSION)" msgid "Image ID to reserve" msgstr "Ayrılacak imaj ID'si" -msgid "Image can be shared" -msgstr "İmaj paylaşılabilir" - #, python-format msgid "" "Image container format. The supported options are: %(option_list)s. The " @@ -2925,9 +2454,6 @@ msgstr "" msgid "Image hash used for verification" msgstr "Doğrulama için kullanılan imaj özeti" -msgid "Image is accessible to the community" -msgstr "İmaj topluluk tarafından erişilebilir" - msgid "Image is accessible to the public" msgstr "İmaj genele açıktır" @@ -2974,9 +2500,6 @@ msgstr "Tüm projeleri dahil et (sadece yönetici)" msgid "Include remote IP prefix from traffic count (default)" msgstr "Trafik sayımından uzak IP önekini ekle (varsayılan)" -msgid "Include reservations count [only valid with --absolute]" -msgstr "Rezervasyon sayısını ekle [sadece --absolute ile geçerlidir]" - msgid "" "Incorrect set of arguments provided. 
See openstack --help for more details" msgstr "" @@ -2986,22 +2509,10 @@ msgstr "" msgid "Ingress traffic direction from the project point of view" msgstr "Projenin bakış açısından gelen trafik yönü" -#, python-format -msgid "Invalid argument %s, characters ',' and '=' are not allowed" -msgstr "%s geçersiz değişken, ',' ve '=' karakterlerine izin verilmiyor" - #, python-format msgid "Invalid changes-since value: %s" msgstr "Geçersiz değişiklikler-şu değerden beri:%s" -#, python-format -msgid "" -"Invalid nic argument '%s'. Nic arguments must be of the form --nic ." -msgstr "" -"Geçersiz '%s' nic argümanı, Nic argümanları --nic biçiminde olmalıdır." - msgid "Issue new token" msgstr "Yeni jeton yayınla" @@ -3013,18 +2524,9 @@ msgstr "%(private_key)s anahtar dosyası kaydedilemedi: %(exception)s" msgid "Key file %(public_key)s not found: %(exception)s" msgstr "%(public_key)s anahtar dosyası bulunamadı: %(exception)s" -msgid "Keypair to inject into this server (optional extension)" -msgstr "Bu sunucuya enjekte etmek için anahtarlık (isteğe bağlı uzantı)" - msgid "Label to associate with this metering rule (name or ID)" msgstr "Bu ölçüm kuralı ile ilişkili etiket (isim veya ID)" -msgid "Limit the number of containers returned" -msgstr "İade edilen kap sayısını sınırla" - -msgid "Limit the number of objects returned" -msgstr "Döndürülen nesnelerin sayısını sınırla" - #, python-format msgid "List %s which have all given tag(s) (Comma-separated list of tags)" msgstr "" @@ -3069,9 +2571,6 @@ msgstr "QoS özelliklerini listele" msgid "List Service Providers" msgstr "Servis Sağlayıcıları listele" -msgid "List a project's resources" -msgstr "Bir projenin kaynaklarını listele" - msgid "List accessible domains" msgstr "Erişilebilir alanları listele" @@ -3126,21 +2625,12 @@ msgstr "Kullanılabilirlik alanlarını ve durumlarını listele" msgid "List available images" msgstr "Kullanılabilir imajlar listesi" -msgid "List backups" -msgstr "Yedekleri listele" - -msgid "List compute agents" -msgstr "Hesaplama ajanlarını listele" - msgid "List compute availability zones" msgstr "Hesaplama kullanılabilirlik alanlarını listele" msgid "List compute quota" msgstr "Hesaplama kotasını listele" -msgid "List compute services" -msgstr "Hesaplama servislerini listele" - msgid "List consistency group snapshots." msgstr "Tutarlılık grubu anlık görüntülerini listele." @@ -3219,9 +2709,6 @@ msgstr "Verilen duruma göre yüzen IP(ler) listesi ('AKTİF', 'KAPALI')" msgid "List groups" msgstr "Grupları listele" -msgid "List hosts" -msgstr "Sunucuları listele" - msgid "List hypervisors" msgstr "Yönetici arakatman listesi" @@ -3253,13 +2740,6 @@ msgstr "" "Verilen eyleme göre ağ RBAC ilkelerini listele (\"access_as_external\" veya " "\"access_as_shared\")" -msgid "" -"List network RBAC policies according to given object type (\"qos_policy\" or " -"\"network\")" -msgstr "" -"Verilen nesne türüne göre Ağ RBAC politikalarının listesi (\"qos_policy\" " -"veya \"network\")" - msgid "List network agents" msgstr "Ağ ajanlarını listele" @@ -3300,13 +2780,6 @@ msgstr "Ağları fiziksel ağın adına göre listeleme" msgid "List networks according to their name" msgstr "Ağları isimlerine göre listele" -msgid "" -"List networks according to their physical mechanisms. The supported options " -"are: flat, geneve, gre, local, vlan, vxlan." -msgstr "" -"Ağları fiziksel mekanizmalarına göre listeleyin. Desteklenen seçenekler " -"şunlardır: flat, geneve, gre, local, vlan, vxlan." 
- msgid "List networks according to their project (name or ID)" msgstr "Projelerine göre ağları listele (isim veya ID)" @@ -3333,15 +2806,6 @@ msgstr "Çıktıda verilen adın sadece adres kapsamlarını listeleyin" msgid "List only agents running on the specified host" msgstr "Yalnızca belirtilen ana bilgisayarda çalışan ajanları listele" -msgid "" -"List only agents with the specified agent type. The supported agent types " -"are: bgp, dhcp, open-vswitch, linux-bridge, ofa, l3, loadbalancer, metering, " -"metadata, macvtap, nic." -msgstr "" -"Yalnızca belirtilen aracı türü olan aracıları listeleyin. Desteklenen aracı " -"türleri şunlardır: bgp, dhcp, open-vswitch, linux-bridge, ofa, l3, " -"loadbalancer, metering, metadata, macvtap, nic." - msgid "List only ports attached to this router (name or ID)" msgstr "" "Yalnızca bu yönlendiriciye bağlı bağlantı noktalarını listeleyin (adı veya " @@ -3382,16 +2846,6 @@ msgstr "Sadece genele açık imajları listele" msgid "List only public types" msgstr "Sadece genele açık türleri listele" -msgid "" -"List only servers changed after a certain point of time. The provided time " -"should be an ISO 8061 formatted time. ex 2016-03-04T06:27:59Z ." -msgstr "" -"Yalnızca belirli bir zaman noktasından sonra sunucuları listeleyin. Verilen " -"süre bir ISO 8061 biçiminde olmalıdır. Örn 2016-03-04T06: 27: 59Z ." - -msgid "List only shared images" -msgstr "Sadece paylaşılan imajları listele" - msgid "List only specified service (name only)" msgstr "Sadece belirtilen servisleri listele (sadece isim)" @@ -3403,36 +2857,12 @@ msgstr "" msgid "List only subnet pools of given name in output" msgstr "Verilen isimdeki sadece altağ havuzlarını çıktıda listele" -msgid "" -"List only subnets of a given service type in output e.g.: network:" -"floatingip_agent_gateway. Must be a valid device owner value for a network " -"port (repeat option to list multiple service types)" -msgstr "" -"Çıktıda verilen servisin sadece altağlarını listele örn: network:" -"floatingip_agent_gateway. Bir ağ bağlantı noktası için geçerli bir aygıt " -"sahibi değeri olmalıdır (birden fazla servis türü listelemek için seçeneği " -"tekrarla)" - -msgid "" -"List only subnets of given IP version in output.Allowed values for IP " -"version are 4 and 6." -msgstr "" -"Verilen IP sürümünün sadece altağlarını çıktıda listele. IP sürümü için izin " -"verilen sürümler 4 ve 6." - msgid "List only subnets of given gateway IP in output" msgstr "Verilen geçit IP'sinin sadece alt ağlarını çıktıda listele" msgid "List only subnets of given name in output" msgstr "Verilen isimdeki sadece altağları çıktıda listele" -msgid "" -"List only subnets of given subnet range (in CIDR notation) in output e.g.: --" -"subnet-range 10.10.0.0/16" -msgstr "" -"Verilen alt ağ aralığında (CIDR notasyonu ile) sadece altağları çıktıda " -"listele örn: --subnet-range 10.10.0.0/16" - msgid "" "List only subnets which belong to a given network in output (name or ID)" msgstr "Verilen bir ağa ait sadece altağları çıktıda listele (isim veya ID)" @@ -3462,22 +2892,6 @@ msgstr "" "Yetkilendirilmiş kullanıcı için projeleri listele. Diğer filtrelerin yerini " "alır." 
-msgid "List qos policies according to their project (name or ID)" -msgstr "Projelerine göre QoS politikalarını listele (isim veya ID)" - -msgid "List qos policies not shared between projects" -msgstr "Projeler arasında paylaştırılmayan qos ilkelerini listele" - -msgid "List qos policies shared between projects" -msgstr "Projeler arasında paylaşılan qos politikalarını listele" - -msgid "List quotas for all projects with non-default quota values" -msgstr "" -"Varsayılan olmayan kota değerlerine sahip tüm projelerin kotalarını listeleme" - -msgid "List recent events of a server" -msgstr "Bir sunucunun son olaylarını listele" - msgid "List recognized commands by group" msgstr "Grup tarafından tanınan komutları listele" @@ -3511,15 +2925,6 @@ msgstr "Gelen ağ trafiğine uygulanan kurallar listesi" msgid "List rules applied to outgoing network traffic" msgstr "Giden ağ trafiğine uygulanan kuralları listele" -msgid "" -"List rules by the IP protocol (ah, dhcp, egp, esp, gre, icmp, igmp, ipv6-" -"encap, ipv6-frag, ipv6-icmp, ipv6-nonxt, ipv6-opts, ipv6-route, ospf, pgm, " -"rsvp, sctp, tcp, udp, udplite, vrrp and integer representations [0-255])." -msgstr "" -"Kuralları IP protokolüne göre listele (ah, dhcp, egp, esp, gre, icmp, igmp, " -"ipv6-encap, ipv6-frag, ipv6-icmp, ipv6-nonxt, ipv6-opts, ipv6-route, ospf, " -"pgm, rsvp, sctp, tcp, udp, udplite, vrrp ve tam sayı gösterimleri [0-255])" - msgid "List security group rules" msgstr "Güvenlik grubu kurallarını listele" @@ -3547,9 +2952,6 @@ msgstr "Servis katalogundaki servisleri listele" msgid "List services on specified host (name only)" msgstr "Belirtilen sunucu üzerindeki servisleri listele (sadece isim)" -msgid "List snapshots" -msgstr "Anlık görüntüleri listele" - msgid "List subnet pools" msgstr "Altağ havuzlarını listele" @@ -3585,9 +2987,6 @@ msgstr "Varsayılan disk bölümü türünü listele" msgid "List trusts" msgstr "Güvenleri listele" -msgid "List user-role assignments" -msgstr "Kullanıcı rol atamalarını listele" - msgid "List users" msgstr "Kullanıcıları listele" @@ -3609,76 +3008,12 @@ msgstr "Disk bölümü türlerini listele" msgid "List volumes" msgstr "Disk bölümlerini listele" -msgid "" -"Listing assignments using role list is deprecated as of the Newton release. " -"Use role assignment list --user --project --names " -"instead." -msgstr "" -"Rol listesi kullanarak liste atamaları, Newton sürümünden itibaren " -"kullanımdan kaldırılmıştır. Onun yerine rol atama listesi --user " -"--project --names komutunu kullanın." - -msgid "" -"Listing assignments using role list is deprecated. Use role assignment list " -"--group --domain --names instead." -msgstr "" -"Rol listesini kullanarak liste atamaları kullanımdan kaldırıldı. Onun yerine " -"role assignment list --group --domain --names " -"kullanın." - -msgid "" -"Listing assignments using role list is deprecated. Use role assignment list " -"--group --project --names instead." -msgstr "" -"Rol listesini kullanarak liste atamaları kullanımdan kaldırıldı. Onun yerine " -"role assignment list --group --project --names " -"kullanın." - -msgid "" -"Listing assignments using role list is deprecated. Use role assignment list " -"--user --domain --names instead." -msgstr "" -"Rol listesini kullanarak liste atamaları kullanımdan kaldırıldı. Bunun " -"yerine role assignment list --user --domain --" -"names kullanın." - -msgid "" -"Listing assignments using role list is deprecated. Use role assignment list " -"--user --domain default --names instead." 
-msgstr "" -"Rol listesini kullanarak liste atamaları kullanımdan kaldırıldı. Onun yerine " -"role assignment list --user --domain default --names kullanın." - -msgid "" -"Listing assignments using role list is deprecated. Use role assignment list " -"--user --project --names instead." -msgstr "" -"Rol listesini kullanarak liste atamaları kullanımdan kaldırıldı. Bunun " -"yerine, role assignment list --user --project --" -"names kullanın." - -msgid "" -"Listing assignments using user role list is deprecated as of the Newton " -"release. Use role assignment list --user --project --names instead." -msgstr "" -"Atamaları kullanıcı rol listesi kullanarak listelemek Newton sürümünden " -"itibaren kullanımdan kaldırılmıştır. Onun yerine role assignment list --user " -" --project --names komutunu kullanın." - msgid "Lists all volume transfer requests." msgstr "Tüm disk bölümü aktarım isteklerini listele." msgid "Local filename(s) to upload" msgstr "Yüklenecek yerel dosya ad(lar)ı" -msgid "Lock server(s). A non-admin user will not be able to execute actions" -msgstr "" -"Sunucu(ları/yu) kilitle. Yönetici olmayan kullanıcılar işlem yapamayacaktır." - -msgid "Login name (ssh -l option)" -msgstr "Giriş adı (ssh -l seçeneği)" - msgid "MAC address of this port (admin only)" msgstr "Bu bağlantı noktasının MAC adresi (yalnızca yönetici)" @@ -3729,47 +3064,14 @@ msgstr "Disk bölümünü ön yüklemesiz olarak işaretle (varsayılan)" msgid "Maximum bandwidth in kbps" msgstr "En büyük bant genişliği kbps cinsinden" -msgid "Maximum burst in kilobits, 0 means automatic" -msgstr "Kilo bit cinsinden en büyük atış, 0 otomatik anlamına gelir" +msgid "Maximum number of servers to launch (default=1)" +msgstr "Başlatılması gereken en fazla sunucu sayısı (varsayılan=1)" -msgid "Maximum number of backups to display" -msgstr "En fazla gösterilecek yedek sayısı " +msgid "Memory size in MB (default 256M)" +msgstr "MB cinsinden bellek boyutu (varsayılan 256M)" -msgid "Maximum number of flavors to display" -msgstr "Gösterilecek en fazla flavor sayısı" - -msgid "Maximum number of images to display." -msgstr "Gösterilecek en fazla imaj sayısı." - -msgid "" -"Maximum number of servers to display. If limit equals -1, all servers will " -"be displayed. If limit is greater than 'osapi_max_limit' option of Nova API, " -"'osapi_max_limit' will be used instead." -msgstr "" -"Gösterilecek en fazla sunucu sayısı. Eğer sınır -1'e eşitse, tüm sunucular " -"gösterilir. Eğer limit Nova API'nın 'osapi_max_limit' seçeneğinden daha " -"büyükse, onun yerine 'osapi_max_limit' kullanılacaktır." - -msgid "Maximum number of servers to launch (default=1)" -msgstr "Başlatılması gereken en fazla sunucu sayısı (varsayılan=1)" - -msgid "Maximum number of snapshots to display" -msgstr "Gösterilecek en büyük anlık görüntü sayısı" - -msgid "Maximum number of volumes to display" -msgstr "Gösterilecek en çok disk bölümü sayısı" - -msgid "Memory size in MB (default 256M)" -msgstr "MB cinsinden bellek boyutu (varsayılan 256M)" - -msgid "" -"Metainfo for the flavor profile. This becomes required if --driver is " -"missing and vice versa" -msgstr "" -"Flavor profili için Metainfo. 
--driver eksikse bu gereklidir ya da tam tersi" - -msgid "Meter rule (ID only)" -msgstr "Ölçek kuralı (sadece ID)" +msgid "Meter rule (ID only)" +msgstr "Ölçek kuralı (sadece ID)" msgid "Meter rule to delete (ID only)" msgstr "Silinecek ölçüm kuralı (sadece ID)" @@ -3780,9 +3082,6 @@ msgstr "Silinecek ölçek (isim veya ID)" msgid "Meter to display (name or ID)" msgstr "Gösterilecek ölçek (isim veya ID)" -msgid "Migrate server to different host" -msgstr "Farklı konakçıya sunucuyu göç ettir" - msgid "Migrate volume to a new host" msgstr "Disk bölümünü yeni bir sunucuya göç ettir" @@ -3872,9 +3171,6 @@ msgid "" "Name of the physical network over which the virtual network is implemented" msgstr "Sanal ağın üzerinde uygulandığı fiziksel ağın adı" -msgid "Name of the snapshot" -msgstr "Anlık görüntü ismi" - msgid "Name of this port" msgstr "Bu bağlantı noktasının ismi" @@ -3884,12 +3180,6 @@ msgstr "Disk bölümü anabilgisayarının ismi" msgid "Name or ID of project to show usage for" msgstr "Kullanımı gösterilecek projenin isim veya ID'si" -msgid "Name or ID of security group to remove from server" -msgstr "Sunucudan kaldırılacak güvenlik grubunun isim veya ID'si" - -msgid "Name or ID of server to use" -msgstr "Kullanılacak sunucunun isim veya ID'si" - #, python-format msgid "Network API version, default=%s (Env: OS_NETWORK_API_VERSION)" msgstr "Ağ API sürümü, varsayılan=%s (Env: OS_NETWORK_API_VERSION)" @@ -3948,9 +3238,6 @@ msgstr "Bir ajandan kaldırılacak ağ (isim veya ID)" msgid "Network to display (name or ID)" msgstr "Gösterilecek ağ (isim veya ID)" -msgid "Network to fetch an IP address from (name or ID)" -msgstr "IP adresi çekilecek ağ (isim veya ID)" - msgid "Network to modify (name or ID)" msgstr "Düzenlenecek ağ (isim veya ID)" @@ -3981,21 +3268,6 @@ msgstr "Yeni adres kapsam ismi" msgid "New aggregate name" msgstr "Yeni küme ismi" -msgid "New backup description" -msgstr "Yeni yedek açıklaması" - -msgid "New backup name" -msgstr "Yeni yedek ismi" - -msgid "" -"New backup state (\"available\" or \"error\") (admin only) (This option " -"simply changes the state of the backup in the database with no regard to " -"actual status, exercise caution when using)" -msgstr "" -"Yeni yedek durumu (\"kullanılabilir\" veya \"hata\") (sadece yönetici) (Bu " -"seçenek, veritabanındaki yedeklemenin durumunu gerçek durumu dikkate almadan " -"değiştirir, kullanırken dikkatli olun.)" - msgid "New consistency group description" msgstr "Yeni tutarlılık grubu açıklaması" @@ -4109,9 +3381,6 @@ msgstr "Yeni sunucu grup ismi" msgid "New server name" msgstr "Yeni sunucu ismi" -msgid "New server state (valid value: active, error)" -msgstr "Yeni sunucu durumu (geçerli değer: aktif, hata)" - msgid "New service description" msgstr "Yeni servis tanımı" @@ -4218,10 +3487,6 @@ msgstr "'%s' türünde, isminde veya ID'si ile bir servis kataloğu yok." msgid "No service with a type, name or ID of '%s' exists." msgstr "'%s' türünde, isminde veya ID'si ile bir servis bulunamadı." -#, python-format -msgid "No tags associated with the %s" -msgstr "%s ile ilişkilendirilmiş hiç etiket yok" - msgid "Number of backups to keep (default: 1)" msgstr "Tutulacak yedekleme sayısı (varsayılan: 1)" @@ -4256,9 +3521,6 @@ msgstr "Bir veya birden fazla ayar kaldırma işlemi başarısız" msgid "Only an authorized user may issue a new token." msgstr "Yalnızca yetkili bir kullanıcı yeni bir jeton verebilir." -msgid "Only display deleted servers (Admin only)." -msgstr "Sadece silinen sunucuları göster (sadece admin)." 
- msgid "Only return hosts in the availability zone" msgstr "Sadece kullanılabilirlik bölgesindeki sunucuları döndürür" @@ -4277,9 +3539,6 @@ msgstr "Işletim sistemi dağıtım sürümü" msgid "Optional backup container name" msgstr "Seçimli yedek kap ismi" -msgid "Options in ssh_config(5) format (ssh -o option)" -msgstr "ssh_config(5) biçemi içerisindeki seçenekler (ssh -o seçeneği)" - msgid "Original user password" msgstr "Orijinal kullanıcı parolası" @@ -4298,18 +3557,12 @@ msgstr "Parolalar eşleşmedi, parola değiştirilmedi" msgid "Pause server(s)" msgstr "Sunucu(ları/yu) durdur" -msgid "Perform a block live migration" -msgstr "Bir blok gerçek göç gerçekleştir" - msgid "Perform a hard or soft server reboot" msgstr "Sert veya yumuşak sunucu yeniden başlatmayı gerçekleştir" msgid "Perform a hard reboot" msgstr "Sert yeniden başlatmayı gerçekleştir" -msgid "Perform a shared live migration (default)" -msgstr "Paylaşımlı canlı göç gerçekleştir (varsayılan)" - msgid "Perform a soft reboot" msgstr "Yumuşak yeniden başlatmayı gerçekleştir" @@ -4380,9 +3633,6 @@ msgstr "İmajı silinmekten koru" msgid "Print image size in a human-friendly format." msgstr "İmaj boyutunu insan dostu bir biçimde bastırın." -msgid "Private key file (ssh -i option)" -msgstr "Gizli anahtar dosyası (ssh -i seçeneği)" - msgid "Project and User must be specified" msgstr "Proje ve kullanıcı belirtilmeli" @@ -4392,15 +3642,9 @@ msgstr "Devredilen proje (isim veya ID) (gerekli)" msgid "Project description" msgstr "Proje açıklaması" -msgid "Project must be specified" -msgstr "Proje belirtilmek zorunda" - msgid "Project that consumer wants to access (name or ID) (required)" msgstr "Alıcının erişmek istediği proje (isim veya ID) (gerekli)" -msgid "Project to associate with image (name or ID)" -msgstr "İmaj ile ilişkili proje (isim veya ID)" - msgid "Project to clean (name or ID)" msgstr "Temizlenecek proje (isim veya ID)" @@ -4525,16 +3769,6 @@ msgstr "" msgid "Public or private key to display (name only)" msgstr "Gösterilecek açık veya kapalı anahtar (sadece isim)" -msgid "Put server in rescue mode" -msgstr "Sunucuyu kurtarma kipine getir" - -msgid "" -"Python module path to driver. This becomes required if --metainfo is missing " -"and vice versa" -msgstr "" -"Python modülünün sürücüye yolu. 
--metainfo eksikse bu gereklidir ya da tam " -"tersi" - msgid "QoS policy that contains the rule (name or ID)" msgstr "Kuralı içeren QoS politikası (isim veya ID)" @@ -4607,18 +3841,12 @@ msgstr "Düzenlenecek bölge" msgid "Regular expression to match IP addresses" msgstr "IP adresleriyle eşleşen düzenli ifadeler" -msgid "Regular expression to match IPv6 addresses" -msgstr "IPv6 adresleriyle eşleşen düzenli ifadeler" - msgid "Regular expression to match instance name (admin only)" msgstr "Sunucu adıyla eşleşen düzenli ifadeler (yalnızca yönetici)" msgid "Regular expression to match names" msgstr "Adları eşleştirmek için düzenli ifadeler" -msgid "Reject the image membership" -msgstr "İmaj üyeliğini red et" - msgid "" "Remote IDs to associate with the Identity Provider (repeat option to provide " "multiple values)" @@ -4626,13 +3854,6 @@ msgstr "" "Kimlik Sağlayıcısı ile ilişkilendirilecek uzak kimlikler (birden fazla değer " "sağlamak için seçeneği tekrarlayın)" -msgid "" -"Remote IP address block (may use CIDR notation; default for IPv4 rule: " -"0.0.0.0/0)" -msgstr "" -"Uzak IP adres bloğu (CIDR notasyonu kullanılabilir; IPv4 kuralı için " -"varsayılan: 0.0.0.0/0)" - msgid "Remote security group (name or ID)" msgstr "Uzak güvenlik grubu (isim veya ID)" @@ -4740,43 +3961,24 @@ msgstr "Sunucudan güvenlik grubunu kaldır" msgid "Remove service profile from network flavor" msgstr "Servis profilini ağ flavor'ından kaldır" -msgid "" -"Remove subnet pool prefixes (in CIDR notation). (repeat option to unset " -"multiple prefixes)." -msgstr "" -"Alt ağ havuzu öneklerini kaldır (CIDR gösteriminde). (Birden fazla önekin " -"ayarını kaldırmak için seçeneği tekrarlayın)." - msgid "Remove the QoS policy attached to the port" msgstr "Bağlantı noktasına eklenmiş QoS politikasını kaldır" msgid "Remove the QoS policy attached to this network" msgstr "Bu ağa bağlı QoS politikasını kaldırın" -msgid "Remove the encryption type for this volume type (admin oly)" -msgstr "Bu disk bölümü için şifreleme türünü sil (sadece yönetici)" - msgid "Remove the encryption type for this volume type (admin only)" msgstr "Bu disk bölümü türü için şifreleme türü kaldırıldı (sadece yönetici)" msgid "Remove user from group" msgstr "Kullanıcıyı gruptan kaldır" -msgid "Remove volume from server" -msgstr "Disk bölümünü sunucudan kaldır" - msgid "Remove volume(s) from consistency group" msgstr "Tutarlılık grubundan disk bölümlerini sil" -msgid "Removes a role assignment from domain/project : user/group" -msgstr "Bir rol atamasını alan/proje : kullanıcı/gruptan kaldırır." 
- msgid "Removes volume type access to project (name or ID) (admin only)" msgstr "Projeye disk türü erişimini kaldırır (isim veya ID) (sadece yönetici)" -msgid "Replicated volume to clone (name or ID)" -msgstr "Klonlanacak yinelenmiş disk bölümü (isim veya ID)" - msgid "Request ID of the event to show (ID only)" msgstr "Gösterilecek olayın ID'sini iste (sadece ID)" @@ -4795,15 +3997,9 @@ msgstr "İmaj üyeliğini 'beklemede' olarak sıfırlayın" msgid "Resize server to specified flavor" msgstr "Sunucuyu belirtilen flavor'a yeniden boyutlandır" -msgid "Restore backup" -msgstr "Yedeği yeniden yükle" - msgid "Restore server from rescue mode" msgstr "Kurtarma kipinden sunucuyu onar" -msgid "Restore server state before resize" -msgstr "Sunucu durumunu yeniden boyutlandırmadan önceki haline geri getir" - msgid "Restore server(s)" msgstr "Sunucu(ları/yu) onar " @@ -4828,13 +4024,6 @@ msgstr "Mevcut rolü döndür" msgid "Return existing user" msgstr "Mevcut kullanıcıyı döndür" -msgid "" -"Return the auto allocated topology for a given project. Default is current " -"project" -msgstr "" -"Belirli bir proje için otomatik olarak ayrılan topolojiyi döndürür. " -"Varsayılan geçerli projedir" - #, python-format msgid "Returning existing domain %s" msgstr "Mevcut alanı döndür %s" @@ -4909,15 +4098,6 @@ msgstr "" msgid "Roll up items with " msgstr "Elemanları ile toparla" -msgid "" -"Route to be removed from this subnet e.g.: destination=10.10.0.0/16," -"gateway=192.168.71.254 destination: destination subnet (in CIDR notation) " -"gateway: nexthop IP address (repeat option to unset multiple host routes)" -msgstr "" -"Bu altağdan silinecek yön örn: hedef=10.10.0.0/16,geçit=192.168.71.254 hedef:" -"hedef altağ (CIDR notasyonunda) geçit: bir sonraki uğranacak IP adresi " -"(birden fazla sunucu yönü ayarı kaldırmak için seçeneği tekrar et)" - #, python-format msgid "Router does not contain route %s" msgstr "Yönlendirici %s yönünü içermiyor" @@ -4949,23 +4129,6 @@ msgstr "Alt ağın ekleneceği yönlendirici (isim veya ID)" msgid "Router(s) to delete (name or ID)" msgstr "Silinecek yönlendirici(ler) (isim veya ID)" -msgid "" -"Routes associated with the router destination: destination subnet (in CIDR " -"notation) gateway: nexthop IP address (repeat option to set multiple routes)" -msgstr "" -"Yönlendirici hedefi ile ilişkili yönlendiriciler: hedef altağ (CIDR " -"notasyonunda) geçit: bir sonraki atlanacak IP adresi (birden fazla " -"yönlendirici ayarlamak için seçeneği tekrarla)" - -msgid "" -"Routes to be removed from the router destination: destination subnet (in " -"CIDR notation) gateway: nexthop IP address (repeat option to unset multiple " -"routes)" -msgstr "" -"Yönlendirici hedefinden kaldırılacak yönlendiriciler: hedef alt ağ (CIDR " -"notasyonuyla) geçit: atlanacak sonraki IP adresi (birden fazla " -"yönlendiricinin ayarını kaldırmak için seçeneği tekrarla)" - #, python-format msgid "Rule ID %(rule_id)s not found" msgstr "%(rule_id)s kural ID'si bulunamadı" @@ -4976,10 +4139,6 @@ msgstr "Gelen ağ trafiğine uygulanacak kural (varsayılan)" msgid "Rule applies to outgoing network traffic" msgstr "Dışarıya doğru ağ trafiğine uygulanacak kural" -#, python-format -msgid "Rule type \"%(rule_type)s\" only requires arguments %(args)s" -msgstr "\"%(rule_type)s\" kural türü sadece %(args)s argümanlarını gerektirir" - msgid "SSH to server" msgstr "Sunucuya SSH ile bağlan" @@ -4992,29 +4151,6 @@ msgstr "Kap içeriğini yerel olarak kaydet" msgid "Save object locally" msgstr "Nesneyi yerel olarak kaydet" -msgid "" -"Scale 
server to a new flavor.\n" -"\n" -"A resize operation is implemented by creating a new server and copying the\n" -"contents of the original disk into a new one. It is also a two-step process " -"for\n" -"the user: the first is to perform the resize, the second is to either " -"confirm\n" -"(verify) success and release the old server, or to declare a revert to " -"release\n" -"the new server and restart the old one." -msgstr "" -"Sunucuyu yeni bir flavor'a ölçekle.\n" -"\n" -"Bir yeniden boyutlandırma işlemi yeni bir sunucu oluşturma ve orijinal \n" -"diskteki içeriğin yeni bir tanesine kopyalanması ile gerçekleştirilir. Bu " -"ayrıca\n" -"kullanıcı için iki adımlı bir süreçtir: birincisi yeniden boyutlandırmayı " -"gerçekleştirmek,\n" -"ikincisi ya başarılı olduğunu doğrulamak ve eski sunucuyu silmek, ya da " -"yenisini silip\n" -"eskisini yeniden başlatma isteğini belirtmek." - msgid "Search by flavor (name or ID)" msgstr "Flavor'a göre ara (isim veya ID)" @@ -5030,9 +4166,6 @@ msgstr "Projeye göre ara (sadece yönetici) (isim veya ID)" msgid "Search by server status" msgstr "Sunucu durumuna göre ara" -msgid "Search by user (admin only) (name or ID)" -msgstr "Kullanıcıya göre ara (sadece yönetici) (isim veya ID)" - msgid "Secret associated with (required)" msgstr " ile ilişkili gizli anahtar (gerekli)" @@ -5045,16 +4178,6 @@ msgstr "Gösterilecek güvenlik grubu kuralları (sadece ID)" msgid "Security group rule(s) to delete (ID only)" msgstr "Silinecek güvenlik grubu kuralları (sadece ID)" -msgid "Security group to add (name or ID)" -msgstr "Eklenecek güvenlik grubu (isim veya ID)" - -msgid "" -"Security group to assign to this server (name or ID) (repeat option to set " -"multiple groups)" -msgstr "" -"Bu sunucuya atanan güvenlik grubu (isim veya ID) (birden fazla grup " -"ayarlamak için seçeneği tekrar edin)" - msgid "" "Security group to associate with this port (name or ID) (repeat option to " "set multiple security groups)" @@ -5086,9 +4209,6 @@ msgstr "" "Ağ türüne göre bölüm belirteci, vlan ağ türü için VLAN ID, geneve, gre ve " "vxlan ağ türleri için tünel ID" -msgid "Select an availability zone for the server" -msgstr "Sunucu için kullanılabilirlik bölgesi seçin" - msgid "Server (name or ID)" msgstr "Sunucu (isim veya ID)" @@ -5107,18 +4227,12 @@ msgstr "İmaj oluşturulacak sunucu (isim veya ID)" msgid "Server to list events (name or ID)" msgstr "Olayları listelenecek sunucu (isim veya ID)" -msgid "Server to receive the IP address (name or ID)" -msgstr "IP adresin alınacağı sunucu (isim veya ID)" - msgid "Server to receive the fixed IP address (name or ID)" msgstr "Sabit IP adresleri alınacak sunucu (isim veya ID)" msgid "Server to receive the floating IP address (name or ID)" msgstr "Kayan IP adresi alınacak sunucu (isim veya ID)" -msgid "Server to remove the IP address from (name or ID)" -msgstr "IP adresin kaldırılacağı sunucu (isim veya ID)" - msgid "Server to remove the fixed IP address from (name or ID)" msgstr "Sabit IP adresin silineceği sunucu (isim veya ID)" @@ -5200,39 +4314,9 @@ msgstr "Gösterilecek servis (tür, isim veya ID)" msgid "Service to modify (type, name or ID)" msgstr "Düzenlenecek servis (tür, isim veya ID)" -msgid "" -"Service type for this subnet e.g.: network:floatingip_agent_gateway. Must be " -"a valid device owner value for a network port (repeat option to set multiple " -"service types)" -msgstr "" -"Bu alt ağ için servis türü örn: network:floatingip_agent_gateway. 
Bir ağ " -"bağlantı noktası için geçerli bir cihaz sahibi değeri olmalıdır (birden " -"fazla servis türü ayarlamak için seçeneği tekrarlayın)" - -msgid "" -"Service type to be removed from this subnet e.g.: network:" -"floatingip_agent_gateway. Must be a valid device owner value for a network " -"port (repeat option to unset multiple service types)" -msgstr "" -"Bu altağdan kaldırılacak servis türü örn: network:floatingip_agent_gateway. " -"Bir ağ bağlantı noktası için geçerli bir aygıt sahibi değeri olmalıdır " -"(birden fazla servis türünün ayarını kaldırmak için seçeneği tekrarlayın)" - -msgid "" -"Service type to which the flavor applies to: e.g. VPN (See openstack network " -"service provider list for loaded examples.)" -msgstr "" -"Flavor'ın uygulanacağı hizmet türü: ör. VPN (Yüklü sunucular için açık devre " -"ağ servis sağlayıcısı listesine bakın.)" - msgid "Service(s) to delete (type, name or ID)" msgstr "Silinecek servis(ler) (tür, isim veya ID)" -msgid "Set DNS name to this port (requires DNS integration extension)" -msgstr "" -"DNS adını bu bağlantı noktasına ayarlayın (DNS entegrasyon uzantısı " -"gerektirir)" - msgid "Set Network QoS rule properties" msgstr "Ağ QoS kural özelliklerini ayarla" @@ -5353,9 +4437,6 @@ msgstr "" msgid "Set availability zone name" msgstr "Kullanılabilirlik bölge adını ayarla" -msgid "Set compute agent properties" -msgstr "Hesaplama ajanı özelliklerini ayarla" - msgid "Set compute service properties" msgstr "Hesaplama servisi özelliklerini ayarla" @@ -5371,24 +4452,9 @@ msgstr "Kap özelliklerini ayarla" msgid "Set credential properties" msgstr "Kimlik bilgileri özelliklerini ayarla" -msgid "" -"Set data plane status of this port (ACTIVE | DOWN). Unset it to None with " -"the 'port unset' command (requires data plane status extension)" -msgstr "" -"Bu bağlantı noktasının veri düzlemi durumunu ayarlayın (ACTIVE | DOWN). 
" -"'port unset' komutu ile None'a getirin (veri düzlemi durum uzantısı " -"gerektirir)" - msgid "Set default project (name or ID)" msgstr "Varsayılan projeyi ayarla (isim veya ID)" -msgid "" -"Set default quota for subnet pool as the number ofIP addresses allowed in a " -"subnet" -msgstr "" -"Bir alt ağda izin verilen IP adreslerinin sayısı olarak alt ağ havuzu için " -"varsayılan kota ayarla" - msgid "Set domain properties" msgstr "Alan özelliklerini ayarla" @@ -5416,9 +4482,6 @@ msgstr "Yüzen IP açıklaması ayarla" msgid "Set group properties" msgstr "Grup özelliklerini ayarla" -msgid "Set host properties" -msgstr "Sunucu özelliklerini ayarla" - msgid "Set identity provider description" msgstr "Kimlik sağlayıcı tanımlarını ayarla" @@ -5467,9 +4530,6 @@ msgstr "Ağ bölüm ismi ayarla" msgid "Set network segment properties" msgstr "Ağ kesimi özelliklerini ayarla" -msgid "Set new root password (interactive only)" -msgstr "Yeni root parolası ayarla (sadece interaktif)" - msgid "Set object properties" msgstr "Nesne özelliklerini ayarla" @@ -5491,18 +4551,12 @@ msgstr "Proje ismini ayarla" msgid "Set project properties" msgstr "Proje özelliklerini ayarla" -msgid "Set quotas for " -msgstr " için kotaları ayarla" - msgid "Set quotas for a specific " msgstr "Belirli bir için kota ayarla" msgid "Set quotas for project or class" msgstr "Proje veya sınıf için kotaları ayarla" -msgid "Set quotas for this project or class (name/ID)" -msgstr "Bu proje veya sınıf için (isim/ID) kotaları ayarla" - msgid "Set region properties" msgstr "Bölgenin özelliklerini ayarla" @@ -5546,9 +4600,6 @@ msgstr "Servis özelliklerini ayarla" msgid "Set service provider properties" msgstr "Servis sağlayıcı özelliklerini ayarlayın" -msgid "Set snapshot properties" -msgstr "Anlık görüntü özelliklerini ayarlayın" - msgid "Set subnet description" msgstr "Alt ağ açıklaması ayarla" @@ -5580,32 +4631,6 @@ msgstr "Alt ağ havuzu özellikleri ayarla" msgid "Set subnet properties" msgstr "Altağ özelliklerini ayarla" -msgid "" -"Set the class that provides encryption support for this volume type (e.g " -"\"LuksEncryptor\") (admin only) (This option is required when setting " -"encryption type of a volume for the first time. Consider using other " -"encryption options such as: \"--encryption-cipher\", \"--encryption-key-size" -"\" and \"--encryption-control-location\")" -msgstr "" -"Bu birim türü için şifreleme desteği sağlayan sınıfı ayarlayın (örn " -"\"LuksEncryptor\") (sadece yönetici) (Bu seçenek, bir disk bölümü şifreleme " -"türünü ilk kez ayarlarken gereklidir. Şu diğer şifreleme seçeneklerini " -"kullanmayı düşünün: \"--encryption-cipher\", \"--encryption-key-size\" ve " -"\"--encryption-control-location\")" - -msgid "" -"Set the class that provides encryption support for this volume type (e.g " -"\"LuksEncryptor\") (admin only) (This option is required when setting " -"encryption type of a volume. Consider using other encryption options such " -"as: \"--encryption-cipher\", \"--encryption-key-size\" and \"--encryption-" -"control-location\")" -msgstr "" -"Bu disk bölümü türü için şifreleme desteği sağlayan sınıfı ayarla (örn " -"\"LuksEncryptor\") (sadece yönetici) (Bir disk bölümünün şifreleme türü " -"ayarlanırken bu seçenek zorunludur. 
\"--encryption-cipher\", \"--encryption-" -"key-size\" ve \"--encryption-control-location\" gibi diğer şifreleme " -"seçeneklerini kullanmayı göz önünde bulundurun)" - msgid "" "Set the encryption algorithm or mode for this volume type (e.g \"aes-xts-" "plain64\") (admin only)" @@ -5642,9 +4667,6 @@ msgstr "" "encryption-cipher\", \"--encryption-key-size\" ve \"--encryption-provider\" " "gibi diğer şifreleme seçeneklerini kullanmayı göz önünde bulundurun)" -msgid "Set the password on the rebuilt instance" -msgstr "Yeniden oluşturulmuş sunucudaki parolayı ayarla" - msgid "Set the router as highly available (disabled router only)" msgstr "" "Yönlendiriciyi yüksek kullanılabilirlikli olarak ayarla (sadece " @@ -5672,17 +4694,6 @@ msgstr "Bunu varsayılan olmayan bir ağ QoS politikası olarak ayarla" msgid "Set this as a non-default subnet pool" msgstr "Bunu varsayılan olmayan altağ havuzu olarak ayarla" -msgid "" -"Set this network as an external network (external-net extension required)" -msgstr "" -"Bu şebekeyi harici bir ağ olarak ayarlayın (harici ağ uzantısı gerekir)" - -msgid "Set this network as an internal network" -msgstr "Bu şebekeyi dahili bir şebeke olarak ayarlayın" - -msgid "Set this network as an internal network (default)" -msgstr "Bu ağı dahili bir ağ olarak ayarlayın (varsayılan)" - msgid "Set this subnet pool as not shared" msgstr "Bu alt ağ havuzunu paylaşılmayan olarak ayarlayın" @@ -5754,9 +4765,6 @@ msgstr "Projeler arasında adres kapsamını paylaş" msgid "Share the network between projects" msgstr "Ağları projeler arasında paylaşın" -msgid "Shelve server(s)" -msgstr "Sunucu(yu/ları) raftan çıkart" - msgid "Show API extension" msgstr "API uzantısını göster" @@ -5804,12 +4812,6 @@ msgstr "" "Tüm projeler için detayları göster. Sadece yönetici. (varsayılan olarak " "False)" -msgid "" -"Show limits for a specific project (name or ID) [only valid with --absolute]" -msgstr "" -"Belirli bir proje için sınırları göster (isim veya ID) [sadece --absolute " -"ile geçerli]" - msgid "Show network IP availability details" msgstr "Ağ IP kullanılabilirlik detaylarını göster" @@ -5832,30 +4834,12 @@ msgstr "Parolaları düz metin olarak göster" msgid "Show project's subtree (children) as a list" msgstr "Projenin alt projelerini (çocuklarını) bir liste olarak göster" -msgid "Show quotas for " -msgstr " için kotaları göster" - -msgid "Show quotas for project or class" -msgstr "Proje veya sınıf için kotaları göster" - -msgid "Show quotas for this project or class (name or ID)" -msgstr "Bu proje veya sınıf (isim veaya ID) için kotaları göster" - -msgid "Show rate limits" -msgstr "Oran sınırları göster" - msgid "Show resource usage for a single project" msgstr "Tek bir proje için kaynak kullanımını göster" msgid "Show serial console URL" msgstr "serial konsol URL'ini göster" -msgid "Show server details" -msgstr "Sunucu detaylarını göster" - -msgid "Show server event details" -msgstr "Sunucu olay detaylarını listele" - msgid "Show server's console output" msgstr "Sunucunun konsol çıktısını göster" @@ -5868,9 +4852,6 @@ msgstr "Servis katalog bilgisini göster" msgid "Show the project's parents as a list" msgstr "Projenin üst projelerini bir liste olarak göster" -msgid "Show volume details" -msgstr "Disk bölümü detaylarını göster" - msgid "Show volume transfer request details." msgstr "Disk bölümü aktarım isteği detaylarını göster." 
@@ -5880,9 +4861,6 @@ msgstr "xvpvnc konsol URL'ini göster" msgid "Size of image data (in bytes)" msgstr "İmaj verisinin boyutu (bayt cinsinden)" -msgid "Skip flavor and image name lookup." -msgstr "Flavor veya imaj ismi aramasını atla." - msgid "Snapshot to backup (name or ID)" msgstr "Yedeği alınacak anlık görüntü (isim veya ID)" @@ -5911,40 +4889,10 @@ msgstr "" "(varsayılan: ad:artan), virgülle ayrılmış olarak birden çok anahtar ve yön " "belirlenebilir" -msgid "Specific service endpoint to use" -msgstr "Kullanılacak belirli bir servis uç noktası" - msgid "Specifies if the role grant is inheritable to the sub projects" msgstr "" "Rol atamasının alt projelere miras bırakılıp bırakılmayacağını belirler" -msgid "" -"Specify a gateway for the subnet. The three options are: : " -"Specific IP address to use as the gateway, 'auto': Gateway address should " -"automatically be chosen from within the subnet itself, 'none': This subnet " -"will not use a gateway, e.g.: --gateway 192.168.9.1, --gateway auto, --" -"gateway none (default is 'auto')." -msgstr "" -"Alt ağ için bir geçit belirle. Üç seçenek: : geçit olarak " -"kullanılacak belirli bir IP adresi, 'auto': Geçit adresi ağdan otomatik " -"olarak seçilmelidir, 'none': Bu alt ağ bir geçit kullanmayacak, örn: --" -"gateway 192.168.9.1, --gateway auto, --gateway none (varsayılan 'auto')." - -msgid "" -"Specify a gateway for the subnet. The options are: : Specific IP " -"address to use as the gateway, 'none': This subnet will not use a gateway, e." -"g.: --gateway 192.168.9.1, --gateway none." -msgstr "" -"Alt ağ için bir geçit belirle. Seçenekler: : Geçit olarak " -"kullanılacak belirli bir IP adresi, 'none': Bu alt ağ geçit olarak " -"kullanılmayacak, örn: --gateway 192.168.9.1, --gateway none." - -msgid "Specify an alternate project (name or ID)" -msgstr "Alternatif bir proje belirle (isim veya ID)" - -msgid "Specify an alternate user (name or ID)" -msgstr "Alternatif bir kullanıcı belirle (isim veya ID)" - msgid "Specify if this network should be used as the default external network" msgstr "" "Bu ağın varsayılan dış ağ olarak kullanılması gerekip gerekmediğini " @@ -5957,19 +4905,6 @@ msgstr "" "auto veya none --nic'i belirtmek başka bir --nic, --network veya --port " "değeriyle kullanılamaz." -msgid "" -"Specifying the auth-key as a positional argument has been deprecated. " -"Please use the --auth-key option in the future." -msgstr "" -"Yetki anahtarını pozisyonel argüman olarak belirleme kullanımdan " -"kaldırıldı. Lütfen gelecekte --auth-key seçeneğini kullanın." - -msgid "Start server(s)." -msgstr "Sunucu(ları/yu) başlat." - -msgid "Stop server(s)." -msgstr "Sunucu(ları/yu) durdur." 
- #, python-format msgid "Subnet does not contain %(option)s %(value)s" msgstr "Ağ %(option)s %(value)s'yü içermiyor" @@ -5977,10 +4912,6 @@ msgstr "Ağ %(option)s %(value)s'yü içermiyor" msgid "Subnet on which you want to create the floating IP (name or ID)" msgstr "Yüzen IP'yi oluşturmak istediğiniz alt ağ (isim veya ID)" -#, python-format -msgid "Subnet pool does not contain prefix %s" -msgstr "%s önekini içermeyen altağ havuzu" - msgid "Subnet pool from which this subnet will obtain a CIDR (Name or ID)" msgstr "Bu alt ağın bir CIDR alacağı alt ağ havuzu (isim veya ID)" @@ -6024,242 +4955,27 @@ msgstr "" "%s'e eklenecek etiketler (birden fazla etiket ayarlamak için seçeneği tekrar " "et)" -#, python-format -msgid "Tag to be removed from the %s (repeat option to remove multiple tags)" -msgstr "" -"%s'den kaldırılacak etiket (birden fazla etiket silmek için seçenekleri " -"tekrarlayın)" - -msgid "Target hostname" -msgstr "Hedef sunucu adı" - msgid "Thaw and enable the specified volume host" msgstr "Belirtilen disk bölümü barındırıcısını çöz ve etkinleştir" -#, python-format -msgid "The %(old)s option is deprecated, please use %(new)s instead." -msgstr "" -"%(old)s seçeneği kullanımdan kaldırıldı, lütfen onun yerine %(new)s kullanın." - -msgid "The --clear-routes option is deprecated, please use --no-route instead." -msgstr "" -"--clear-routes seçeneği kullanımdan kaldırıldı, onun yerine --no-route " -"kullanın." - -msgid "The --device-id option is deprecated, please use --device instead." -msgstr "" -"--device-id seçeneği kullanımdan kaldırıldı, lütfen onun yerine --device " -"komutunu kullanın." - -msgid "The --host-id option is deprecated, please use --host instead." -msgstr "" -"--host-id seçeneği kullanımdan kaldırıldı, lütfen onun yerine --host " -"komutunu kullanın." - -msgid "The --owner option is deprecated, please use --project instead." -msgstr "" -"--owner seçeneği kullanımdan kaldırıldı, onun yerine lütfen --project " -"seçeneğini kullanın." - msgid "" "The ID of the volume backend replication target where the host will failover " "to (required)" msgstr "" "Ana bilgisayarın üstleneceği disk bölümü arkaplan kopyası ID'si (zorunlu)" -msgid "" -"The argument --type is deprecated, use service create --name " -"type instead." -msgstr "" -"--type argümanı kullanımdan kaldırılmıştır, onun yerine service create --" -"name type komutunu kullanın." - -msgid "" -"The attribute(s) of the exsiting remote volume snapshot (admin required) " -"(repeat option to specify multiple attributes) e.g.: '--remote-source source-" -"name=test_name --remote-source source-id=test_id'" -msgstr "" -"Varolan uzak birimin anlık görüntüsünün nitelikleri (yönetici gerekli) " -"(birden fazla özellik belirlemek için seçeneği tekrarlayın) örn: '--remote-" -"source source-name=test_name --remote-source source-id=test_id'" - -msgid "The last backup of the previous page (name or ID)" -msgstr "Bir önceki sayfanın son yedeği (isim veya ID)" - -msgid "The last flavor ID of the previous page" -msgstr "Bir önceki sayfanın son flavor ID'si" - -msgid "" -"The last image of the previous page. Display list of images after marker. " -"Display all images if not specified. (name or ID)" -msgstr "" -"Bir önceki sayfanın son imajı. İşaretçiden sonraki imajların listesini " -"görüntüle. Belirtilmemişse tüm imajları görüntüleyin. (isim veya ID)" - -msgid "" -"The last server of the previous page. Display list of servers after marker. " -"Display all servers if not specified. 
(name or ID)" -msgstr "" -"Bir önceki sayfanın son sunucusu. İşaretçi sonrasındaki sunucuların " -"listesini görüntüle. Belirtilmemişse tüm sunucuları görüntüleyin. (isim veya " -"ID)" - -msgid "The last snapshot ID of the previous page" -msgstr "Bir önceki sayfanın son anlık görüntü ID'si" - -msgid "The last volume ID of the previous page" -msgstr "Bir önceki sayfanın son disk bölümü ID'si" - msgid "The object to which this RBAC policy affects (name or ID)" msgstr "Bu RBAC politikasının etkilediği nesne (ad veya kimlik)" msgid "The owner project (name or ID)" msgstr "Projenin sahibi (isim veya ID)" -msgid "" -"The physical mechanism by which the virtual network is implemented. For " -"example: flat, geneve, gre, local, vlan, vxlan." -msgstr "" -"Sanal ağın uygulandığı fiziksel mekanizma. Örneğin: düz, geneve, gre, yerel, " -"vlan, vxlan." - msgid "The project to which the RBAC policy will be enforced (name or ID)" msgstr "RBAC politikası dayatılacak proje (isim veya ID)" msgid "The remote IP prefix to associate with this rule" msgstr "Bu kural ile ilişkilendirilecek uzak IP ön eki" -msgid "" -"This command has been deprecated. Please use \"floating ip create\" instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"floating ip create\" " -"kullanın." - -msgid "" -"This command has been deprecated. Please use \"floating ip delete\" instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen yerine \"floating ip delete\" " -"komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"floating ip list\" instead." -msgstr "" -"Bu komut kullanımdan kalktı. Onun yerine lütfen \"floating ip list\" " -"kullanın." - -msgid "" -"This command has been deprecated. Please use \"floating ip pool list\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen, onun yerine \"floating ip pool list" -"\" komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"floating ip show\" instead." -msgstr "" -"Bu komut önerilmiyor. Lütfen bunun yerine \"floating ip show\"ı kullanın." - -msgid "" -"This command has been deprecated. Please use \"server add fixed ip\" instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Onun yerine lütfen \"server add fixed ip\" " -"komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"server add floating ip\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırılmıştır. Onun yerine lütfen \"server add " -"floating ip\" kullanın." - -msgid "" -"This command has been deprecated. Please use \"server remove fixed ip\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"server remove fixed ip" -"\" komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"server remove floating ip\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Onun yerine lütfen \"server remove floating " -"ip\" komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"volume backup create\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Onun yerine lütfen \"volume backup create\" " -"komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"volume backup delete\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"volume backup delete\" " -"kullanın." - -msgid "" -"This command has been deprecated. Please use \"volume backup list\" instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"volume backup list\" " -"komutunu kullanın." 
- -msgid "" -"This command has been deprecated. Please use \"volume backup restore\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"volume backup restore" -"\" komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"volume backup show\" instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"volume backup show\" " -"komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"volume snapshot create\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"volume snapshot create" -"\" komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"volume snapshot delete\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"volume snapshot delete" -"\" komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"volume snapshot list\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Onun yerine lütfen \"volume snapshot list\" " -"komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"volume snapshot set\" instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen bunun yerine \"volume snapshot set\" " -"komutunu kullanın." - -msgid "" -"This command has been deprecated. Please use \"volume snapshot show\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"volume snapshot show\" " -"komutunu kullan." - -msgid "" -"This command has been deprecated. Please use \"volume snapshot unset\" " -"instead." -msgstr "" -"Bu komut kullanımdan kaldırıldı. Lütfen onun yerine \"volume snapshot unset" -"\" komutunu kullanın." - msgid "Token to be deleted" msgstr "Silinecek jeton" @@ -6283,20 +4999,12 @@ msgstr "Mimari türü" msgid "Type of hypervisor" msgstr "Arakatman türü" -msgid "" -"Type of the object that RBAC policy affects (\"qos_policy\" or \"network\")" -msgstr "" -"RBAC politikasını etkileyen nesne türü (\"qos_policy\" veya \"network\")" - msgid "URL" msgstr "URL" msgid "URL of the agent" msgstr "Ajanın URL'i" -msgid "Unique flavor ID; 'auto' creates a UUID (default: auto)" -msgstr "Benzersiz flavor ID'si; 'auto' bir UUID oluşturur (varsayılan: auto)" - msgid "Unlock server(s)" msgstr "Sunucu(ların/nun) kilidini kaldır" @@ -6344,12 +5052,6 @@ msgstr "Proje özelliklerini kaldır" msgid "Unset router properties" msgstr "Yönlendirici özelliklerini kaldır" -msgid "Unset server properties" -msgstr "Sunucu özelliklerini kaldır" - -msgid "Unset snapshot properties" -msgstr "Anlık görüntü özelliklerinin ayarını kaldır" - msgid "Unset subnet pool properties" msgstr "Altağ havuz özelliklerinin ayarlarını kaldır" @@ -6432,12 +5134,6 @@ msgstr "Gizli IP adresi kullan" msgid "Use public IP address" msgstr "Genel IP adresi kullan" -msgid "" -"Use specified volume as the config drive, or 'True' to use an ephemeral drive" -msgstr "" -"Belirtilen birimi yapılandırma sürücüsü olarak kullanın veya kısa ömürlü bir " -"sürücüyü kullanmak için 'True' kullanın" - msgid "" "Used to populate the backup_type property of the backup image (default: " "empty)" @@ -6451,9 +5147,6 @@ msgstr "Üst veri sunucusundan sunmak için kullanıcı veri dosyası" msgid "User description" msgstr "Kullanıcı tanımı" -msgid "User must be specified" -msgstr "Kullanıcı belirtilmeli" - msgid "User that is assuming authorization (name or ID)" msgstr "Yetki varsayan kullanıcı (adı veya kimliği)" @@ -6472,9 +5165,6 @@ msgstr "Gösterilecek kullanıcı (isim veya ID)" msgid "User to filter (name or 
ID)" msgstr "Filtrelenecek kullanıcı (isim veya ID)" -msgid "User to list (name or ID)" -msgstr "Listelenecek kullanıcı (isim veya ID)" - msgid "User to modify (name or ID)" msgstr "Düzenlenecek kullanıcı (isim veya ID)" @@ -6497,13 +5187,6 @@ msgstr "" msgid "VLAN ID for VLAN networks or Tunnel ID for GENEVE/GRE/VXLAN networks" msgstr "VLAN ağları için VLAN ID veya GENEVA/GRE/VXLAN ağları için Tünel ID" -msgid "" -"VNIC type for this port (direct | direct-physical | macvtap | normal | " -"baremetal | virtio-forwarder, default: normal)" -msgstr "" -"Bu bağlantı noktası için VNIC türü (direct | direct-physical | macvtap | " -"normal | baremetal | virtio-forwarder, varsayılan: normal)" - msgid "" "Validate the requirements for auto allocated topology. Does not return a " "topology." @@ -6536,25 +5219,6 @@ msgstr "" msgid "Volume name" msgstr "Disk bölümü ismi" -msgid "" -"Volume or snapshot (name or ID) must be specified if --block-device-mapping " -"is specified" -msgstr "" -"Disk bölümü veya anlık görüntü (isim veya ID), eğer --block-device-mapping " -"belirtildiyse, belirtilmek zorundadır." - -msgid "Volume size in GB (Required unless --snapshot or --source is specified)" -msgstr "" -"GB cinsinden disk bölümü boyutu (--snapshot veya --source belirtilmezse " -"gereklidir)" - -msgid "" -"Volume size in GB (Required unless --snapshot or --source or --source-" -"replicated is specified)" -msgstr "" -"GB cinsinden disk bölümü boyutu (--snapshot veya --source veya --source-" -"replicated belirtilmedikçe gereklidir)" - msgid "Volume to add (name or ID)" msgstr "Eklenecek disk bölümü (isim veya ID)" @@ -6576,12 +5240,6 @@ msgstr "Düzenlenecek disk bölümü (isim veya ID)" msgid "Volume to remove (name or ID)" msgstr "Kaldırılacak disk bölümü (isim veya ID)" -msgid "Volume to restore to (name or ID)" -msgstr "Geri yüklenecek disk bölümü (isim veya ID)" - -msgid "Volume to snapshot (name or ID)" -msgstr "Anlık görüntüsü oluşturulacak disk bölümü (isim veya ID)" - msgid "Volume to snapshot (name or ID) (default is )" msgstr "" "Anlık görüntüsü alınacak disk bölümü (isim veya ID) (varsayılan _argparse.ArgumentParser: LOG.debug('get_parser(%s)', prog_name) - parser = super(NetDetectionMixin, self).get_parser(prog_name) + parser = super().get_parser(prog_name) # type: ignore parser = self.update_parser_common(parser) LOG.debug('common parser: %s', parser) if self.is_neutron or self.is_docs_build: @@ -150,11 +159,15 @@ def update_parser_compute(self, parser): def take_action(self, parsed_args): if self.is_neutron: - return self.take_action_network(self.app.client_manager.network, - parsed_args) + return self.take_action_network( + self.app.client_manager.network, # type: ignore + parsed_args, + ) elif self.is_nova_network: - return self.take_action_compute(self.app.client_manager.compute, - parsed_args) + return self.take_action_compute( + self.app.client_manager.compute, # type: ignore + parsed_args, + ) def take_action_network(self, client, parsed_args): """Override to do something useful.""" @@ -165,8 +178,9 @@ def take_action_compute(self, client, parsed_args): pass -class NetworkAndComputeCommand(NetDetectionMixin, command.Command, - metaclass=abc.ABCMeta): +class NetworkAndComputeCommand( + NetDetectionMixin, command.Command, metaclass=abc.ABCMeta +): """Network and Compute Command Command class for commands that support implementation via @@ -174,11 +188,11 @@ class NetworkAndComputeCommand(NetDetectionMixin, command.Command, implementations for take_action() and may even have different arguments. 
""" + pass -class NetworkAndComputeDelete(NetworkAndComputeCommand, - metaclass=abc.ABCMeta): +class NetworkAndComputeDelete(NetworkAndComputeCommand, metaclass=abc.ABCMeta): """Network and Compute Delete Delete class for commands that support implementation via @@ -188,6 +202,8 @@ class NetworkAndComputeDelete(NetworkAndComputeCommand, following the rules in doc/source/command-errors.rst. """ + resource: str + def take_action(self, parsed_args): ret = 0 resources = getattr(parsed_args, self.resource, []) @@ -196,17 +212,22 @@ def take_action(self, parsed_args): self.r = r try: if self.app.client_manager.is_network_endpoint_enabled(): - self.take_action_network(self.app.client_manager.network, - parsed_args) + self.take_action_network( + self.app.client_manager.network, parsed_args + ) else: - self.take_action_compute(self.app.client_manager.compute, - parsed_args) + self.take_action_compute( + self.app.client_manager.compute, + parsed_args, + ) except Exception as e: - msg = _("Failed to delete %(resource)s with name or ID " - "'%(name_or_id)s': %(e)s") % { - "resource": self.resource, - "name_or_id": r, - "e": e, + msg = _( + "Failed to delete %(resource)s with name or ID " + "'%(name_or_id)s': %(e)s" + ) % { + "resource": self.resource, + "name_or_id": r, + "e": e, } LOG.error(msg) ret += 1 @@ -221,8 +242,9 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) -class NetworkAndComputeLister(NetDetectionMixin, command.Lister, - metaclass=abc.ABCMeta): +class NetworkAndComputeLister( + NetDetectionMixin, command.Lister, metaclass=abc.ABCMeta +): """Network and Compute Lister Lister class for commands that support implementation via @@ -230,11 +252,13 @@ class NetworkAndComputeLister(NetDetectionMixin, command.Lister, implementations for take_action() and may even have different arguments. 
""" + pass -class NetworkAndComputeShowOne(NetDetectionMixin, command.ShowOne, - metaclass=abc.ABCMeta): +class NetworkAndComputeShowOne( + NetDetectionMixin, command.ShowOne, metaclass=abc.ABCMeta +): """Network and Compute ShowOne ShowOne class for commands that support implementation via @@ -247,10 +271,12 @@ def take_action(self, parsed_args): try: if self.app.client_manager.is_network_endpoint_enabled(): return self.take_action_network( - self.app.client_manager.network, parsed_args) + self.app.client_manager.network, parsed_args + ) else: return self.take_action_compute( - self.app.client_manager.compute, parsed_args) + self.app.client_manager.compute, parsed_args + ) except openstack.exceptions.HttpException as exc: msg = _("Error while executing command: %s") % exc.message if exc.details: @@ -275,21 +301,23 @@ class NeutronCommandWithExtraArgs(command.Command): } def _get_property_converter(self, _property): - if 'type' not in _property: - converter = str - else: + if 'type' in _property: converter = self._allowed_types_dict.get(_property['type']) + else: + converter = str if not converter: raise exceptions.CommandError( - _("Type {property_type} of property {name} " - "is not supported").format( - property_type=_property['type'], - name=_property['name'])) + _( + "Type {property_type} of property {name} is not supported" + ).format( + property_type=_property['type'], name=_property['name'] + ) + ) return converter def _parse_extra_properties(self, extra_properties): - result = {} + result: dict[str, ty.Any] = {} if extra_properties: for _property in extra_properties: converter = self._get_property_converter(_property) @@ -297,31 +325,32 @@ def _parse_extra_properties(self, extra_properties): return result def get_parser(self, prog_name): - parser = super(NeutronCommandWithExtraArgs, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--extra-property', metavar='type=,name=,' - 'value=', + 'value=', dest='extra_properties', action=parseractions.MultiKeyValueAction, required_keys=['name', 'value'], optional_keys=['type'], - help=_("Additional parameters can be passed using this property. " - "Default type of the extra property is string ('str'), but " - "other types can be used as well. Available types are: " - "'dict', 'list', 'str', 'bool', 'int'. " - "In case of 'list' type, 'value' can be " - "semicolon-separated list of values. " - "For 'dict' value is semicolon-separated list of the " - "key:value pairs.") + help=_( + "Additional parameters can be passed using this property. " + "Default type of the extra property is string ('str'), but " + "other types can be used as well. Available types are: " + "'dict', 'list', 'str', 'bool', 'int'. " + "In case of 'list' type, 'value' can be " + "semicolon-separated list of values. " + "For 'dict' value is semicolon-separated list of the " + "key:value pairs." 
+ ), ) return parser class NeutronUnsetCommandWithExtraArgs(NeutronCommandWithExtraArgs): - def _parse_extra_properties(self, extra_properties): - result = {} + result: dict[str, ty.Any] = {} if extra_properties: for _property in extra_properties: result[_property['name']] = None diff --git a/openstackclient/network/utils.py b/openstackclient/network/utils.py index 4d4d18e470..30c0da0645 100644 --- a/openstackclient/network/utils.py +++ b/openstackclient/network/utils.py @@ -23,11 +23,11 @@ def transform_compute_security_group_rule(sg_rule): from_port = info.pop('from_port') to_port = info.pop('to_port') if isinstance(from_port, int) and isinstance(to_port, int): - port_range = {'port_range': "%u:%u" % (from_port, to_port)} + port_range = {'port_range': f"{from_port}:{to_port}"} elif from_port is None and to_port is None: port_range = {'port_range': ""} else: - port_range = {'port_range': "%s:%s" % (from_port, to_port)} + port_range = {'port_range': f"{from_port}:{to_port}"} info.update(port_range) if 'cidr' in info['ip_range']: info['ip_range'] = info['ip_range']['cidr'] @@ -58,12 +58,12 @@ def str2list(strlist): return result -def str2dict(strdict): +def str2dict(strdict: str) -> dict[str, str]: """Convert key1:value1;key2:value2;... string into dictionary. :param strdict: string in the form of key1:value1;key2:value2 """ - result = {} + result: dict[str, str] = {} if not strdict: return result i = 0 @@ -76,8 +76,108 @@ def str2dict(strdict): msg = _("missing value for key '%s'") raise exceptions.CommandError(msg % kv) else: - kvlist[i - 1] = "%s;%s" % (kvlist[i - 1], kv) + kvlist[i - 1] = f"{kvlist[i - 1]};{kv}" for kv in kvlist: key, sep, value = kv.partition(':') result[key] = value return result + + +def format_security_group_rule_show(obj): + data = transform_compute_security_group_rule(obj) + return zip(*sorted(data.items())) + + +def format_network_port_range(rule): + # Display port range or ICMP type and code. For example: + # - ICMP type: 'type=3' + # - ICMP type and code: 'type=3:code=0' + # - ICMP code: Not supported + # - Matching port range: '443:443' + # - Different port range: '22:24' + # - Single port: '80:80' + # - No port range: '' + port_range = '' + if is_icmp_protocol(rule['protocol']): + if rule['port_range_min']: + port_range += 'type=' + str(rule['port_range_min']) + if rule['port_range_max']: + port_range += ':code=' + str(rule['port_range_max']) + elif rule['port_range_min'] or rule['port_range_max']: + port_range_min = str(rule['port_range_min']) + port_range_max = str(rule['port_range_max']) + if rule['port_range_min'] is None: + port_range_min = port_range_max + if rule['port_range_max'] is None: + port_range_max = port_range_min + port_range = port_range_min + ':' + port_range_max + return port_range + + +def format_remote_ip_prefix(rule): + remote_ip_prefix = rule['remote_ip_prefix'] + if remote_ip_prefix is None: + ethertype = rule['ether_type'] + if ethertype == 'IPv4': + remote_ip_prefix = '0.0.0.0/0' + elif ethertype == 'IPv6': + remote_ip_prefix = '::/0' + return remote_ip_prefix + + +def convert_ipvx_case(string): + if string.lower() == 'ipv4': + return 'IPv4' + if string.lower() == 'ipv6': + return 'IPv6' + return string + + +def is_icmp_protocol(protocol): + # NOTE(rtheis): Neutron has deprecated protocol icmpv6. + # However, while the OSC CLI doesn't document the protocol, + # the code must still handle it. In addition, handle both + # protocol names and numbers. 
+ if protocol in ['icmp', 'icmpv6', 'ipv6-icmp', '1', '58']: + return True + else: + return False + + +def convert_to_lowercase(string): + return string.lower() + + +def get_protocol(parsed_args, default_protocol='any'): + protocol = default_protocol + if parsed_args.protocol is not None: + protocol = parsed_args.protocol + if hasattr(parsed_args, "proto") and parsed_args.proto is not None: + protocol = parsed_args.proto + if protocol == 'any': + protocol = None + return protocol + + +def get_ethertype(parsed_args, protocol): + ethertype = 'IPv4' + if parsed_args.ethertype is not None: + ethertype = parsed_args.ethertype + elif is_ipv6_protocol(protocol): + ethertype = 'IPv6' + return ethertype + + +def is_ipv6_protocol(protocol): + # NOTE(rtheis): Neutron has deprecated protocol icmpv6. + # However, while the OSC CLI doesn't document the protocol, + # the code must still handle it. In addition, handle both + # protocol names and numbers. + if ( + protocol is not None + and protocol.startswith('ipv6-') + or protocol in ['icmpv6', '41', '43', '44', '58', '59', '60'] + ): + return True + else: + return False diff --git a/openstackclient/network/v2/address_group.py b/openstackclient/network/v2/address_group.py index b22fd8aa1a..178c3afbf3 100644 --- a/openstackclient/network/v2/address_group.py +++ b/openstackclient/network/v2/address_group.py @@ -16,10 +16,10 @@ import logging import netaddr -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -28,12 +28,9 @@ def _get_columns(item): - column_map = {} hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) @@ -63,29 +60,28 @@ class CreateAddressGroup(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create a new Address Group") def get_parser(self, prog_name): - parser = super(CreateAddressGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar="", - help=_("New address group name") + 'name', metavar="", help=_("New address group name") ) parser.add_argument( '--description', metavar="", - help=_("New address group description") + help=_("New address group description"), ) parser.add_argument( "--address", metavar="", action='append', default=[], - help=_("IP address or CIDR " - "(repeat option to set multiple addresses)"), + help=_( + "IP address or CIDR (repeat option to set multiple addresses)" + ), ) parser.add_argument( '--project', metavar="", - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) @@ -96,7 +92,8 @@ def take_action(self, parsed_args): attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_address_group(**attrs) display_columns, columns = _get_columns(obj) @@ -109,12 +106,12 @@ class DeleteAddressGroup(command.Command): _description = _("Delete address group(s)") def get_parser(self, prog_name): - parser = super(DeleteAddressGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'address_group', metavar="", nargs='+', - 
help=_("Address group(s) to delete (name or ID)") + help=_("Address group(s) to delete (name or ID)"), ) return parser @@ -129,14 +126,19 @@ def take_action(self, parsed_args): client.delete_address_group(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete address group with " - "name or ID '%(group)s': %(e)s"), - {'group': group, 'e': e}) + LOG.error( + _( + "Failed to delete address group with " + "name or ID '%(group)s': %(e)s" + ), + {'group': group, 'e': e}, + ) if result > 0: total = len(parsed_args.address_group) - msg = (_("%(result)s of %(total)s address groups failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s address groups failed to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -144,18 +146,20 @@ class ListAddressGroup(command.Lister): _description = _("List address groups") def get_parser(self, prog_name): - parser = super(ListAddressGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--name', metavar='', - help=_("List only address groups of given name in output") + help=_("List only address groups with the specified name"), ) parser.add_argument( '--project', metavar="", - help=_("List address groups according to their project " - "(name or ID)") + help=_( + "List only address groups with the specified project " + "(name or ID)" + ), ) identity_common.add_project_domain_option_to_parser(parser) @@ -190,71 +194,79 @@ def take_action(self, parsed_args): attrs['project_id'] = project_id data = client.address_groups(**attrs) - return (column_headers, - (utils.get_item_properties( - s, columns, formatters={}, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={}, + ) + for s in data + ), + ) class SetAddressGroup(common.NeutronCommandWithExtraArgs): _description = _("Set address group properties") def get_parser(self, prog_name): - parser = super(SetAddressGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'address_group', metavar="", - help=_("Address group to modify (name or ID)") + help=_("Address group to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar="", - help=_('Set address group name') + '--name', metavar="", help=_('Set address group name') ) parser.add_argument( '--description', metavar="", - help=_('Set address group description') + help=_('Set address group description'), ) parser.add_argument( "--address", metavar="", action='append', default=[], - help=_("IP address or CIDR " - "(repeat option to set multiple addresses)"), + help=_( + "IP address or CIDR (repeat option to set multiple addresses)" + ), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_address_group( - parsed_args.address_group, - ignore_missing=False) + parsed_args.address_group, ignore_missing=False + ) attrs = {} if parsed_args.name is not None: attrs['name'] = parsed_args.name if parsed_args.description is not None: attrs['description'] = parsed_args.description attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: client.update_address_group(obj, **attrs) if parsed_args.address: client.add_addresses_to_address_group( - obj, _format_addresses(parsed_args.address)) + obj, _format_addresses(parsed_args.address) + ) class ShowAddressGroup(command.ShowOne): 
_description = _("Display address group details") def get_parser(self, prog_name): - parser = super(ShowAddressGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'address_group', metavar="", - help=_("Address group to display (name or ID)") + help=_("Address group to display (name or ID)"), ) return parser @@ -262,8 +274,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_address_group( - parsed_args.address_group, - ignore_missing=False) + parsed_args.address_group, ignore_missing=False + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) @@ -274,27 +286,30 @@ class UnsetAddressGroup(command.Command): _description = _("Unset address group properties") def get_parser(self, prog_name): - parser = super(UnsetAddressGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'address_group', metavar="", - help=_("Address group to modify (name or ID)") + help=_("Address group to modify (name or ID)"), ) parser.add_argument( "--address", metavar="", action='append', default=[], - help=_("IP address or CIDR " - "(repeat option to unset multiple addresses)"), + help=_( + "IP address or CIDR " + "(repeat option to unset multiple addresses)" + ), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_address_group( - parsed_args.address_group, - ignore_missing=False) + parsed_args.address_group, ignore_missing=False + ) if parsed_args.address: client.remove_addresses_from_address_group( - obj, _format_addresses(parsed_args.address)) + obj, _format_addresses(parsed_args.address) + ) diff --git a/openstackclient/network/v2/address_scope.py b/openstackclient/network/v2/address_scope.py index 91f581b55c..8a38dab4d5 100644 --- a/openstackclient/network/v2/address_scope.py +++ b/openstackclient/network/v2/address_scope.py @@ -15,10 +15,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -32,9 +32,7 @@ def _get_columns(item): } hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) @@ -64,23 +62,21 @@ class CreateAddressScope(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create a new Address Scope") def get_parser(self, prog_name): - parser = super(CreateAddressScope, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar="", - help=_("New address scope name") + 'name', metavar="", help=_("New address scope name") ) parser.add_argument( '--ip-version', type=int, default=4, choices=[4, 6], - help=_("IP version (default is 4)") + help=_("IP version (default is 4)"), ) parser.add_argument( '--project', metavar="", - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) @@ -88,12 +84,14 @@ def get_parser(self, prog_name): share_group.add_argument( '--share', action='store_true', - help=_('Share the address scope between projects') + help=_('Share the address scope between projects'), ) share_group.add_argument( 
'--no-share', action='store_true', - help=_('Do not share the address scope between projects (default)') + help=_( + 'Do not share the address scope between projects (default)' + ), ) return parser @@ -102,7 +100,8 @@ def take_action(self, parsed_args): client = self.app.client_manager.network attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_address_scope(**attrs) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) @@ -114,12 +113,12 @@ class DeleteAddressScope(command.Command): _description = _("Delete address scope(s)") def get_parser(self, prog_name): - parser = super(DeleteAddressScope, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'address_scope', metavar="", nargs='+', - help=_("Address scope(s) to delete (name or ID)") + help=_("Address scope(s) to delete (name or ID)"), ) return parser @@ -134,14 +133,19 @@ def take_action(self, parsed_args): client.delete_address_scope(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete address scope with " - "name or ID '%(scope)s': %(e)s"), - {'scope': scope, 'e': e}) + LOG.error( + _( + "Failed to delete address scope with " + "name or ID '%(scope)s': %(e)s" + ), + {'scope': scope, 'e': e}, + ) if result > 0: total = len(parsed_args.address_scope) - msg = (_("%(result)s of %(total)s address scopes failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s address scopes failed to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -151,12 +155,12 @@ class ListAddressScope(command.Lister): _description = _("List address scopes") def get_parser(self, prog_name): - parser = super(ListAddressScope, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--name', metavar='', - help=_("List only address scopes of given name in output") + help=_("List only address scopes with the specified name"), ) parser.add_argument( '--ip-version', @@ -164,13 +168,18 @@ def get_parser(self, prog_name): choices=[4, 6], metavar='', dest='ip_version', - help=_("List address scopes of given IP version networks (4 or 6)") + help=_( + "List only address scopes with the specified IP version " + "networks (4 or 6)" + ), ) parser.add_argument( '--project', metavar="", - help=_("List address scopes according to their project " - "(name or ID)") + help=_( + "List only address scopes with the specified project " + "(name or ID)" + ), ) identity_common.add_project_domain_option_to_parser(parser) @@ -178,12 +187,12 @@ def get_parser(self, prog_name): shared_group.add_argument( '--share', action='store_true', - help=_("List address scopes shared between projects") + help=_("List only address scopes shared between projects"), ) shared_group.add_argument( '--no-share', action='store_true', - help=_("List address scopes not shared between projects") + help=_("List only address scopes not shared between projects"), ) return parser @@ -222,10 +231,17 @@ def take_action(self, parsed_args): attrs['project_id'] = project_id data = client.address_scopes(**attrs) - return (column_headers, - (utils.get_item_properties( - s, columns, formatters={}, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={}, + ) + for s in data + ), + ) # 
TODO(rtheis): Use the SDK resource mapped attribute names once the @@ -234,27 +250,25 @@ class SetAddressScope(common.NeutronCommandWithExtraArgs): _description = _("Set address scope properties") def get_parser(self, prog_name): - parser = super(SetAddressScope, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'address_scope', metavar="", - help=_("Address scope to modify (name or ID)") + help=_("Address scope to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar="", - help=_('Set address scope name') + '--name', metavar="", help=_('Set address scope name') ) share_group = parser.add_mutually_exclusive_group() share_group.add_argument( '--share', action='store_true', - help=_('Share the address scope between projects') + help=_('Share the address scope between projects'), ) share_group.add_argument( '--no-share', action='store_true', - help=_('Do not share the address scope between projects') + help=_('Do not share the address scope between projects'), ) return parser @@ -262,8 +276,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_address_scope( - parsed_args.address_scope, - ignore_missing=False) + parsed_args.address_scope, ignore_missing=False + ) attrs = {} if parsed_args.name is not None: attrs['name'] = parsed_args.name @@ -272,7 +286,8 @@ def take_action(self, parsed_args): if parsed_args.no_share: attrs['shared'] = False attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) client.update_address_scope(obj, **attrs) @@ -280,11 +295,11 @@ class ShowAddressScope(command.ShowOne): _description = _("Display address scope details") def get_parser(self, prog_name): - parser = super(ShowAddressScope, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'address_scope', metavar="", - help=_("Address scope to display (name or ID)") + help=_("Address scope to display (name or ID)"), ) return parser @@ -292,8 +307,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_address_scope( - parsed_args.address_scope, - ignore_missing=False) + parsed_args.address_scope, ignore_missing=False + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) diff --git a/openstackclient/network/v2/default_security_group_rule.py b/openstackclient/network/v2/default_security_group_rule.py new file mode 100644 index 0000000000..24475852fb --- /dev/null +++ b/openstackclient/network/v2/default_security_group_rule.py @@ -0,0 +1,418 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""Default Security Group Rule action implementations""" + +import logging + +from osc_lib.cli import parseractions +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient import command +from openstackclient.i18n import _ +from openstackclient.network import common +from openstackclient.network import utils as network_utils + +LOG = logging.getLogger(__name__) + + +def _get_columns(item): + hidden_columns = ['location', 'name', 'revision_number'] + return utils.get_osc_show_columns_for_sdk_resource( + item, {}, hidden_columns + ) + + +class CreateDefaultSecurityGroupRule( + command.ShowOne, common.NeutronCommandWithExtraArgs +): + """Add a new security group rule to the default security group template. + + These rules will be applied to the default security groups created for any + new project. They will not be applied to any existing default security + groups. + """ + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + + parser.add_argument( + '--description', + metavar='', + help=_("Set default security group rule description"), + ) + parser.add_argument( + '--icmp-type', + metavar='', + type=int, + help=_("ICMP type for ICMP IP protocols"), + ) + parser.add_argument( + '--icmp-code', + metavar='', + type=int, + help=_("ICMP code for ICMP IP protocols"), + ) + direction_group = parser.add_mutually_exclusive_group() + direction_group.add_argument( + '--ingress', + action='store_true', + help=_("Rule will apply to incoming network traffic (default)"), + ) + direction_group.add_argument( + '--egress', + action='store_true', + help=_("Rule will apply to outgoing network traffic"), + ) + parser.add_argument( + '--ethertype', + metavar='', + choices=['IPv4', 'IPv6'], + type=network_utils.convert_ipvx_case, + help=_( + "Ethertype of network traffic " + "(IPv4, IPv6; default: based on IP protocol)" + ), + ) + remote_group = parser.add_mutually_exclusive_group() + remote_group.add_argument( + "--remote-ip", + metavar="", + help=_( + "Remote IP address block (may use CIDR notation; " + "default for IPv4 rule: 0.0.0.0/0, " + "default for IPv6 rule: ::/0)" + ), + ) + remote_group.add_argument( + "--remote-group", + metavar="", + help=_("Remote security group (ID)"), + ) + remote_group.add_argument( + "--remote-address-group", + metavar="", + help=_("Remote address group (ID)"), + ) + + parser.add_argument( + '--dst-port', + metavar='', + action=parseractions.RangeAction, + help=_( + "Destination port, may be a single port or a starting and " + "ending port range: 137:139. Required for IP protocols TCP " + "and UDP. Ignored for ICMP IP protocols." 
+ ), + ) + parser.add_argument( + '--protocol', + metavar='', + type=network_utils.convert_to_lowercase, + help=_( + "IP protocol (ah, dccp, egp, esp, gre, icmp, igmp, " + "ipv6-encap, ipv6-frag, ipv6-icmp, ipv6-nonxt, ipv6-opts, " + "ipv6-route, ospf, pgm, rsvp, sctp, tcp, udp, udplite, vrrp " + "and integer representations [0-255] or any; " + "default: any (all protocols))" + ), + ) + parser.add_argument( + '--for-default-sg', + action='store_true', + default=False, + help=_( + "Set this default security group rule to be used in all " + "default security groups created automatically for each " + "project" + ), + ) + parser.add_argument( + '--for-custom-sg', + action='store_true', + default=False, + help=_( + "Set this default security group rule to be used in all " + "custom security groups created manually by users" + ), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + # Build the create attributes. + attrs = {} + attrs['protocol'] = network_utils.get_protocol(parsed_args) + + if parsed_args.description is not None: + attrs['description'] = parsed_args.description + + # NOTE: A direction must be specified and ingress + # is the default. + if parsed_args.ingress or not parsed_args.egress: + attrs['direction'] = 'ingress' + if parsed_args.egress: + attrs['direction'] = 'egress' + + # NOTE(rtheis): Use ethertype specified else default based + # on IP protocol. + attrs['ethertype'] = network_utils.get_ethertype( + parsed_args, attrs['protocol'] + ) + + # NOTE(rtheis): Validate the port range and ICMP type and code. + # It would be ideal if argparse could do this. + if parsed_args.dst_port and ( + parsed_args.icmp_type or parsed_args.icmp_code + ): + msg = _( + 'Argument --dst-port not allowed with arguments ' + '--icmp-type and --icmp-code' + ) + raise exceptions.CommandError(msg) + if parsed_args.icmp_type is None and parsed_args.icmp_code is not None: + msg = _('Argument --icmp-type required with argument --icmp-code') + raise exceptions.CommandError(msg) + is_icmp_protocol = network_utils.is_icmp_protocol(attrs['protocol']) + if not is_icmp_protocol and ( + parsed_args.icmp_type or parsed_args.icmp_code + ): + msg = _( + 'ICMP IP protocol required with arguments ' + '--icmp-type and --icmp-code' + ) + raise exceptions.CommandError(msg) + # NOTE(rtheis): For backwards compatibility, continue ignoring + # the destination port range when an ICMP IP protocol is specified.
+ if parsed_args.dst_port and not is_icmp_protocol: + attrs['port_range_min'] = parsed_args.dst_port[0] + attrs['port_range_max'] = parsed_args.dst_port[1] + if parsed_args.icmp_type is not None and parsed_args.icmp_type >= 0: + attrs['port_range_min'] = parsed_args.icmp_type + if parsed_args.icmp_code is not None and parsed_args.icmp_code >= 0: + attrs['port_range_max'] = parsed_args.icmp_code + + if parsed_args.remote_group is not None: + attrs['remote_group_id'] = parsed_args.remote_group + elif parsed_args.remote_address_group is not None: + attrs['remote_address_group_id'] = parsed_args.remote_address_group + elif parsed_args.remote_ip is not None: + attrs['remote_ip_prefix'] = parsed_args.remote_ip + elif attrs['ethertype'] == 'IPv4': + attrs['remote_ip_prefix'] = '0.0.0.0/0' + elif attrs['ethertype'] == 'IPv6': + attrs['remote_ip_prefix'] = '::/0' + + attrs['used_in_default_sg'] = parsed_args.for_default_sg + attrs['used_in_non_default_sg'] = parsed_args.for_custom_sg + + attrs.update( + self._parse_extra_properties(parsed_args.extra_properties) + ) + + # Create and show the security group rule. + obj = client.create_default_security_group_rule(**attrs) + display_columns, columns = _get_columns(obj) + data = utils.get_item_properties(obj, columns) + return (display_columns, data) + + +class DeleteDefaultSecurityGroupRule(command.Command): + """Remove security group rule(s) from the default security group template. + + These rules will no longer be applied to the default security groups + created for any new project. They will not be removed from any existing + default security groups. + """ + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'rule', + metavar='', + nargs="+", + help=_("Default security group rule(s) to delete (ID only)"), + ) + return parser + + def take_action(self, parsed_args): + result = 0 + client = self.app.client_manager.network + for r in parsed_args.rule: + try: + obj = client.find_default_security_group_rule( + r, ignore_missing=False + ) + client.delete_default_security_group_rule(obj) + except Exception as e: + result += 1 + LOG.error( + _( + "Failed to delete default SG rule with " + "ID '%(rule_id)s': %(e)s" + ), + {'rule_id': r, 'e': e}, + ) + + if result > 0: + total = len(parsed_args.rule) + msg = _( + "%(result)s of %(total)s default rules failed to delete." + ) % {'result': result, 'total': total} + raise exceptions.CommandError(msg) + + +class ListDefaultSecurityGroupRule(command.Lister): + """List security group rules used for new default security groups. + + This shows the rules that will be added to any new default security groups + created. These rules may differ from the rules present on existing default + security groups. + """ + + def _format_network_security_group_rule(self, rule): + """Transform the SDK DefaultSecurityGroupRule object to a dict + + The SDK object gets in the way of reformatting columns...
+ Create port_range column from port_range_min and port_range_max + """ + rule = rule.to_dict() + rule['port_range'] = network_utils.format_network_port_range(rule) + rule['remote_ip_prefix'] = network_utils.format_remote_ip_prefix(rule) + return rule + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + + parser.add_argument( + '--protocol', + metavar='', + type=network_utils.convert_to_lowercase, + help=_( + "List only default rules with the specified IP protocol " + "(ah, dccp, egp, esp, gre, " + "icmp, igmp, ipv6-encap, ipv6-frag, ipv6-icmp, " + "ipv6-nonxt, ipv6-opts, ipv6-route, ospf, pgm, rsvp, " + "sctp, tcp, udp, udplite, vrrp and integer " + "representations [0-255] or any; " + "default: any (all protocols))" + ), + ) + parser.add_argument( + '--ethertype', + metavar='', + type=network_utils.convert_to_lowercase, + help=_("List default rules by the Ethertype (IPv4 or IPv6)"), + ) + direction_group = parser.add_mutually_exclusive_group() + direction_group.add_argument( + '--ingress', + action='store_true', + help=_( + "List only default rules which will be applied to incoming " + "network traffic" + ), + ) + direction_group.add_argument( + '--egress', + action='store_true', + help=_( + "List only default rules which will be applied to outgoing " + "network traffic" + ), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + column_headers = ( + 'ID', + 'IP Protocol', + 'Ethertype', + 'IP Range', + 'Port Range', + 'Direction', + 'Remote Security Group', + 'Remote Address Group', + 'Used in default Security Group', + 'Used in custom Security Group', + ) + columns = ( + 'id', + 'protocol', + 'ether_type', + 'remote_ip_prefix', + 'port_range', + 'direction', + 'remote_group_id', + 'remote_address_group_id', + 'used_in_default_sg', + 'used_in_non_default_sg', + ) + + # Get the security group rules using the requested query. + query = {} + if parsed_args.ingress: + query['direction'] = 'ingress' + if parsed_args.egress: + query['direction'] = 'egress' + if parsed_args.protocol is not None: + query['protocol'] = parsed_args.protocol + + rules = [ + self._format_network_security_group_rule(r) + for r in client.default_security_group_rules(**query) + ] + + return ( + column_headers, + ( + utils.get_dict_properties( + s, + columns, + ) + for s in rules + ), + ) + + +class ShowDefaultSecurityGroupRule(command.ShowOne): + """Show a security group rule used for new default security groups. + + This shows a rule that will be added to any new default security groups + created. This rule may not be present on existing default security groups.
+ """ + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'rule', + metavar="", + help=_("Default security group rule to display (ID only)"), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + obj = client.find_default_security_group_rule( + parsed_args.rule, ignore_missing=False + ) + # necessary for old rules that have None in this field + if not obj['remote_ip_prefix']: + obj['remote_ip_prefix'] = network_utils.format_remote_ip_prefix( + obj + ) + display_columns, columns = _get_columns(obj) + data = utils.get_item_properties(obj, columns) + return (display_columns, data) diff --git a/openstackclient/network/v2/floating_ip.py b/openstackclient/network/v2/floating_ip.py index 22096bc4bf..76b91a0ecd 100644 --- a/openstackclient/network/v2/floating_ip.py +++ b/openstackclient/network/v2/floating_ip.py @@ -9,30 +9,28 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# """IP Floating action implementations""" +from openstack import exceptions as sdk_exceptions +from osc_lib.cli import format_columns from osc_lib import utils from osc_lib.utils import tags as _tag +from openstackclient.api import compute_v2 from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common - _formatters = { - 'port_details': utils.format_dict, + 'port_details': format_columns.DictColumn, } def _get_network_columns(item): - column_map = {} hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) @@ -47,18 +45,19 @@ def _get_attrs(client_manager, parsed_args): # Name of a network could be empty string. 
if parsed_args.network is not None: - network = network_client.find_network(parsed_args.network, - ignore_missing=False) + network = network_client.find_network( + parsed_args.network, ignore_missing=False + ) attrs['floating_network_id'] = network.id if parsed_args.subnet: - subnet = network_client.find_subnet(parsed_args.subnet, - ignore_missing=False) + subnet = network_client.find_subnet( + parsed_args.subnet, ignore_missing=False + ) attrs['subnet_id'] = subnet.id if parsed_args.port: - port = network_client.find_port(parsed_args.port, - ignore_missing=False) + port = network_client.find_port(parsed_args.port, ignore_missing=False) attrs['port_id'] = port.id if parsed_args.floating_ip_address: @@ -69,7 +68,8 @@ def _get_attrs(client_manager, parsed_args): if parsed_args.qos_policy: attrs['qos_policy_id'] = network_client.find_qos_policy( - parsed_args.qos_policy, ignore_missing=False).id + parsed_args.qos_policy, ignore_missing=False + ).id if parsed_args.description is not None: attrs['description'] = parsed_args.description @@ -92,8 +92,9 @@ def _get_attrs(client_manager, parsed_args): return attrs -class CreateFloatingIP(common.NetworkAndComputeShowOne, - common.NeutronCommandWithExtraArgs): +class CreateFloatingIP( + common.NetworkAndComputeShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create floating IP") def update_parser_common(self, parser): @@ -103,7 +104,7 @@ def update_parser_common(self, parser): parser.add_argument( 'network', metavar='', - help=_("Network to allocate floating IP from (name or ID)") + help=_("Network to allocate floating IP from (name or ID)"), ) return parser @@ -112,72 +113,83 @@ def update_parser_network(self, parser): '--subnet', metavar='', help=self.enhance_help_neutron( - _("Subnet on which you want to create the floating IP " - "(name or ID)")) + _( + "Subnet on which you want to create the floating IP " + "(name or ID)" + ) + ), ) parser.add_argument( '--port', metavar='', help=self.enhance_help_neutron( - _("Port to be associated with the floating IP " - "(name or ID)")) + _("Port to be associated with the floating IP (name or ID)") + ), ) parser.add_argument( '--floating-ip-address', metavar='', dest='floating_ip_address', - help=self.enhance_help_neutron(_("Floating IP address")) + help=self.enhance_help_neutron(_("Floating IP address")), ) parser.add_argument( '--fixed-ip-address', metavar='', dest='fixed_ip_address', help=self.enhance_help_neutron( - _("Fixed IP address mapped to the floating IP")) + _("Fixed IP address mapped to the floating IP") + ), ) parser.add_argument( '--qos-policy', metavar='', help=self.enhance_help_neutron( - _("Attach QoS policy to the floating IP (name or ID)")) + _("Attach QoS policy to the floating IP (name or ID)") + ), ) parser.add_argument( '--description', metavar='', - help=self.enhance_help_neutron(_('Set floating IP description')) + help=self.enhance_help_neutron(_('Set floating IP description')), ) parser.add_argument( '--project', metavar='', - help=self.enhance_help_neutron(_("Owner's project (name or ID)")) + help=self.enhance_help_neutron(_("Owner's project (name or ID)")), ) parser.add_argument( '--dns-domain', metavar='', dest='dns_domain', help=self.enhance_help_neutron( - _("Set DNS domain for this floating IP")) + _("Set DNS domain for this floating IP") + ), ) parser.add_argument( '--dns-name', metavar='', dest='dns_name', help=self.enhance_help_neutron( - _("Set DNS name for this floating IP")) + _("Set DNS name for this floating IP") + ), ) 
identity_common.add_project_domain_option_to_parser( - parser, enhance_help=self.enhance_help_neutron) + parser, enhance_help=self.enhance_help_neutron + ) _tag.add_tag_option_to_parser_for_create( - parser, _('floating IP'), enhance_help=self.enhance_help_neutron) + parser, _('floating IP'), enhance_help=self.enhance_help_neutron + ) return parser def take_action_network(self, client, parsed_args): attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) with common.check_missing_extension_if_error( - self.app.client_manager.network, attrs): + self.app.client_manager.network, attrs + ): obj = client.create_ip(**attrs) # tags cannot be set when created, so tags need to be set later. @@ -188,7 +200,7 @@ def take_action_network(self, client, parsed_args): return (display_columns, data) def take_action_compute(self, client, parsed_args): - obj = client.api.floating_ip_create(parsed_args.network) + obj = compute_v2.create_floating_ip(client, parsed_args.network) columns = _get_columns(obj) data = utils.get_dict_properties(obj, columns) return (columns, data) @@ -206,7 +218,7 @@ def update_parser_common(self, parser): 'floating_ip', metavar="", nargs="+", - help=_("Floating IP(s) to delete (IP address or ID)") + help=_("Floating IP(s) to delete (IP address or ID)"), ) return parser @@ -218,7 +230,7 @@ def take_action_network(self, client, parsed_args): client.delete_ip(obj) def take_action_compute(self, client, parsed_args): - client.api.floating_ip_delete(self.r) + compute_v2.delete_floating_ip(client, self.r) class ListFloatingIP(common.NetworkAndComputeLister): @@ -231,61 +243,95 @@ def update_parser_network(self, parser): parser.add_argument( '--network', metavar='', + dest='networks', + action='append', help=self.enhance_help_neutron( - _("List floating IP(s) according to " - "given network (name or ID)")) + _( + "List only floating IP(s) with the specified network " + "(name or ID) " + "(repeat option to filter on multiple networks)" + ) + ), ) parser.add_argument( '--port', metavar='', + dest='ports', + action='append', help=self.enhance_help_neutron( - _("List floating IP(s) according to given port (name or ID)")) + _( + "List only floating IP(s) with the specified port " + "(name or ID) " + "(repeat option to filter on multiple ports)" + ) + ), ) parser.add_argument( '--fixed-ip-address', metavar='', help=self.enhance_help_neutron( - _("List floating IP(s) according to given fixed IP address")) + _( + "List only floating IP(s) with the specified fixed IP " + "address" + ) + ), ) parser.add_argument( '--floating-ip-address', metavar='', help=self.enhance_help_neutron( - _("List floating IP(s) according to given floating IP " - "address")) - ) - parser.add_argument( - '--long', - action='store_true', - default=False, - help=self.enhance_help_neutron( - _("List additional fields in output")) + _( + "List only floating IP(s) with the specified floating IP " + "address" + ) + ), ) parser.add_argument( '--status', metavar='', choices=['ACTIVE', 'DOWN'], help=self.enhance_help_neutron( - _("List floating IP(s) according to given status ('ACTIVE', " - "'DOWN')")) + _( + "List only floating IP(s) with the specified status " + "('ACTIVE', 'DOWN')" + ) + ), ) parser.add_argument( '--project', metavar='', help=self.enhance_help_neutron( - _("List floating IP(s) according to given project (name or " - "ID)")) + _( + "List only floating IP(s) with the specified 
project " + "(name or ID)" + ) + ), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--router', metavar='', + dest='routers', + action='append', help=self.enhance_help_neutron( - _("List floating IP(s) according to given router (name or " - "ID)")) + _( + "List only floating IP(s) with the specified router " + "(name or ID) " + "(repeat option to fiter on multiple routers)" + ) + ), ) _tag.add_tag_filtering_option_to_parser( - parser, _('floating IP'), enhance_help=self.enhance_help_neutron) + parser, _('floating IP'), enhance_help=self.enhance_help_neutron + ) + parser.add_argument( + '--long', + action='store_true', + default=False, + help=self.enhance_help_neutron( + _("List additional fields in output") + ), + ) return parser @@ -293,7 +339,7 @@ def take_action_network(self, client, parsed_args): network_client = self.app.client_manager.network identity_client = self.app.client_manager.identity - columns = ( + columns: tuple[str, ...] = ( 'id', 'floating_ip_address', 'fixed_ip_address', @@ -301,7 +347,7 @@ def take_action_network(self, client, parsed_args): 'floating_network_id', 'project_id', ) - headers = ( + headers: tuple[str, ...] = ( 'ID', 'Floating IP Address', 'Fixed IP Address', @@ -310,7 +356,7 @@ def take_action_network(self, client, parsed_args): 'Project', ) if parsed_args.long: - columns = columns + ( + columns += ( 'router_id', 'status', 'description', @@ -318,7 +364,7 @@ def take_action_network(self, client, parsed_args): 'dns_name', 'dns_domain', ) - headers = headers + ( + headers += ( 'Router', 'Status', 'Description', @@ -329,20 +375,33 @@ def take_action_network(self, client, parsed_args): query = {} - if parsed_args.network is not None: - network = network_client.find_network(parsed_args.network, - ignore_missing=False) - query['floating_network_id'] = network.id - if parsed_args.port is not None: - port = network_client.find_port(parsed_args.port, - ignore_missing=False) - query['port_id'] = port.id + if parsed_args.networks is not None: + network_ids = [] + for network in parsed_args.networks: + network_id = network_client.find_network( + network, ignore_missing=False + ).id + network_ids.append(network_id) + query['floating_network_id'] = network_ids + + if parsed_args.ports is not None: + port_ids = [] + for port in parsed_args.ports: + port_id = network_client.find_port( + port, ignore_missing=False + ).id + port_ids.append(port_id) + query['port_id'] = port_ids + if parsed_args.fixed_ip_address is not None: query['fixed_ip_address'] = parsed_args.fixed_ip_address + if parsed_args.floating_ip_address is not None: query['floating_ip_address'] = parsed_args.floating_ip_address + if parsed_args.status: query['status'] = parsed_args.status + if parsed_args.project is not None: project = identity_common.find_project( identity_client, @@ -350,30 +409,44 @@ def take_action_network(self, client, parsed_args): parsed_args.project_domain, ) query['project_id'] = project.id - if parsed_args.router is not None: - router = network_client.find_router(parsed_args.router, - ignore_missing=False) - query['router_id'] = router.id - _tag.get_tag_filtering_args(parsed_args, query) + if parsed_args.routers is not None: + router_ids = [] + for router in parsed_args.routers: + router_id = network_client.find_router( + router, ignore_missing=False + ).id + router_ids.append(router_id) + query['router_id'] = router_ids - data = client.ips(**query) + _tag.get_tag_filtering_args(parsed_args, query) - return (headers, - (utils.get_item_properties( - s, 
columns, + try: + data = list(client.ips(**query)) + except sdk_exceptions.NotFoundException: + data = [] + + return ( + headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) def take_action_compute(self, client, parsed_args): - columns = ( + columns: tuple[str, ...] = ( 'ID', 'IP', 'Fixed IP', 'Instance ID', 'Pool', ) - headers = ( + headers: tuple[str, ...] = ( 'ID', 'Floating IP Address', 'Fixed IP Address', @@ -381,50 +454,58 @@ def take_action_compute(self, client, parsed_args): 'Pool', ) - data = client.api.floating_ip_list() - - return (headers, - (utils.get_dict_properties( - s, columns, + objs = compute_v2.list_floating_ips(client) + return ( + headers, + ( + utils.get_dict_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in objs + ), + ) class SetFloatingIP(common.NeutronCommandWithExtraArgs): _description = _("Set floating IP Properties") def get_parser(self, prog_name): - parser = super(SetFloatingIP, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'floating_ip', metavar='', - help=_("Floating IP to modify (IP address or ID)")) + help=_("Floating IP to modify (IP address or ID)"), + ) parser.add_argument( '--port', metavar='', - help=_("Associate the floating IP with port (name or ID)")), + help=_("Associate the floating IP with port (name or ID)"), + ) parser.add_argument( '--fixed-ip-address', metavar='', dest='fixed_ip_address', - help=_("Fixed IP of the port " - "(required only if port has multiple IPs)") + help=_( + "Fixed IP of the port (required only if port has multiple IPs)" + ), ) parser.add_argument( '--description', metavar='', - help=_('Set floating IP description') + help=_('Set floating IP description'), ) qos_policy_group = parser.add_mutually_exclusive_group() qos_policy_group.add_argument( '--qos-policy', metavar='', - help=_("Attach QoS policy to the floating IP (name or ID)") + help=_("Attach QoS policy to the floating IP (name or ID)"), ) qos_policy_group.add_argument( '--no-qos-policy', action='store_true', - help=_("Remove the QoS policy attached to the floating IP") + help=_("Remove the QoS policy attached to the floating IP"), ) _tag.add_tag_option_to_parser_for_set(parser, _('floating IP')) @@ -439,8 +520,7 @@ def take_action(self, parsed_args): ignore_missing=False, ) if parsed_args.port: - port = client.find_port(parsed_args.port, - ignore_missing=False) + port = client.find_port(parsed_args.port, ignore_missing=False) attrs['port_id'] = port.id if parsed_args.fixed_ip_address: @@ -451,13 +531,15 @@ def take_action(self, parsed_args): if parsed_args.qos_policy: attrs['qos_policy_id'] = client.find_qos_policy( - parsed_args.qos_policy, ignore_missing=False).id + parsed_args.qos_policy, ignore_missing=False + ).id if 'no_qos_policy' in parsed_args and parsed_args.no_qos_policy: attrs['qos_policy_id'] = None attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: client.update_ip(obj, **attrs) @@ -473,7 +555,7 @@ def update_parser_common(self, parser): parser.add_argument( 'floating_ip', metavar="", - help=_("Floating IP to display (IP address or ID)") + help=_("Floating IP to display (IP address or ID)"), ) return parser @@ -487,7 +569,7 @@ def take_action_network(self, client, parsed_args): return (display_columns, data) def take_action_compute(self, client, parsed_args): - obj = 
client.api.floating_ip_find(parsed_args.floating_ip) + obj = compute_v2.get_floating_ip(client, parsed_args.floating_ip) columns = _get_columns(obj) data = utils.get_dict_properties(obj, columns) return (columns, data) @@ -497,22 +579,23 @@ class UnsetFloatingIP(common.NeutronCommandWithExtraArgs): _description = _("Unset floating IP Properties") def get_parser(self, prog_name): - parser = super(UnsetFloatingIP, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'floating_ip', metavar='', - help=_("Floating IP to disassociate (IP address or ID)")) + help=_("Floating IP to disassociate (IP address or ID)"), + ) parser.add_argument( '--port', action='store_true', default=False, - help=_("Disassociate any port associated with the floating IP") + help=_("Disassociate any port associated with the floating IP"), ) parser.add_argument( '--qos-policy', action='store_true', default=False, - help=_("Remove the QoS policy attached to the floating IP") + help=_("Remove the QoS policy attached to the floating IP"), ) _tag.add_tag_option_to_parser_for_unset(parser, _('floating IP')) @@ -524,13 +607,14 @@ def take_action(self, parsed_args): parsed_args.floating_ip, ignore_missing=False, ) - attrs = {} + attrs: dict[str, None] = {} if parsed_args.port: attrs['port_id'] = None if parsed_args.qos_policy: attrs['qos_policy_id'] = None attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: client.update_ip(obj, **attrs) diff --git a/openstackclient/network/v2/floating_ip_pool.py b/openstackclient/network/v2/floating_ip_pool.py index 32852004c5..2030eb8ad6 100644 --- a/openstackclient/network/v2/floating_ip_pool.py +++ b/openstackclient/network/v2/floating_ip_pool.py @@ -9,14 +9,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# """Floating IP Pool action implementations""" - from osc_lib import exceptions -from osc_lib import utils +from openstackclient.api import compute_v2 from openstackclient.i18n import _ from openstackclient.network import common @@ -25,17 +23,16 @@ class ListFloatingIPPool(common.NetworkAndComputeLister): _description = _("List pools of floating IP addresses") def take_action_network(self, client, parsed_args): - msg = _("Floating ip pool operations are only available for " - "Compute v2 network.") + msg = _( + "Floating ip pool operations are only available for " + "Compute v2 network." 
+ ) raise exceptions.CommandError(msg) def take_action_compute(self, client, parsed_args): - columns = ( - 'Name', - ) - data = client.api.floating_ip_pool_list() + columns = ('Name',) + data = [ + (x['name'],) for x in compute_v2.list_floating_ip_pools(client) + ] - return (columns, - (utils.get_dict_properties( - s, columns, - ) for s in data)) + return (columns, data) diff --git a/openstackclient/network/v2/floating_ip_port_forwarding.py b/openstackclient/network/v2/floating_ip_port_forwarding.py index 0156af8e04..cf770c2cdc 100644 --- a/openstackclient/network/v2/floating_ip_port_forwarding.py +++ b/openstackclient/network/v2/floating_ip_port_forwarding.py @@ -14,11 +14,12 @@ """Floating IP Port Forwarding action implementations""" import logging +import typing as ty -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.network import common @@ -31,8 +32,10 @@ def validate_ports_diff(ports): ports_diff = ports[-1] - ports[0] if ports_diff < 0: - msg = _("The last number in port range must be" - " greater or equal to the first") + msg = _( + "The last number in port range must be" + " greater or equal to the first" + ) raise exceptions.CommandError(msg) return ports_diff @@ -42,8 +45,10 @@ def validate_ports_match(internal_ports, external_ports): external_ports_diff = validate_ports_diff(external_ports) if internal_ports_diff != 0 and internal_ports_diff != external_ports_diff: - msg = _("The relation between internal and external ports does not " - "match the pattern 1:N and N:N") + msg = _( + "The relation between internal and external ports does not " + "match the pattern 1:N and N:N" + ) raise exceptions.CommandError(msg) @@ -81,75 +86,86 @@ def validate_port(port): def _get_columns(item): - column_map = {} hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) -class CreateFloatingIPPortForwarding(command.ShowOne, - common.NeutronCommandWithExtraArgs): +class CreateFloatingIPPortForwarding( + command.ShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create floating IP port forwarding") def get_parser(self, prog_name): - parser = super(CreateFloatingIPPortForwarding, - self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--internal-ip-address', required=True, metavar='', - help=_("The fixed IPv4 address of the network " - "port associated to the floating IP port forwarding") + help=_( + "The fixed IPv4 address of the network " + "port associated to the floating IP port forwarding" + ), ) parser.add_argument( '--port', metavar='', required=True, - help=_("The name or ID of the network port associated " - "to the floating IP port forwarding") + help=_( + "The name or ID of the network port associated " + "to the floating IP port forwarding" + ), ) parser.add_argument( '--internal-protocol-port', metavar='', required=True, - help=_("The protocol port number " - "of the network port fixed IPv4 address " - "associated to the floating IP port forwarding") + help=_( + "The protocol port number " + "of the network port fixed IPv4 address " + "associated to the floating IP port forwarding" + ), ) parser.add_argument( '--external-protocol-port', metavar='', required=True, - help=_("The protocol port number of " - "the port forwarding's floating IP address") + help=_( + "The protocol port number of " + 
"the port forwarding's floating IP address" + ), ) parser.add_argument( '--protocol', metavar='', required=True, - help=_("The protocol used in the floating IP " - "port forwarding, for instance: TCP, UDP") - ), + help=_( + "The protocol used in the floating IP " + "port forwarding, for instance: TCP, UDP" + ), + ) parser.add_argument( '--description', metavar='', - help=_("A text to describe/contextualize the use of the " - "port forwarding configuration") + help=_( + "Text to describe/contextualize the use of the " + "port forwarding configuration" + ), ) parser.add_argument( 'floating_ip', metavar='', - help=_("Floating IP that the port forwarding belongs to " - "(IP address or ID)") + help=_( + "Floating IP that the port forwarding belongs to " + "(IP address or ID)" + ), ) return parser def take_action(self, parsed_args): - attrs = {} + attrs: dict[str, ty.Any] = {} client = self.app.client_manager.network floating_ip = client.find_ip( parsed_args.floating_ip, @@ -159,8 +175,7 @@ def take_action(self, parsed_args): validate_and_assign_port_ranges(parsed_args, attrs) if parsed_args.port: - port = client.find_port(parsed_args.port, - ignore_missing=False) + port = client.find_port(parsed_args.port, ignore_missing=False) attrs['internal_port_id'] = port.id attrs['internal_ip_address'] = parsed_args.internal_ip_address attrs['protocol'] = parsed_args.protocol @@ -169,11 +184,11 @@ def take_action(self, parsed_args): attrs['description'] = parsed_args.description attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_floating_ip_port_forwarding( - floating_ip.id, - **attrs + floating_ip.id, **attrs ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) @@ -184,19 +199,20 @@ class DeleteFloatingIPPortForwarding(command.Command): _description = _("Delete floating IP port forwarding") def get_parser(self, prog_name): - parser = super(DeleteFloatingIPPortForwarding, - self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'floating_ip', metavar='', - help=_("Floating IP that the port forwarding belongs to " - "(IP address or ID)") + help=_( + "Floating IP that the port forwarding belongs to " + "(IP address or ID)" + ), ) parser.add_argument( 'port_forwarding_id', nargs="+", metavar="", - help=_("The ID of the floating IP port forwarding(s) to delete") + help=_("The ID of the floating IP port forwarding(s) to delete"), ) return parser @@ -217,13 +233,18 @@ def take_action(self, parsed_args): ) except Exception as e: result += 1 - LOG.error(_("Failed to delete floating IP port forwarding " - "'%(port_forwarding_id)s': %(e)s"), - {'port_forwarding_id': port_forwarding_id, 'e': e}) + LOG.error( + _( + "Failed to delete floating IP port forwarding " + "'%(port_forwarding_id)s': %(e)s" + ), + {'port_forwarding_id': port_forwarding_id, 'e': e}, + ) if result > 0: total = len(parsed_args.port_forwarding_id) - msg = (_("%(result)s of %(total)s Port forwarding failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s Port forwarding failed to delete." 
+ ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -231,31 +252,39 @@ class ListFloatingIPPortForwarding(command.Lister): _description = _("List floating IP port forwarding") def get_parser(self, prog_name): - parser = super(ListFloatingIPPortForwarding, - self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'floating_ip', metavar='', - help=_("Floating IP that the port forwarding belongs to " - "(IP address or ID)") + help=_( + "Floating IP that the port forwarding belongs to " + "(IP address or ID)" + ), ) parser.add_argument( '--port', metavar='', - help=_("Filter the list result by the ID or name of " - "the internal network port") + help=_( + "List only floating IP port forwardings with the " + "specified internal network port (name or ID)" + ), ) parser.add_argument( '--external-protocol-port', metavar='', dest='external_protocol_port', - help=_("Filter the list result by the " - "protocol port number of the floating IP") + help=_( + "List only floating IP port forwardings with the " + "specified external protocol port number" + ), ) parser.add_argument( '--protocol', - metavar='protocol', - help=_("Filter the list result by the port protocol") + metavar='', + help=_( + "List only floating IP port forwardings with the " + "specified protocol number" + ), ) return parser @@ -289,8 +318,7 @@ def take_action(self, parsed_args): query = {} if parsed_args.port: - port = client.find_port(parsed_args.port, - ignore_missing=False) + port = client.find_port(parsed_args.port, ignore_missing=False) query['internal_port_id'] = port.id external_port = parsed_args.external_protocol_port if external_port: @@ -298,7 +326,8 @@ def take_action(self, parsed_args): query['external_port_range'] = external_port else: query['external_port'] = int( - parsed_args.external_protocol_port) + parsed_args.external_protocol_port + ) if parsed_args.protocol is not None: query['protocol'] = parsed_args.protocol @@ -309,66 +338,83 @@ def take_action(self, parsed_args): data = client.floating_ip_port_forwardings(obj, **query) - return (headers, - (utils.get_item_properties( - s, columns, + return ( + headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetFloatingIPPortForwarding(common.NeutronCommandWithExtraArgs): _description = _("Set floating IP Port Forwarding Properties") def get_parser(self, prog_name): - parser = super(SetFloatingIPPortForwarding, - self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'floating_ip', metavar='', - help=_("Floating IP that the port forwarding belongs to " - "(IP address or ID)") + help=_( + "Floating IP that the port forwarding belongs to " + "(IP address or ID)" + ), ) parser.add_argument( 'port_forwarding_id', metavar='', - help=_("The ID of the floating IP port forwarding") + help=_("The ID of the floating IP port forwarding"), ) parser.add_argument( '--port', metavar='', - help=_("The ID of the network port associated to " - "the floating IP port forwarding") + help=_( + "The ID of the network port associated to " + "the floating IP port forwarding" + ), ) parser.add_argument( '--internal-ip-address', metavar='', - help=_("The fixed IPv4 address of the network port " - "associated to the floating IP port forwarding") + help=_( + "The fixed IPv4 address of the network port " + "associated to the floating IP port forwarding" + ), ) parser.add_argument( '--internal-protocol-port', metavar='', - 
help=_("The TCP/UDP/other protocol port number of the " - "network port fixed IPv4 address associated to " - "the floating IP port forwarding") + help=_( + "The TCP/UDP/other protocol port number of the " + "network port fixed IPv4 address associated to " + "the floating IP port forwarding" + ), ) parser.add_argument( '--external-protocol-port', metavar='', - help=_("The TCP/UDP/other protocol port number of the " - "port forwarding's floating IP address") + help=_( + "The TCP/UDP/other protocol port number of the " + "port forwarding's floating IP address" + ), ) parser.add_argument( '--protocol', metavar='', choices=['tcp', 'udp'], - help=_("The IP protocol used in the floating IP port forwarding") - ), + help=_("The IP protocol used in the floating IP port forwarding"), + ) parser.add_argument( '--description', metavar='', - help=_("A text to describe/contextualize the use of " - "the port forwarding configuration") + help=_( + "Text to describe/contextualize the use of " + "the port forwarding configuration" + ), ) return parser @@ -382,8 +428,7 @@ def take_action(self, parsed_args): attrs = {} if parsed_args.port: - port = client.find_port(parsed_args.port, - ignore_missing=False) + port = client.find_port(parsed_args.port, ignore_missing=False) attrs['internal_port_id'] = port.id if parsed_args.internal_ip_address: @@ -398,28 +443,31 @@ def take_action(self, parsed_args): attrs['description'] = parsed_args.description attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) client.update_floating_ip_port_forwarding( - floating_ip.id, parsed_args.port_forwarding_id, **attrs) + floating_ip.id, parsed_args.port_forwarding_id, **attrs + ) class ShowFloatingIPPortForwarding(command.ShowOne): _description = _("Display floating IP Port Forwarding details") def get_parser(self, prog_name): - parser = super(ShowFloatingIPPortForwarding, - self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'floating_ip', metavar='', - help=_("Floating IP that the port forwarding belongs to " - "(IP address or ID)") + help=_( + "Floating IP that the port forwarding belongs to " + "(IP address or ID)" + ), ) parser.add_argument( 'port_forwarding_id', metavar="", - help=_("The ID of the floating IP port forwarding") + help=_("The ID of the floating IP port forwarding"), ) return parser diff --git a/openstackclient/network/v2/ip_availability.py b/openstackclient/network/v2/ip_availability.py index b065ab1ea9..f78c7ec866 100644 --- a/openstackclient/network/v2/ip_availability.py +++ b/openstackclient/network/v2/ip_availability.py @@ -14,9 +14,9 @@ """IP Availability Info implementations""" from osc_lib.cli import format_columns -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -26,12 +26,9 @@ def _get_columns(item): - column_map = {} hidden_columns = ['id', 'name', 'location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) @@ -41,7 +38,7 @@ class ListIPAvailability(command.Lister): _description = _("List IP availability for network") def get_parser(self, prog_name): - parser = super(ListIPAvailability, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--ip-version', type=int, @@ -49,13 +46,18 @@ def get_parser(self, 
prog_name): choices=[4, 6], metavar='', dest='ip_version', - help=_("List IP availability of given IP version " - "networks (default is 4)"), + help=_( + "List only IP availability with the specified IP version " + "networks (4 or 6, default is 4)" + ), ) parser.add_argument( '--project', metavar='', - help=_("List IP availability of given project (name or ID)"), + help=_( + "List only IP availability with the specified project " + "(name or ID)" + ), ) identity_common.add_project_domain_option_to_parser(parser) return parser @@ -89,17 +91,23 @@ def take_action(self, parsed_args): ).id filters['project_id'] = project_id data = client.network_ip_availabilities(**filters) - return (column_headers, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) class ShowIPAvailability(command.ShowOne): _description = _("Show network IP availability details") def get_parser(self, prog_name): - parser = super(ShowIPAvailability, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network', metavar="", @@ -109,10 +117,12 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network - network_id = client.find_network(parsed_args.network, - ignore_missing=False).id - obj = client.find_network_ip_availability(network_id, - ignore_missing=False) + network_id = client.find_network( + parsed_args.network, ignore_missing=False + ).id + obj = client.find_network_ip_availability( + network_id, ignore_missing=False + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters=_formatters) return (display_columns, data) diff --git a/openstackclient/network/v2/l3_conntrack_helper.py b/openstackclient/network/v2/l3_conntrack_helper.py index df153dd815..742c263964 100644 --- a/openstackclient/network/v2/l3_conntrack_helper.py +++ b/openstackclient/network/v2/l3_conntrack_helper.py @@ -15,22 +15,19 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) def _get_columns(item): - column_map = {} hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) @@ -51,31 +48,32 @@ class CreateConntrackHelper(command.ShowOne): _description = _("Create a new L3 conntrack helper") def get_parser(self, prog_name): - parser = super(CreateConntrackHelper, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_('Router for which conntrack helper will be created') + help=_('Router for which conntrack helper will be created'), ) parser.add_argument( '--helper', required=True, metavar='', - help=_('The netfilter conntrack helper module') + help=_('The netfilter conntrack helper module'), ) parser.add_argument( '--protocol', required=True, metavar='', - help=_('The network protocol for the netfilter conntrack target ' - 'rule') + help=_( + 'The network protocol for the netfilter conntrack target rule' + ), ) parser.add_argument( '--port', required=True, metavar='', type=int, - help=_('The network port for the netfilter conntrack target rule') + help=_('The network port for the netfilter conntrack target rule'), ) return parser @@ -95,17 +93,17 @@ class 
DeleteConntrackHelper(command.Command): _description = _("Delete L3 conntrack helper") def get_parser(self, prog_name): - parser = super(DeleteConntrackHelper, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_('Router that the conntrack helper belong to') + help=_('Router that the conntrack helper belongs to'), ) parser.add_argument( 'conntrack_helper_id', metavar='', nargs='+', - help=_('The ID of the conntrack helper(s) to delete') + help=_('The ID of the conntrack helper(s) to delete'), ) return parser @@ -118,17 +116,24 @@ def take_action(self, parsed_args): for ct_helper in parsed_args.conntrack_helper_id: try: client.delete_conntrack_helper( - ct_helper, router.id, ignore_missing=False) + ct_helper, router.id, ignore_missing=False + ) except Exception as e: result += 1 - LOG.error(_("Failed to delete L3 conntrack helper with " - "ID '%(ct_helper)s': %(e)s"), - {'ct_helper': ct_helper, 'e': e}) + LOG.error( + _( + "Failed to delete L3 conntrack helper with " + "ID '%(ct_helper)s': %(e)s" + ), + {'ct_helper': ct_helper, 'e': e}, + ) if result > 0: total = len(parsed_args.conntrack_helper_id) - msg = (_("%(result)s of %(total)s L3 conntrack helpers failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s L3 conntrack helpers failed " + "to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -136,27 +141,35 @@ class ListConntrackHelper(command.Lister): _description = _("List L3 conntrack helpers") def get_parser(self, prog_name): - parser = super(ListConntrackHelper, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_('Router that the conntrack helper belong to') + help=_('Router that the conntrack helper belongs to'), ) parser.add_argument( '--helper', metavar='', - help=_('The netfilter conntrack helper module') + help=_( + 'List only helpers using the specified netfilter conntrack ' + 'helper module' + ), ) parser.add_argument( '--protocol', metavar='', - help=_('The network protocol for the netfilter conntrack target ' - 'rule') + help=_( + 'List only helpers with the specified network protocol for ' + 'the netfilter conntrack target rule' + ), ) parser.add_argument( '--port', metavar='', - help=_('The network port for the netfilter conntrack target rule') + help=_( + 'List only helpers with the specified network port for ' + 'the netfilter conntrack target rule (name or ID)' + ), ) return parser @@ -180,43 +193,51 @@ def take_action(self, parsed_args): attrs = _get_attrs(client, parsed_args) data = client.conntrack_helpers(attrs.pop('router_id'), **attrs) - return (column_headers, - (utils.get_item_properties( - s, columns, formatters={}, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={}, + ) + for s in data + ), + ) class SetConntrackHelper(command.Command): _description = _("Set L3 conntrack helper properties") def get_parser(self, prog_name): - parser = super(SetConntrackHelper, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_('Router that the conntrack helper belong to') + help=_('Router that the conntrack helper belongs to'), ) parser.add_argument( 'conntrack_helper_id', metavar='', - help=_('The ID of the conntrack helper(s)') + help=_('The ID of the conntrack helper(s)'), ) parser.add_argument( '--helper', metavar='', - 
help=_('The netfilter conntrack helper module') + help=_('The netfilter conntrack helper module'), ) parser.add_argument( '--protocol', metavar='', - help=_('The network protocol for the netfilter conntrack target ' - 'rule') + help=_( + 'The network protocol for the netfilter conntrack target rule' + ), ) parser.add_argument( '--port', metavar='', type=int, - help=_('The network port for the netfilter conntrack target rule') + help=_('The network port for the netfilter conntrack target rule'), ) return parser @@ -225,24 +246,26 @@ def take_action(self, parsed_args): attrs = _get_attrs(client, parsed_args) if attrs: client.update_conntrack_helper( - parsed_args.conntrack_helper_id, attrs.pop('router_id'), - **attrs) + parsed_args.conntrack_helper_id, + attrs.pop('router_id'), + **attrs, + ) class ShowConntrackHelper(command.ShowOne): _description = _("Display L3 conntrack helper details") def get_parser(self, prog_name): - parser = super(ShowConntrackHelper, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_('Router that the conntrack helper belong to') + help=_('Router that the conntrack helper belongs to'), ) parser.add_argument( 'conntrack_helper_id', metavar='', - help=_('The ID of the conntrack helper') + help=_('The ID of the conntrack helper'), ) return parser @@ -251,7 +274,8 @@ def take_action(self, parsed_args): client = self.app.client_manager.network router = client.find_router(parsed_args.router, ignore_missing=False) obj = client.get_conntrack_helper( - parsed_args.conntrack_helper_id, router.id) + parsed_args.conntrack_helper_id, router.id + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) diff --git a/openstackclient/network/v2/local_ip.py b/openstackclient/network/v2/local_ip.py index e8fb5f8aac..dde6ccd0cd 100644 --- a/openstackclient/network/v2/local_ip.py +++ b/openstackclient/network/v2/local_ip.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -28,10 +28,10 @@ def _get_columns(item): - column_map = {} hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, {}, hidden_columns + ) def _get_attrs(client_manager, parsed_args): @@ -51,14 +51,16 @@ def _get_attrs(client_manager, parsed_args): ).id attrs['project_id'] = project_id if parsed_args.network: - network = network_client.find_network(parsed_args.network, - ignore_missing=False) + network = network_client.find_network( + parsed_args.network, ignore_missing=False + ) attrs['network_id'] = network.id if parsed_args.local_ip_address: attrs['local_ip_address'] = parsed_args.local_ip_address if parsed_args.local_port: - port = network_client.find_port(parsed_args.local_port, - ignore_missing=False) + port = network_client.find_port( + parsed_args.local_port, ignore_missing=False + ) attrs['local_port_id'] = port.id if parsed_args.ip_mode: attrs['ip_mode'] = parsed_args.ip_mode @@ -71,34 +73,32 @@ class CreateLocalIP(command.ShowOne): def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( - '--name', - metavar="", - help=_("New local IP name") + '--name', metavar="", help=_("New Local IP name") ) parser.add_argument( '--description', metavar="", - help=_("New 
local IP description") + help=_("Description for Local IP"), ) parser.add_argument( '--network', metavar='', - help=_("Network to allocate Local IP (name or ID)") + help=_("Network to allocate Local IP from (name or ID)"), ) parser.add_argument( '--local-port', metavar='', - help=_("Port to allocate Local IP (name or ID)") + help=_("Port to allocate Local IP from (name or ID)"), ) parser.add_argument( "--local-ip-address", metavar="", - help=_("IP address or CIDR "), + help=_("IP address or CIDR for Local IP"), ) parser.add_argument( '--ip-mode', metavar='', - help=_("local IP ip mode") + help=_("IP mode to use for Local IP"), ) identity_common.add_project_domain_option_to_parser(parser) @@ -117,7 +117,7 @@ def take_action(self, parsed_args): class DeleteLocalIP(command.Command): - _description = _("Delete local IP(s)") + _description = _("Delete Local IP(s)") def get_parser(self, prog_name): parser = super().get_parser(prog_name) @@ -125,7 +125,7 @@ def get_parser(self, prog_name): 'local_ip', metavar="", nargs='+', - help=_("Local IP(s) to delete (name or ID)") + help=_("Local IP(s) to delete (name or ID)"), ) return parser @@ -140,44 +140,46 @@ def take_action(self, parsed_args): client.delete_local_ip(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete Local IP with " - "name or ID '%(lip)s': %(e)s"), - {'lip': lip, 'e': e}) + LOG.error( + _( + "Failed to delete Local IP with " + "name or ID '%(lip)s': %(e)s" + ), + {'lip': lip, 'e': e}, + ) if result > 0: total = len(parsed_args.local_ip) - msg = (_("%(result)s of %(total)s local IPs failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s local IPs failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) class SetLocalIP(command.Command): - _description = _("Set local ip properties") + _description = _("Set Local IP properties") def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( 'local_ip', metavar="", - help=_("Local IP to modify (name or ID)") + help=_("Local IP to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar="", - help=_('Set local IP name') + '--name', metavar="", help=_('Set local IP name') ) parser.add_argument( '--description', metavar="", - help=_('Set local IP description') + help=_('Set Local IP description'), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_local_ip( - parsed_args.local_ip, - ignore_missing=False) + obj = client.find_local_ip(parsed_args.local_ip, ignore_missing=False) attrs = {} if parsed_args.name is not None: attrs['name'] = parsed_args.name @@ -188,7 +190,7 @@ def take_action(self, parsed_args): class ListLocalIP(command.Lister): - _description = _("List local IPs") + _description = _("List Local IPs") def get_parser(self, prog_name): parser = super().get_parser(prog_name) @@ -196,37 +198,38 @@ def get_parser(self, prog_name): parser.add_argument( '--name', metavar='', - help=_("List only local IPs of given name in output") + help=_("List only local IP(s) with the specified name"), ) parser.add_argument( '--project', metavar="", - help=_("List Local IPs according to their project " - "(name or ID)") + help=_( + "List only local IP(s) with the specified project (name or ID)" + ), ) parser.add_argument( '--network', metavar='', - help=_("List Local IP(s) according to " - "given network (name or ID)") + help=_( + "List only local IP(s) with the specified network (name or ID)" 
+ ), ) parser.add_argument( '--local-port', metavar='', - help=_("List Local IP(s) according to " - "given port (name or ID)") + help=_( + "List only local IP(s) with the specified port (name or ID)" + ), ) parser.add_argument( '--local-ip-address', metavar='', - help=_("List Local IP(s) according to " - "given Local IP Address") + help=_("List only local IP(s) with the specified IP address"), ) parser.add_argument( '--ip-mode', metavar='', - help=_("List Local IP(s) according to " - "given IP mode") + help=_("List only local IP(s) with the specified IP mode"), ) identity_common.add_project_domain_option_to_parser(parser) @@ -267,12 +270,14 @@ def take_action(self, parsed_args): ).id attrs['project_id'] = project_id if parsed_args.network is not None: - network = client.find_network(parsed_args.network, - ignore_missing=False) + network = client.find_network( + parsed_args.network, ignore_missing=False + ) attrs['network_id'] = network.id if parsed_args.local_port: - port = client.find_port(parsed_args.local_port, - ignore_missing=False) + port = client.find_port( + parsed_args.local_port, ignore_missing=False + ) attrs['local_port_id'] = port.id if parsed_args.local_ip_address: attrs['local_ip_address'] = parsed_args.local_ip_address @@ -280,30 +285,35 @@ def take_action(self, parsed_args): attrs['ip_mode'] = parsed_args.ip_mode data = client.local_ips(**attrs) - return (column_headers, - (utils.get_item_properties(s, - columns, - formatters={},) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={}, + ) + for s in data + ), + ) class ShowLocalIP(command.ShowOne): - _description = _("Display local IP details") + _description = _("Display Local IP details") def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( 'local_ip', metavar="", - help=_("Local IP to display (name or ID)") + help=_("Local IP to display (name or ID)"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_local_ip( - parsed_args.local_ip, - ignore_missing=False) + obj = client.find_local_ip(parsed_args.local_ip, ignore_missing=False) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) diff --git a/openstackclient/network/v2/local_ip_association.py b/openstackclient/network/v2/local_ip_association.py index 4cd7707ae2..123faa67ce 100644 --- a/openstackclient/network/v2/local_ip_association.py +++ b/openstackclient/network/v2/local_ip_association.py @@ -17,10 +17,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -28,10 +28,10 @@ def _get_columns(item): - column_map = {} hidden_columns = ['location', 'name', 'id', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, {}, hidden_columns + ) class CreateLocalIPAssociation(command.ShowOne): @@ -42,18 +42,19 @@ def get_parser(self, prog_name): parser.add_argument( 'local_ip', metavar='', - help=_("Local IP that the port association belongs to " - "(Name or ID)") + help=_( + "Local IP that the port association belongs to (Name or ID)" + ), ) parser.add_argument( 'fixed_port', metavar='', - help=_("The ID or Name of Port to allocate Local IP Association") + help=_("The ID or Name of Port to allocate Local IP 
Association"), ) parser.add_argument( '--fixed-ip', metavar='', - help=_("Fixed IP for Local IP Association") + help=_("Fixed IP for Local IP Association"), ) identity_common.add_project_domain_option_to_parser(parser) @@ -64,8 +65,7 @@ def take_action(self, parsed_args): client = self.app.client_manager.network attrs = {} - port = client.find_port(parsed_args.fixed_port, - ignore_missing=False) + port = client.find_port(parsed_args.fixed_port, ignore_missing=False) attrs['fixed_port_id'] = port.id if parsed_args.fixed_ip: attrs['fixed_ip'] = parsed_args.fixed_ip @@ -88,14 +88,15 @@ def get_parser(self, prog_name): parser.add_argument( 'local_ip', metavar="", - help=_("Local IP that the port association belongs to " - "(Name or ID)") + help=_( + "Local IP that the port association belongs to (Name or ID)" + ), ) parser.add_argument( 'fixed_port_id', nargs="+", metavar="", - help=_("The fixed port ID of Local IP Association") + help=_("The fixed port ID of Local IP Association"), ) return parser @@ -116,15 +117,21 @@ def take_action(self, parsed_args): ) except Exception as e: result += 1 - LOG.error(_("Failed to delete Local IP Association with " - "fixed port " - "name or ID '%(fixed_port_id)s': %(e)s"), - {'fixed port ID': fixed_port_id, 'e': e}) + LOG.error( + _( + "Failed to delete Local IP Association with " + "fixed port " + "name or ID '%(fixed_port_id)s': %(e)s" + ), + {'fixed_port_id': fixed_port_id, 'e': e}, + ) if result > 0: total = len(parsed_args.fixed_port_id) - msg = (_("%(result)s of %(total)s Local IP Associations failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s Local IP Associations failed " + "to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -137,23 +144,27 @@ def get_parser(self, prog_name): parser.add_argument( 'local_ip', metavar='', - help=_("Local IP that port associations belongs to") + help=_("Local IP that port associations belongs to"), ) parser.add_argument( '--fixed-port', metavar='', - help=_("Filter the list result by the ID or name of " - "the fixed port") + help=_( + "List only local IP assocations with the specified fixed IP " + "port (name or ID)" + ), ) parser.add_argument( '--fixed-ip', metavar='', - help=_("Filter the list result by fixed ip") + help=_( + "List only local IP associations with the specified fixed IP" + ), ) parser.add_argument( '--host', metavar='', - help=_("Filter the list result by given host") + help=_("List only local IP associations with the specified host"), ) identity_common.add_project_domain_option_to_parser(parser) @@ -173,7 +184,7 @@ def take_action(self, parsed_args): 'Local IP Address', 'Fixed port ID', 'Fixed IP', - 'Host' + 'Host', ) attrs = {} obj = client.find_local_ip( @@ -181,8 +192,9 @@ def take_action(self, parsed_args): ignore_missing=False, ) if parsed_args.fixed_port: - port = client.find_port(parsed_args.fixed_port, - ignore_missing=False) + port = client.find_port( + parsed_args.fixed_port, ignore_missing=False + ) attrs['fixed_port_id'] = port.id if parsed_args.fixed_ip: attrs['fixed_ip'] = parsed_args.fixed_ip @@ -191,7 +203,10 @@ def take_action(self, parsed_args): data = client.local_ip_associations(obj, **attrs) - return (column_headers, - (utils.get_item_properties(s, - columns, - formatters={}) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties(s, columns, formatters={}) + for s in data + ), + ) diff --git a/openstackclient/network/v2/ndp_proxy.py 
b/openstackclient/network/v2/ndp_proxy.py index 25b287f3af..78a7ae9117 100644 --- a/openstackclient/network/v2/ndp_proxy.py +++ b/openstackclient/network/v2/ndp_proxy.py @@ -14,12 +14,13 @@ # under the License. """Router NDP proxy action implementations""" + import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -28,10 +29,10 @@ def _get_columns(item): - column_map = {} hidden_columns = ['location'] return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, {}, hidden_columns + ) class CreateNDPProxy(command.ShowOne): @@ -40,31 +41,36 @@ class CreateNDPProxy(command.ShowOne): def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( - 'router', - metavar='', - help=_("The name or ID of a router")) + 'router', metavar='', help=_("The name or ID of a router") + ) parser.add_argument( - '--name', - metavar='', - help=_("New NDP proxy name") + '--name', metavar='', help=_("New NDP proxy name") ) parser.add_argument( '--port', metavar='', required=True, - help=_("The name or ID of the network port associated " - "to the NDP proxy")) + help=_( + "The name or ID of the network port associated " + "to the NDP proxy" + ), + ) parser.add_argument( '--ip-address', metavar='', - help=_("The IPv6 address that is to be proxied. In case the port " - "has multiple addresses assigned, use this option to " - "select which address is to be used.")) + help=_( + "The IPv6 address that is to be proxied. In case the port " + "has multiple addresses assigned, use this option to " + "select which address is to be used." + ), + ) parser.add_argument( '--description', metavar='', - help=_("A text to describe/contextualize the use of the " - "NDP proxy configuration") + help=_( + "Text to describe/contextualize the use of the " + "NDP proxy configuration" + ), ) return parser @@ -81,8 +87,7 @@ def take_action(self, parsed_args): if parsed_args.ip_address: attrs['ip_address'] = parsed_args.ip_address - port = client.find_port(parsed_args.port, - ignore_missing=False) + port = client.find_port(parsed_args.port, ignore_missing=False) attrs['port_id'] = port.id if parsed_args.description is not None: @@ -103,7 +108,7 @@ def get_parser(self, prog_name): 'ndp_proxy', nargs="+", metavar="", - help=_("NDP proxy(s) to delete (name or ID)") + help=_("NDP proxy(s) to delete (name or ID)"), ) return parser @@ -117,13 +122,15 @@ def take_action(self, parsed_args): client.delete_ndp_proxy(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete NDP proxy " - "'%(ndp_proxy)s': %(e)s"), - {'ndp_proxy': ndp_proxy, 'e': e}) + LOG.error( + _("Failed to delete NDP proxy '%(ndp_proxy)s': %(e)s"), + {'ndp_proxy': ndp_proxy, 'e': e}, + ) if result > 0: total = len(parsed_args.ndp_proxy) - msg = (_("%(result)s of %(total)s NDP Proxy failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s NDP proxies failed to delete." 
+ ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -135,27 +142,38 @@ def get_parser(self, prog_name): parser.add_argument( '--router', metavar='', - help=_("List only NDP proxies belong to this router (name or ID)") + help=_( + "List only NDP proxies associated with the specified router " + "(name or ID)" + ), ) parser.add_argument( '--port', metavar='', - help=_("List only NDP proxies assocate to this port (name or ID)") + help=_( + "List only NDP proxies associated with the specified port " + "(name or ID)" + ), ) parser.add_argument( '--ip-address', - metavar='ip-address', + metavar='', - help=_("List only NDP proxies according to their IPv6 address") + help=_( + "List only NDP proxies associated with the specified " + "IPv6 address" + ), ) parser.add_argument( '--project', metavar='', - help=_("List NDP proxies according to their project (name or ID)") + help=_( + "List only NDP proxies with the specified project (name or ID)" + ), ) parser.add_argument( '--name', metavar='', - help=_("List NDP proxies according to their name") + help=_("List only NDP proxies with the specified name"), ) identity_common.add_project_domain_option_to_parser(parser) @@ -184,12 +202,12 @@ def take_action(self, parsed_args): query = {} if parsed_args.router: - router = client.find_router(parsed_args.router, - ignore_missing=False) + router = client.find_router( + parsed_args.router, ignore_missing=False + ) query['router_id'] = router.id if parsed_args.port: - port = client.find_port(parsed_args.port, - ignore_missing=False) + port = client.find_port(parsed_args.port, ignore_missing=False) query['port_id'] = port.id if parsed_args.ip_address is not None: query['ip_address'] = parsed_args.ip_address @@ -205,11 +223,17 @@ def take_action(self, parsed_args): data = client.ndp_proxies(**query) - return (headers, - (utils.get_item_properties( - s, columns, + return ( + headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetNDPProxy(command.Command): @@ -220,18 +244,18 @@ def get_parser(self, prog_name): parser.add_argument( 'ndp_proxy', metavar='', - help=_("The ID or name of the NDP proxy to update") + help=_("The ID or name of the NDP proxy to update"), ) parser.add_argument( - '--name', - metavar='', - help=_("Set NDP proxy name") + '--name', metavar='', help=_("Set NDP proxy name") ) parser.add_argument( '--description', metavar='', - help=_("A text to describe/contextualize the use of " - "the NDP proxy configuration") + help=_( + "Text to describe/contextualize the use of " + "the NDP proxy configuration" + ), ) return parser @@ -244,7 +268,8 @@ def take_action(self, parsed_args): attrs['name'] = parsed_args.name obj = client.find_ndp_proxy( - parsed_args.ndp_proxy, ignore_missing=False) + parsed_args.ndp_proxy, ignore_missing=False + ) client.update_ndp_proxy(obj, **attrs) @@ -256,14 +281,15 @@ def get_parser(self, prog_name): parser.add_argument( 'ndp_proxy', metavar="", - help=_("The ID or name of the NDP proxy") + help=_("The ID or name of the NDP proxy"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_ndp_proxy(parsed_args.ndp_proxy, - ignore_missing=False) + obj = client.find_ndp_proxy( + parsed_args.ndp_proxy, ignore_missing=False + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) return (display_columns, data) diff --git a/openstackclient/network/v2/network.py
b/openstackclient/network/v2/network.py index 54e2821c4f..c8e1f360a7 100644 --- a/openstackclient/network/v2/network.py +++ b/openstackclient/network/v2/network.py @@ -9,26 +9,27 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# """Network action implementations""" from cliff import columns as cliff_columns from osc_lib.cli import format_columns +from osc_lib import exceptions from osc_lib import utils from osc_lib.utils import tags as _tag +from openstackclient.api import compute_v2 from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common -class AdminStateColumn(cliff_columns.FormattableColumn): +class AdminStateColumn(cliff_columns.FormattableColumn[bool]): def human_readable(self): return 'UP' if self._value else 'DOWN' -class RouterExternalColumn(cliff_columns.FormattableColumn): +class RouterExternalColumn(cliff_columns.FormattableColumn[bool]): def human_readable(self): return 'External' if self._value else 'Internal' @@ -61,17 +62,13 @@ def _get_columns_network(item): 'tags': 'tags', } hidden_columns = ['location', 'tenant_id'] - hidden_columns = ['location'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) def _get_columns_compute(item): - column_map = {} - return utils.get_osc_show_columns_for_sdk_resource(item, column_map) + return utils.get_osc_show_columns_for_sdk_resource(item, {}) def _get_attrs_network(client_manager, parsed_args): @@ -102,12 +99,14 @@ def _get_attrs_network(client_manager, parsed_args): attrs['project_id'] = project_id # "network set" command doesn't support setting availability zone hints. - if 'availability_zone_hints' in parsed_args and \ - parsed_args.availability_zone_hints is not None: + if ( + 'availability_zone_hints' in parsed_args + and parsed_args.availability_zone_hints is not None + ): attrs['availability_zone_hints'] = parsed_args.availability_zone_hints # set description - if parsed_args.description: + if parsed_args.description is not None: attrs['description'] = parsed_args.description # set mtu @@ -132,13 +131,14 @@ def _get_attrs_network(client_manager, parsed_args): attrs['provider:segmentation_id'] = parsed_args.segmentation_id if parsed_args.qos_policy is not None: network_client = client_manager.network - _qos_policy = network_client.find_qos_policy(parsed_args.qos_policy, - ignore_missing=False) + _qos_policy = network_client.find_qos_policy( + parsed_args.qos_policy, ignore_missing=False + ) attrs['qos_policy_id'] = _qos_policy.id if 'no_qos_policy' in parsed_args and parsed_args.no_qos_policy: attrs['qos_policy_id'] = None # Update DNS network options - if parsed_args.dns_domain: + if parsed_args.dns_domain is not None: attrs['dns_domain'] = parsed_args.dns_domain return attrs @@ -162,53 +162,63 @@ def _add_additional_network_options(parser): parser.add_argument( '--provider-network-type', metavar='', - help=_("The physical mechanism by which the virtual network " - "is implemented. For example: " - "flat, geneve, gre, local, vlan, vxlan.")) + help=_( + "The physical mechanism by which the virtual network " + "is implemented. For example: " + "flat, geneve, gre, local, vlan or vxlan." 
+ ), + ) parser.add_argument( '--provider-physical-network', metavar='', dest='physical_network', - help=_("Name of the physical network over which the virtual " - "network is implemented")) + help=_( + "Name of the physical network over which the virtual " + "network is implemented" + ), + ) parser.add_argument( '--provider-segment', metavar='', dest='segmentation_id', - help=_("VLAN ID for VLAN networks or Tunnel ID for " - "GENEVE/GRE/VXLAN networks")) + help=_( + "VLAN ID for VLAN networks or Tunnel ID for " + "GENEVE/GRE/VXLAN networks" + ), + ) parser.add_argument( '--dns-domain', metavar='', dest='dns_domain', - help=_("Set DNS domain for this network " - "(requires DNS integration extension)") + help=_( + "Set DNS domain for this network " + "(requires DNS integration extension)" + ), ) # TODO(sindhu): Use the SDK resource mapped attribute names once the # OSC minimum requirements include SDK 1.0. -class CreateNetwork(common.NetworkAndComputeShowOne, - common.NeutronCommandWithExtraArgs): +class CreateNetwork( + common.NetworkAndComputeShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create new network") def update_parser_common(self, parser): parser.add_argument( - 'name', - metavar='', - help=_("New network name") + 'name', metavar='', help=_("New network name") ) share_group = parser.add_mutually_exclusive_group() share_group.add_argument( '--share', action='store_true', default=None, - help=_("Share the network between projects") + help=_("Share the network between projects"), ) share_group.add_argument( '--no-share', action='store_true', - help=_("Do not share the network between projects") + help=_("Do not share the network between projects"), ) return parser @@ -218,27 +228,27 @@ def update_parser_network(self, parser): '--enable', action='store_true', default=True, - help=self.enhance_help_neutron(_("Enable network (default)")) + help=self.enhance_help_neutron(_("Enable network (default)")), ) admin_group.add_argument( '--disable', action='store_true', - help=self.enhance_help_neutron(_("Disable network")) + help=self.enhance_help_neutron(_("Disable network")), ) parser.add_argument( '--project', metavar='', - help=self.enhance_help_neutron(_("Owner's project (name or ID)")) + help=self.enhance_help_neutron(_("Owner's project (name or ID)")), ) parser.add_argument( '--description', metavar='', - help=self.enhance_help_neutron(_("Set network description")) + help=self.enhance_help_neutron(_("Set network description")), ) parser.add_argument( '--mtu', metavar='', - help=self.enhance_help_neutron(_("Set network mtu")) + help=self.enhance_help_neutron(_("Set network mtu")), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( @@ -247,77 +257,118 @@ def update_parser_network(self, parser): dest='availability_zone_hints', metavar='', help=self.enhance_help_neutron( - _("Availability Zone in which to create this network " - "(Network Availability Zone extension required, " - "repeat option to set multiple availability zones)")) + _( + "Availability Zone in which to create this network " + "(Network Availability Zone extension required, " + "repeat option to set multiple availability zones)" + ) + ), ) port_security_group = parser.add_mutually_exclusive_group() port_security_group.add_argument( '--enable-port-security', action='store_true', help=self.enhance_help_neutron( - _("Enable port security by default for ports created on " - "this network (default)")) + _( + "Enable port security by default for ports created on " + "this 
network (default)" + ) + ), ) port_security_group.add_argument( '--disable-port-security', action='store_true', help=self.enhance_help_neutron( - _("Disable port security by default for ports created on " - "this network")) + _( + "Disable port security by default for ports created on " + "this network" + ) + ), ) external_router_grp = parser.add_mutually_exclusive_group() external_router_grp.add_argument( '--external', action='store_true', help=self.enhance_help_neutron( - _("The network has an external routing facility that's not " - "managed by Neutron and can be used as in: " - "openstack router set --external-gateway NETWORK " - "(external-net extension required)")) + _( + "The network has an external routing facility that is not " + "managed by Neutron and can be used. For example: " + "openstack router set --external-gateway NETWORK " + "(external-net extension required)" + ) + ), ) external_router_grp.add_argument( '--internal', action='store_true', help=self.enhance_help_neutron( - _("Opposite of '--external' (default)")) + _("Opposite of '--external' (default)") + ), ) default_router_grp = parser.add_mutually_exclusive_group() default_router_grp.add_argument( '--default', action='store_true', help=self.enhance_help_neutron( - _("Specify if this network should be used as the default " - "external network")) + _( + "Specify if this network should be used as the default " + "external network" + ) + ), ) default_router_grp.add_argument( '--no-default', action='store_true', help=self.enhance_help_neutron( - _("Do not use the network as the default external network " - "(default)")) + _( + "Do not use the network as the default external network " + "(default)" + ) + ), ) parser.add_argument( '--qos-policy', metavar='', help=self.enhance_help_neutron( - _("QoS policy to attach to this network (name or ID)")) + _("QoS policy to attach to this network (name or ID)") + ), ) vlan_transparent_grp = parser.add_mutually_exclusive_group() vlan_transparent_grp.add_argument( '--transparent-vlan', action='store_true', help=self.enhance_help_neutron( - _("Make the network VLAN transparent"))) + _("Make the network VLAN transparent") + ), + ) vlan_transparent_grp.add_argument( '--no-transparent-vlan', action='store_true', help=self.enhance_help_neutron( - _("Do not make the network VLAN transparent"))) + _("Do not make the network VLAN transparent") + ), + ) + + vlan_qinq_grp = parser.add_mutually_exclusive_group() + vlan_qinq_grp.add_argument( + '--qinq-vlan', + action='store_true', + help=self.enhance_help_neutron( + _("Enable VLAN QinQ (S-Tag ethtype 0x8a88) for the network") + ), + ) + vlan_qinq_grp.add_argument( + '--no-qinq-vlan', + action='store_true', + help=self.enhance_help_neutron( + _("Disable VLAN QinQ (S-Tag ethtype 0x8a88) for the network") + ), + ) _add_additional_network_options(parser) _tag.add_tag_option_to_parser_for_create( - parser, _('network'), enhance_help=self.enhance_help_neutron) + parser, _('network'), enhance_help=self.enhance_help_neutron + ) return parser def update_parser_compute(self, parser): @@ -326,7 +377,8 @@ def update_parser_compute(self, parser): metavar='', required=True, help=self.enhance_help_nova_network( - _("IPv4 subnet for fixed IPs (in CIDR notation)")) + _("IPv4 subnet for fixed IPs (in CIDR notation)") + ), ) return parser @@ -336,10 +388,35 @@ def take_action_network(self, client, parsed_args): attrs['vlan_transparent'] = True if parsed_args.no_transparent_vlan: attrs['vlan_transparent'] = False + + if parsed_args.qinq_vlan: + attrs['vlan_qinq'] = 
True + if parsed_args.no_qinq_vlan: + attrs['vlan_qinq'] = False + + if attrs.get('vlan_transparent') and attrs.get('vlan_qinq'): + msg = _( + "--transparent-vlan and --qinq-vlan cannot both be enabled " + "for the network." + ) + raise exceptions.CommandError(msg) + + if ( + parsed_args.segmentation_id + and not parsed_args.provider_network_type + ): + msg = _( + "--provider-segment requires --provider-network-type " + "to be specified." + ) + raise exceptions.CommandError(msg) + attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) with common.check_missing_extension_if_error( - self.app.client_manager.network, attrs): + self.app.client_manager.network, attrs + ): obj = client.create_network(**attrs) # tags cannot be set when created, so tags need to be set later. @@ -350,7 +427,7 @@ def take_action_network(self, client, parsed_args): def take_action_compute(self, client, parsed_args): attrs = _get_attrs_compute(self.app.client_manager, parsed_args) - obj = client.api.network_create(**attrs) + obj = compute_v2.create_network(client, **attrs) display_columns, columns = _get_columns_compute(obj) data = utils.get_dict_properties(obj, columns) return (display_columns, data) @@ -368,7 +445,7 @@ def update_parser_common(self, parser): 'network', metavar="", nargs="+", - help=_("Network(s) to delete (name or ID)") + help=_("Network(s) to delete (name or ID)"), ) return parser @@ -378,7 +455,8 @@ def take_action_network(self, client, parsed_args): client.delete_network(obj) def take_action_compute(self, client, parsed_args): - client.api.network_delete(self.r) + network = compute_v2.find_network(client, self.r) + compute_v2.delete_network(client, network['id']) # TODO(sindhu): Use the SDK resource mapped attribute names once the @@ -391,104 +469,127 @@ def update_parser_network(self, parser): router_ext_group.add_argument( '--external', action='store_true', - help=self.enhance_help_neutron(_("List external networks")) + help=self.enhance_help_neutron(_("List only external networks")), ) router_ext_group.add_argument( '--internal', action='store_true', - help=self.enhance_help_neutron(_("List internal networks")) + help=self.enhance_help_neutron(_("List only internal networks")), ) parser.add_argument( '--long', action='store_true', help=self.enhance_help_neutron( - _("List additional fields in output")) + _("List additional fields in output") + ), ) parser.add_argument( '--name', metavar='', help=self.enhance_help_neutron( - _("List networks according to their name")) + _("List only networks with the specified name") + ), ) admin_state_group = parser.add_mutually_exclusive_group() admin_state_group.add_argument( '--enable', action='store_true', - help=self.enhance_help_neutron(_("List enabled networks")) + help=self.enhance_help_neutron(_("List only enabled networks")), ) admin_state_group.add_argument( '--disable', action='store_true', - help=self.enhance_help_neutron(_("List disabled networks")) + help=self.enhance_help_neutron(_("List only disabled networks")), ) parser.add_argument( '--project', metavar='', - help=_("List networks according to their project (name or ID)") + help=_( + "List only networks with the specified project (name or ID)" + ), ) identity_common.add_project_domain_option_to_parser( - parser, enhance_help=self.enhance_help_neutron) + parser, enhance_help=self.enhance_help_neutron + ) shared_group = parser.add_mutually_exclusive_group() shared_group.add_argument( '--share',
action='store_true', help=self.enhance_help_neutron( - _("List networks shared between projects")) + _("List only networks shared between projects") + ), ) shared_group.add_argument( '--no-share', action='store_true', help=self.enhance_help_neutron( - _("List networks not shared between projects")) + _("List only networks not shared between projects") + ), ) parser.add_argument( '--status', metavar='', choices=['ACTIVE', 'BUILD', 'DOWN', 'ERROR'], help=self.enhance_help_neutron( - _("List networks according to their status " - "('ACTIVE', 'BUILD', 'DOWN', 'ERROR')")) + _( + "List only networks with the specified status " + "('ACTIVE', 'BUILD', 'DOWN', 'ERROR')" + ) + ), ) parser.add_argument( '--provider-network-type', metavar='', - choices=['flat', 'geneve', 'gre', 'local', - 'vlan', 'vxlan'], + choices=['flat', 'geneve', 'gre', 'local', 'vlan', 'vxlan'], help=self.enhance_help_neutron( - _("List networks according to their physical mechanisms. The " - "supported options are: flat, geneve, gre, local, vlan, " - "vxlan.")) + _( + "List only networks with the specified physical " + "mechanisms. " + "The supported options are: flat, geneve, gre, local, " + "vlan and vxlan." + ) + ), ) parser.add_argument( '--provider-physical-network', metavar='', dest='physical_network', help=self.enhance_help_neutron( - _("List networks according to name of the physical network")) + _( + "List only networks with the specified physical network " + "name" + ) + ), ) parser.add_argument( '--provider-segment', metavar='', dest='segmentation_id', help=self.enhance_help_neutron( - _("List networks according to VLAN ID for VLAN networks or " - "Tunnel ID for GENEVE/GRE/VXLAN networks")) + _( + "List only networks with the specified provider segment " + "ID (VLAN ID for VLAN networks or " + "Tunnel ID for GENEVE/GRE/VXLAN networks)" + ) + ), ) parser.add_argument( '--agent', metavar='', dest='agent_id', help=self.enhance_help_neutron( - _('List networks hosted by agent (ID only)')) + _('List only networks hosted by the specified agent (ID only)') + ), ) _tag.add_tag_filtering_option_to_parser( - parser, _('networks'), enhance_help=self.enhance_help_neutron) + parser, _('networks'), enhance_help=self.enhance_help_neutron + ) return parser def take_action_network(self, client, parsed_args): identity_client = self.app.client_manager.identity if parsed_args.long: - columns = ( + columns: tuple[str, ...] = ( 'id', 'name', 'status', @@ -501,7 +602,7 @@ def take_action_network(self, client, parsed_args): 'availability_zones', 'tags', ) - column_headers = ( + column_headers: tuple[str, ...]
= ( 'ID', 'Name', 'Status', @@ -515,11 +616,7 @@ def take_action_network(self, client, parsed_args): 'Tags', ) elif parsed_args.agent_id: - columns = ( - 'id', - 'name', - 'subnet_ids' - ) + columns = ('id', 'name', 'subnet_ids') column_headers = ( 'ID', 'Name', @@ -529,17 +626,19 @@ def take_action_network(self, client, parsed_args): dhcp_agent = client.get_agent(parsed_args.agent_id) data = client.dhcp_agent_hosting_networks(dhcp_agent) - return (column_headers, - (utils.get_item_properties( - s, columns, + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters=_formatters, - ) for s in data)) - else: - columns = ( - 'id', - 'name', - 'subnet_ids' + ) + for s in data + ), ) + else: + columns = ('id', 'name', 'subnet_ids') column_headers = ( 'ID', 'Name', @@ -597,11 +696,17 @@ def take_action_network(self, client, parsed_args): data = client.networks(**args) - return (column_headers, - (utils.get_item_properties( - s, columns, + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters=_formatters, - ) for s in data)) + ) + for s in data + ), + ) def take_action_compute(self, client, parsed_args): columns = ( @@ -615,13 +720,19 @@ def take_action_compute(self, client, parsed_args): 'Subnet', ) - data = client.api.network_list() + data = compute_v2.list_networks(client) - return (column_headers, - (utils.get_dict_properties( - s, columns, + return ( + column_headers, + ( + utils.get_dict_properties( + s, + columns, formatters=_formatters, - ) for s in data)) + ) + for s in data + ), + ) # TODO(sindhu): Use the SDK resource mapped attribute names once the @@ -630,99 +741,99 @@ class SetNetwork(common.NeutronCommandWithExtraArgs): _description = _("Set network properties") def get_parser(self, prog_name): - parser = super(SetNetwork, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network', metavar="", - help=_("Network to modify (name or ID)") + help=_("Network to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar='', - help=_("Set network name") + '--name', metavar='', help=_("Set network name") ) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( '--enable', action='store_true', default=None, - help=_("Enable network") + help=_("Enable network"), ) admin_group.add_argument( - '--disable', - action='store_true', - help=_("Disable network") + '--disable', action='store_true', help=_("Disable network") ) share_group = parser.add_mutually_exclusive_group() share_group.add_argument( '--share', action='store_true', default=None, - help=_("Share the network between projects") + help=_("Share the network between projects"), ) share_group.add_argument( '--no-share', action='store_true', - help=_("Do not share the network between projects") + help=_("Do not share the network between projects"), ) parser.add_argument( '--description', metavar="", - help=_("Set network description") + help=_("Set network description"), ) parser.add_argument( - '--mtu', - metavar="", - help=_("Set network mtu") + '--mtu', metavar="", help=_("Set network mtu") ) port_security_group = parser.add_mutually_exclusive_group() port_security_group.add_argument( '--enable-port-security', action='store_true', - help=_("Enable port security by default for ports created on " - "this network") + help=_( + "Enable port security by default for ports created on " + "this network" + ), ) port_security_group.add_argument( '--disable-port-security', action='store_true', - help=_("Disable 
port security by default for ports created on " - "this network") + help=_( + "Disable port security by default for ports created on " + "this network" + ), ) external_router_grp = parser.add_mutually_exclusive_group() external_router_grp.add_argument( '--external', action='store_true', - help=_("The network has an external routing facility that's not " - "managed by Neutron and can be used as in: " - "openstack router set --external-gateway NETWORK " - "(external-net extension required)") + help=_( + "The network has an external routing facility that is not " + "managed by Neutron and can be used. For example: " + "openstack router set --external-gateway NETWORK " + "(external-net extension required)." + ), ) external_router_grp.add_argument( '--internal', action='store_true', - help=_("Opposite of '--external'") + help=_("Opposite of '--external'"), ) default_router_grp = parser.add_mutually_exclusive_group() default_router_grp.add_argument( '--default', action='store_true', - help=_("Set the network as the default external network") + help=_("Set the network as the default external network"), ) default_router_grp.add_argument( '--no-default', action='store_true', - help=_("Do not use the network as the default external network") + help=_("Do not use the network as the default external network"), ) qos_group = parser.add_mutually_exclusive_group() qos_group.add_argument( '--qos-policy', metavar='', - help=_("QoS policy to attach to this network (name or ID)") + help=_("QoS policy to attach to this network (name or ID)"), ) qos_group.add_argument( '--no-qos-policy', action='store_true', - help=_("Remove the QoS policy attached to this network") + help=_("Remove the QoS policy attached to this network"), ) _tag.add_tag_option_to_parser_for_set(parser, _('network')) _add_additional_network_options(parser) @@ -734,10 +845,12 @@ def take_action(self, parsed_args): attrs = _get_attrs_network(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: with common.check_missing_extension_if_error( - self.app.client_manager.network, attrs): + self.app.client_manager.network, attrs + ): client.update_network(obj, **attrs) # tags is a subresource and it needs to be updated separately. 
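Note on the new VLAN QinQ handling introduced above in CreateNetwork.take_action_network: --qinq-vlan/--no-qinq-vlan map to a vlan_qinq attribute alongside the existing vlan_transparent one, and requesting both transparency and QinQ on the same network is rejected. Below is a minimal standalone sketch of that validation, for illustration only and not part of the patch; it assumes an argparse-style parsed_args namespace with the four boolean attributes created by the options above, and raises ValueError where the real command raises osc_lib's CommandError.

# Simplified sketch of the VLAN transparency / QinQ attribute handling shown
# above. Hypothetical helper for illustration; not part of openstackclient.
def build_vlan_attrs(parsed_args):
    attrs = {}
    if parsed_args.transparent_vlan:
        attrs['vlan_transparent'] = True
    if parsed_args.no_transparent_vlan:
        attrs['vlan_transparent'] = False
    if parsed_args.qinq_vlan:
        attrs['vlan_qinq'] = True
    if parsed_args.no_qinq_vlan:
        attrs['vlan_qinq'] = False
    # A network cannot be both VLAN-transparent and QinQ at the same time.
    if attrs.get('vlan_transparent') and attrs.get('vlan_qinq'):
        raise ValueError(
            '--transparent-vlan and --qinq-vlan cannot both be enabled '
            'for the network.'
        )
    return attrs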
@@ -751,7 +864,7 @@ def update_parser_common(self, parser): parser.add_argument( 'network', metavar="", - help=_("Network to display (name or ID)") + help=_("Network to display (name or ID)"), ) return parser @@ -762,7 +875,7 @@ def take_action_network(self, client, parsed_args): return (display_columns, data) def take_action_compute(self, client, parsed_args): - obj = client.api.network_find(parsed_args.network) + obj = compute_v2.find_network(client, parsed_args.network) display_columns, columns = _get_columns_compute(obj) data = utils.get_dict_properties(obj, columns) return (display_columns, data) @@ -772,11 +885,11 @@ class UnsetNetwork(common.NeutronUnsetCommandWithExtraArgs): _description = _("Unset network properties") def get_parser(self, prog_name): - parser = super(UnsetNetwork, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network', metavar="", - help=_("Network to modify (name or ID)") + help=_("Network to modify (name or ID)"), ) _tag.add_tag_option_to_parser_for_unset(parser, _('network')) return parser diff --git a/openstackclient/network/v2/network_agent.py b/openstackclient/network/v2/network_agent.py index f67f67bd6c..806422b102 100644 --- a/openstackclient/network/v2/network_agent.py +++ b/openstackclient/network/v2/network_agent.py @@ -17,21 +17,21 @@ from cliff import columns as cliff_columns from osc_lib.cli import format_columns -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) -class AliveColumn(cliff_columns.FormattableColumn): +class AliveColumn(cliff_columns.FormattableColumn[bool]): def human_readable(self): return ":-)" if self._value else "XXX" -class AdminStateColumn(cliff_columns.FormattableColumn): +class AdminStateColumn(cliff_columns.FormattableColumn[bool]): def human_readable(self): return 'UP' if self._value else 'DOWN' @@ -52,9 +52,7 @@ def _get_network_columns(item): } hidden_columns = ['location', 'name', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) @@ -62,19 +60,22 @@ class AddNetworkToAgent(command.Command): _description = _("Add network to an agent") def get_parser(self, prog_name): - parser = super(AddNetworkToAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--dhcp', action='store_true', - help=_('Add network to a DHCP agent')) + help=_('Add network to a DHCP agent'), + ) parser.add_argument( 'agent_id', metavar='', - help=_('Agent to which a network is added (ID only)')) + help=_('Agent to which a network is added (ID only)'), + ) parser.add_argument( 'network', metavar='', - help=_('Network to be added to an agent (name or ID)')) + help=_('Network to be added to an agent (name or ID)'), + ) return parser @@ -82,13 +83,13 @@ def take_action(self, parsed_args): client = self.app.client_manager.network agent = client.get_agent(parsed_args.agent_id) network = client.find_network( - parsed_args.network, ignore_missing=False) + parsed_args.network, ignore_missing=False + ) if parsed_args.dhcp: try: client.add_dhcp_agent_to_network(agent, network) except Exception: - msg = 'Failed to add {} to {}'.format( - network.name, agent.agent_type) + msg = f'Failed to add {network.name} to {agent.agent_type}' exceptions.CommandError(msg) @@ -96,21 +97,19 @@ class AddRouterToAgent(command.Command): _description = _("Add router 
to an agent") def get_parser(self, prog_name): - parser = super(AddRouterToAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - '--l3', - action='store_true', - help=_('Add router to an L3 agent') + '--l3', action='store_true', help=_('Add router to an L3 agent') ) parser.add_argument( 'agent_id', metavar='', - help=_("Agent to which a router is added (ID only)") + help=_("Agent to which a router is added (ID only)"), ) parser.add_argument( 'router', metavar='', - help=_("Router to be added to an agent (name or ID)") + help=_("Router to be added to an agent (name or ID)"), ) return parser @@ -127,12 +126,12 @@ class DeleteNetworkAgent(command.Command): _description = _("Delete network agent(s)") def get_parser(self, prog_name): - parser = super(DeleteNetworkAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network_agent', metavar="", nargs='+', - help=(_("Network agent(s) to delete (ID only)")) + help=(_("Network agent(s) to delete (ID only)")), ) return parser @@ -145,14 +144,19 @@ def take_action(self, parsed_args): client.delete_agent(agent, ignore_missing=False) except Exception as e: result += 1 - LOG.error(_("Failed to delete network agent with " - "ID '%(agent)s': %(e)s"), - {'agent': agent, 'e': e}) + LOG.error( + _( + "Failed to delete network agent with " + "ID '%(agent)s': %(e)s" + ), + {'agent': agent, 'e': e}, + ) if result > 0: total = len(parsed_args.network_agent) - msg = (_("%(result)s of %(total)s network agents failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s network agents failed to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -160,137 +164,154 @@ def take_action(self, parsed_args): # OSC minimum requirements include SDK 1.0. class ListNetworkAgent(command.Lister): _description = _("List network agents") + _supported_agents = { + 'bgp': 'BGP dynamic routing agent', + 'dhcp': 'DHCP agent', + 'open-vswitch': 'Open vSwitch agent', + 'linux-bridge': 'Linux bridge agent', + 'ofa': 'OFA driver agent', + 'l3': 'L3 agent', + 'loadbalancer': 'Loadbalancer agent', + 'metering': 'Metering agent', + 'metadata': 'Metadata agent', + 'macvtap': 'Macvtap agent', + 'nic': 'NIC Switch agent', + 'baremetal': 'Baremetal Node', + 'ovn-controller': 'OVN Controller agent', + 'ovn-controller-gateway': 'OVN Controller Gateway agent', + 'ovn-metadata': 'OVN Metadata agent', + 'ovn-agent': 'OVN Neutron agent', + } def get_parser(self, prog_name): - parser = super(ListNetworkAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) + supported_agents = ','.join(self._supported_agents.keys()) parser.add_argument( '--agent-type', metavar='', - choices=["bgp", "dhcp", "open-vswitch", "linux-bridge", "ofa", - "l3", "loadbalancer", "metering", "metadata", "macvtap", - "nic", "baremetal"], - help=_("List only agents with the specified agent type. " - "The supported agent types are: bgp, dhcp, open-vswitch, " - "linux-bridge, ofa, l3, loadbalancer, metering, " - "metadata, macvtap, nic, baremetal.") + choices=list(self._supported_agents.keys()), + help=_( + "List only agents with the specified agent type. " + "The supported agent types are: %(supported_agents)s." 
+ ) + % {'supported_agents': supported_agents}, ) parser.add_argument( '--host', metavar='', - help=_("List only agents running on the specified host") + help=_("List only agents running on the specified host"), ) agent_type_group = parser.add_mutually_exclusive_group() agent_type_group.add_argument( '--network', metavar='', - help=_('List agents hosting a network (name or ID)') + help=_('List agents hosting the specified network (name or ID)'), ) agent_type_group.add_argument( '--router', metavar='', - help=_('List agents hosting this router (name or ID)') + help=_('List agents hosting the specified router (name or ID)'), ) parser.add_argument( '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - columns = ( + columns: tuple[str, ...] = ( 'id', 'agent_type', 'host', 'availability_zone', 'is_alive', 'is_admin_state_up', - 'binary' + 'binary', ) - column_headers = ( + column_headers: tuple[str, ...] = ( 'ID', 'Agent Type', 'Host', 'Availability Zone', 'Alive', 'State', - 'Binary' + 'Binary', ) - key_value = { - 'bgp': 'BGP dynamic routing agent', - 'dhcp': 'DHCP agent', - 'open-vswitch': 'Open vSwitch agent', - 'linux-bridge': 'Linux bridge agent', - 'ofa': 'OFA driver agent', - 'l3': 'L3 agent', - 'loadbalancer': 'Loadbalancer agent', - 'metering': 'Metering agent', - 'metadata': 'Metadata agent', - 'macvtap': 'Macvtap agent', - 'nic': 'NIC Switch agent', - 'baremetal': 'Baremetal Node' - } - filters = {} if parsed_args.network is not None: network = client.find_network( - parsed_args.network, ignore_missing=False) + parsed_args.network, ignore_missing=False + ) data = client.network_hosting_dhcp_agents(network) elif parsed_args.router is not None: if parsed_args.long: columns += ('ha_state',) column_headers += ('HA State',) - router = client.find_router(parsed_args.router, - ignore_missing=False) + router = client.find_router( + parsed_args.router, ignore_missing=False + ) data = client.routers_hosting_l3_agents(router) else: if parsed_args.agent_type is not None: - filters['agent_type'] = key_value[parsed_args.agent_type] + filters['agent_type'] = self._supported_agents[ + parsed_args.agent_type + ] if parsed_args.host is not None: filters['host'] = parsed_args.host data = client.agents(**filters) - return (column_headers, - (utils.get_item_properties( - s, columns, formatters=_formatters, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters=_formatters, + ) + for s in data + ), + ) class RemoveNetworkFromAgent(command.Command): _description = _("Remove network from an agent.") def get_parser(self, prog_name): - parser = super(RemoveNetworkFromAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--dhcp', action='store_true', - help=_('Remove network from DHCP agent')) + help=_('Remove network from DHCP agent'), + ) parser.add_argument( 'agent_id', metavar='', - help=_('Agent to which a network is removed (ID only)')) + help=_('Agent to which a network is removed (ID only)'), + ) parser.add_argument( 'network', metavar='', - help=_('Network to be removed from an agent (name or ID)')) + help=_('Network to be removed from an agent (name or ID)'), + ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network agent = client.get_agent(parsed_args.agent_id) network = 
client.find_network( - parsed_args.network, ignore_missing=False) + parsed_args.network, ignore_missing=False + ) if parsed_args.dhcp: try: client.remove_dhcp_agent_from_network(agent, network) except Exception: - msg = 'Failed to remove {} to {}'.format( - network.name, agent.agent_type) + msg = f'Failed to remove {network.name} to {agent.agent_type}' exceptions.CommandError(msg) @@ -298,21 +319,21 @@ class RemoveRouterFromAgent(command.Command): _description = _("Remove router from an agent") def get_parser(self, prog_name): - parser = super(RemoveRouterFromAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--l3', action='store_true', - help=_('Remove router from an L3 agent') + help=_('Remove router from an L3 agent'), ) parser.add_argument( 'agent_id', metavar='', - help=_("Agent from which router will be removed (ID only)") + help=_("Agent from which router will be removed (ID only)"), ) parser.add_argument( 'router', metavar='', - help=_("Router to be removed from an agent (name or ID)") + help=_("Router to be removed from an agent (name or ID)"), ) return parser @@ -331,27 +352,23 @@ class SetNetworkAgent(command.Command): _description = _("Set network agent properties") def get_parser(self, prog_name): - parser = super(SetNetworkAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network_agent', metavar="", - help=(_("Network agent to modify (ID only)")) + help=(_("Network agent to modify (ID only)")), ) parser.add_argument( '--description', metavar='', - help=_("Set network agent description") + help=_("Set network agent description"), ) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( - '--enable', - action='store_true', - help=_("Enable network agent") + '--enable', action='store_true', help=_("Enable network agent") ) admin_group.add_argument( - '--disable', - action='store_true', - help=_("Disable network agent") + '--disable', action='store_true', help=_("Disable network agent") ) return parser @@ -374,11 +391,11 @@ class ShowNetworkAgent(command.ShowOne): _description = _("Display network agent details") def get_parser(self, prog_name): - parser = super(ShowNetworkAgent, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network_agent', metavar="", - help=(_("Network agent to display (ID only)")) + help=(_("Network agent to display (ID only)")), ) return parser @@ -386,5 +403,9 @@ def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.get_agent(parsed_args.network_agent) display_columns, columns = _get_network_columns(obj) - data = utils.get_item_properties(obj, columns, formatters=_formatters,) + data = utils.get_item_properties( + obj, + columns, + formatters=_formatters, + ) return display_columns, data diff --git a/openstackclient/network/v2/network_auto_allocated_topology.py b/openstackclient/network/v2/network_auto_allocated_topology.py index c612f053b4..1107cb709d 100644 --- a/openstackclient/network/v2/network_auto_allocated_topology.py +++ b/openstackclient/network/v2/network_auto_allocated_topology.py @@ -15,9 +15,9 @@ import logging -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -25,12 +25,9 @@ def _get_columns(item): - column_map = {} hidden_columns = ['name', 'location', 'tenant_id'] return 
utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) @@ -67,26 +64,32 @@ class CreateAutoAllocatedTopology(command.ShowOne): _description = _("Create the auto allocated topology for project") def get_parser(self, prog_name): - parser = super(CreateAutoAllocatedTopology, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--project', metavar='', - help=_("Return the auto allocated topology for a given project. " - "Default is current project") + help=_( + "Return the auto allocated topology for a given project. " + "Default is current project." + ), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--check-resources', action='store_true', - help=_("Validate the requirements for auto allocated topology. " - "Does not return a topology.") + help=_( + "Validate the requirements for auto allocated topology. " + "Does not return a topology." + ), ) parser.add_argument( '--or-show', action='store_true', default=True, - help=_("If topology exists returns the topology's " - "information (Default)") + help=_( + "If topology exists returns the topology's " + "information (default)" + ), ) return parser @@ -95,9 +98,9 @@ def check_resource_topology(self, client, parsed_args): obj = client.validate_auto_allocated_topology(parsed_args.project) columns = _format_check_resource_columns() - data = utils.get_item_properties(_format_check_resource(obj), - columns, - formatters={}) + data = utils.get_item_properties( + _format_check_resource(obj), columns, formatters={} + ) return (columns, data) @@ -122,12 +125,14 @@ class DeleteAutoAllocatedTopology(command.Command): _description = _("Delete auto allocated topology for project") def get_parser(self, prog_name): - parser = super(DeleteAutoAllocatedTopology, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--project', metavar='', - help=_('Delete auto allocated topology for a given project. ' - 'Default is the current project') + help=_( + 'Delete auto allocated topology for a given project. ' + 'Default is the current project.' 
+ ), ) identity_common.add_project_domain_option_to_parser(parser) diff --git a/openstackclient/network/v2/network_flavor.py b/openstackclient/network/v2/network_flavor.py index 864184c0c3..99993f64e8 100644 --- a/openstackclient/network/v2/network_flavor.py +++ b/openstackclient/network/v2/network_flavor.py @@ -15,10 +15,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -33,9 +33,7 @@ def _get_columns(item): hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) @@ -65,28 +63,28 @@ class AddNetworkFlavorToProfile(command.Command): _description = _("Add a service profile to a network flavor") def get_parser(self, prog_name): - parser = super( - AddNetworkFlavorToProfile, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'flavor', - metavar="", - help=_("Network flavor (name or ID)") + 'flavor', metavar="", help=_("Network flavor (name or ID)") ) parser.add_argument( 'service_profile', metavar="", - help=_("Service profile (ID only)") + help=_("Service profile (ID only)"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network obj_flavor = client.find_flavor( - parsed_args.flavor, ignore_missing=False) + parsed_args.flavor, ignore_missing=False + ) obj_service_profile = client.find_service_profile( - parsed_args.service_profile, ignore_missing=False) + parsed_args.service_profile, ignore_missing=False + ) client.associate_flavor_with_service_profile( - obj_flavor, obj_service_profile) + obj_flavor, obj_service_profile + ) # TODO(dasanind): Use the SDK resource mapped attribute names once the @@ -95,28 +93,27 @@ class CreateNetworkFlavor(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create new network flavor") def get_parser(self, prog_name): - parser = super(CreateNetworkFlavor, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar="", - help=_("Name for the flavor") + 'name', metavar="", help=_("Name for the flavor") ) parser.add_argument( '--service-type', metavar="", required=True, - help=_('Service type to which the flavor applies to: e.g. VPN ' - '(See openstack network service provider list for loaded ' - 'examples.)') + help=_( + 'Service type to which the flavor applies. 
For example: VPN ' + '(See openstack network service provider list for loaded ' + 'examples.)' + ), ) parser.add_argument( - '--description', - help=_('Description for the flavor') + '--description', help=_('Description for the flavor') ) parser.add_argument( '--project', metavar="", - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) @@ -124,12 +121,10 @@ def get_parser(self, prog_name): enable_group.add_argument( '--enable', action='store_true', - help=_("Enable the flavor (default)") + help=_("Enable the flavor (default)"), ) enable_group.add_argument( - '--disable', - action='store_true', - help=_("Disable the flavor") + '--disable', action='store_true', help=_("Disable the flavor") ) return parser @@ -138,7 +133,8 @@ def take_action(self, parsed_args): client = self.app.client_manager.network attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_flavor(**attrs) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) @@ -150,13 +146,13 @@ class DeleteNetworkFlavor(command.Command): _description = _("Delete network flavors") def get_parser(self, prog_name): - parser = super(DeleteNetworkFlavor, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'flavor', metavar='', nargs='+', - help=_('Flavor(s) to delete (name or ID)') + help=_('Flavor(s) to delete (name or ID)'), ) return parser @@ -170,13 +166,19 @@ def take_action(self, parsed_args): client.delete_flavor(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete flavor with " - "name or ID '%(flavor)s': %(e)s"), - {"flavor": flavor, "e": e}) + LOG.error( + _( + "Failed to delete flavor with " + "name or ID '%(flavor)s': %(e)s" + ), + {"flavor": flavor, "e": e}, + ) if result > 0: total = len(parsed_args.flavor) - msg = (_("%(result)s of %(total)s flavors failed " - "to delete.") % {"result": result, "total": total}) + msg = _("%(result)s of %(total)s flavors failed to delete.") % { + "result": result, + "total": total, + } raise exceptions.CommandError(msg) @@ -186,55 +188,54 @@ class ListNetworkFlavor(command.Lister): def take_action(self, parsed_args): client = self.app.client_manager.network - columns = ( - 'id', - 'name', - 'is_enabled', - 'service_type', - 'description' - ) + columns = ('id', 'name', 'is_enabled', 'service_type', 'description') column_headers = ( 'ID', 'Name', 'Enabled', 'Service Type', - 'Description' + 'Description', ) data = client.flavors() - return (column_headers, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) class RemoveNetworkFlavorFromProfile(command.Command): - _description = _( - "Remove service profile from network flavor") + _description = _("Remove service profile from network flavor") def get_parser(self, prog_name): - parser = super( - RemoveNetworkFlavorFromProfile, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'flavor', - metavar="", - help=_("Network flavor (name or ID)") + 'flavor', metavar="", help=_("Network flavor (name or ID)") ) parser.add_argument( 'service_profile', metavar="", - help=_("Service profile (ID only)") + help=_("Service profile (ID only)"), ) return 
parser def take_action(self, parsed_args): client = self.app.client_manager.network obj_flavor = client.find_flavor( - parsed_args.flavor, ignore_missing=False) + parsed_args.flavor, ignore_missing=False + ) obj_service_profile = client.find_service_profile( - parsed_args.service_profile, ignore_missing=False) + parsed_args.service_profile, ignore_missing=False + ) client.disassociate_flavor_from_service_profile( - obj_flavor, obj_service_profile) + obj_flavor, obj_service_profile + ) # TODO(dasanind): Use only the SDK resource mapped attribute names once the @@ -243,40 +244,31 @@ class SetNetworkFlavor(common.NeutronCommandWithExtraArgs): _description = _("Set network flavor properties") def get_parser(self, prog_name): - parser = super(SetNetworkFlavor, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'flavor', metavar="", - help=_("Flavor to update (name or ID)") + help=_("Flavor to update (name or ID)"), ) parser.add_argument( - '--description', - help=_('Set network flavor description') + '--description', help=_('Set network flavor description') ) enable_group = parser.add_mutually_exclusive_group() enable_group.add_argument( - '--disable', - action='store_true', - help=_("Disable network flavor") + '--disable', action='store_true', help=_("Disable network flavor") ) enable_group.add_argument( - '--enable', - action='store_true', - help=_("Enable network flavor") + '--enable', action='store_true', help=_("Enable network flavor") ) parser.add_argument( - '--name', - metavar="", - help=_('Set flavor name') + '--name', metavar="", help=_('Set flavor name') ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_flavor( - parsed_args.flavor, - ignore_missing=False) + obj = client.find_flavor(parsed_args.flavor, ignore_missing=False) attrs = {} if parsed_args.name is not None: attrs['name'] = parsed_args.name @@ -287,7 +279,8 @@ def take_action(self, parsed_args): if parsed_args.disable: attrs['enabled'] = False attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) client.update_flavor(obj, **attrs) @@ -295,11 +288,11 @@ class ShowNetworkFlavor(command.ShowOne): _description = _("Display network flavor details") def get_parser(self, prog_name): - parser = super(ShowNetworkFlavor, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'flavor', metavar='', - help=_('Flavor to display (name or ID)') + help=_('Flavor to display (name or ID)'), ) return parser diff --git a/openstackclient/network/v2/network_flavor_profile.py b/openstackclient/network/v2/network_flavor_profile.py index 66c6dcff3f..9792b010a3 100644 --- a/openstackclient/network/v2/network_flavor_profile.py +++ b/openstackclient/network/v2/network_flavor_profile.py @@ -13,12 +13,11 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ -from openstackclient.identity import common as identity_common from openstackclient.network import common LOG = logging.getLogger(__name__) @@ -29,11 +28,9 @@ def _get_columns(item): 'is_enabled': 'enabled', } - hidden_columns = ['location', 'name', 'tenant_id'] + hidden_columns = ['location', 'name', 'tenant_id', 'project_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) 
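
The `_get_columns()` helpers reformatted throughout this patch all delegate to `osc_lib.utils.get_osc_show_columns_for_sdk_resource()`, passing a column map of renamed attributes and a list of hidden columns, and the resulting pair feeds straight into each `ShowOne.take_action()`. A minimal sketch of that pattern is below, assuming a plain dict in place of an SDK resource; the helper, resource fields, and return shape are illustrative only, not the osc_lib implementation.

# Illustrative sketch of the column_map / hidden_columns pattern used by
# the _get_columns() helpers in this patch. Not the osc_lib code.

def get_show_columns(resource, column_map, hidden_columns):
    """Return (display_columns, attr_columns) the way ShowOne expects."""
    attr_columns = [c for c in sorted(resource) if c not in hidden_columns]
    # Display names come from the map; unmapped attributes keep their name.
    display_columns = [column_map.get(c, c) for c in attr_columns]
    return display_columns, attr_columns


if __name__ == '__main__':
    # Hypothetical flavor-profile-like resource.
    profile = {
        'id': 'abc123',
        'is_enabled': True,
        'driver': 'example.driver.path',
        'location': None,       # hidden by the real helper
        'tenant_id': 'proj-1',  # hidden by the real helper
    }
    print(get_show_columns(profile,
                           {'is_enabled': 'enabled'},
                           ['location', 'tenant_id', 'project_id']))
    # -> (['driver', 'id', 'enabled'], ['driver', 'id', 'is_enabled'])
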
@@ -49,57 +46,48 @@ def _get_attrs(client_manager, parsed_args): attrs['enabled'] = True if parsed_args.disable: attrs['enabled'] = False - if 'project' in parsed_args and parsed_args.project is not None: - identity_client = client_manager.identity - project_id = identity_common.find_project( - identity_client, - parsed_args.project, - parsed_args.project_domain, - ).id - attrs['project_id'] = project_id return attrs # TODO(ndahiwade): Use the SDK resource mapped attribute names once the # OSC minimum requirements include SDK 1.0. -class CreateNetworkFlavorProfile(command.ShowOne, - common.NeutronCommandWithExtraArgs): +class CreateNetworkFlavorProfile( + command.ShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create new network flavor profile") def get_parser(self, prog_name): - parser = super(CreateNetworkFlavorProfile, self).get_parser(prog_name) - parser.add_argument( - '--project', - metavar="", - help=_("Owner's project (name or ID)") - ) - identity_common.add_project_domain_option_to_parser(parser) + parser = super().get_parser(prog_name) parser.add_argument( '--description', metavar="", - help=_("Description for the flavor profile") + help=_("Description for the flavor profile"), ) enable_group = parser.add_mutually_exclusive_group() enable_group.add_argument( '--enable', action='store_true', - help=_("Enable the flavor profile") + help=_("Enable the flavor profile"), ) enable_group.add_argument( '--disable', action='store_true', - help=_("Disable the flavor profile") + help=_("Disable the flavor profile"), ) parser.add_argument( '--driver', - help=_("Python module path to driver. This becomes " - "required if --metainfo is missing and vice versa") + help=_( + "Python module path to driver. This becomes " + "required if --metainfo is missing and vice-versa." + ), ) parser.add_argument( '--metainfo', - help=_("Metainfo for the flavor profile. This becomes " - "required if --driver is missing and vice versa") + help=_( + "Metainfo for the flavor profile. This becomes " + "required if --driver is missing and vice-versa." 
+ ), ) return parser @@ -108,7 +96,8 @@ def take_action(self, parsed_args): client = self.app.client_manager.network attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if parsed_args.driver is None and parsed_args.metainfo is None: msg = _("Either --driver or --metainfo or both are required") @@ -125,13 +114,13 @@ class DeleteNetworkFlavorProfile(command.Command): _description = _("Delete network flavor profile") def get_parser(self, prog_name): - parser = super(DeleteNetworkFlavorProfile, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'flavor_profile', metavar='', nargs='+', - help=_("Flavor profile(s) to delete (ID only)") + help=_("Flavor profile(s) to delete (ID only)"), ) return parser @@ -141,18 +130,24 @@ def take_action(self, parsed_args): for flavor_profile in parsed_args.flavor_profile: try: - obj = client.find_service_profile(flavor_profile, - ignore_missing=False) + obj = client.find_service_profile( + flavor_profile, ignore_missing=False + ) client.delete_service_profile(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete flavor profile with " - "ID '%(flavor_profile)s': %(e)s"), - {"flavor_profile": flavor_profile, "e": e}) + LOG.error( + _( + "Failed to delete flavor profile with " + "ID '%(flavor_profile)s': %(e)s" + ), + {"flavor_profile": flavor_profile, "e": e}, + ) if result > 0: total = len(parsed_args.flavor_profile) - msg = (_("%(result)s of %(total)s flavor_profiles failed " - "to delete.") % {"result": result, "total": total}) + msg = _( + "%(result)s of %(total)s flavor_profiles failed to delete." + ) % {"result": result, "total": total} raise exceptions.CommandError(msg) @@ -178,10 +173,16 @@ def take_action(self, parsed_args): ) data = client.service_profiles() - return (column_headers, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) # TODO(ndahiwade): Use the SDK resource mapped attribute names once the @@ -190,49 +191,54 @@ class SetNetworkFlavorProfile(common.NeutronCommandWithExtraArgs): _description = _("Set network flavor profile properties") def get_parser(self, prog_name): - parser = super(SetNetworkFlavorProfile, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'flavor_profile', metavar="", - help=_("Flavor profile to update (ID only)") + help=_("Flavor profile to update (ID only)"), ) - identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--description', metavar="", - help=_("Description for the flavor profile") + help=_("Description for the flavor profile"), ) enable_group = parser.add_mutually_exclusive_group() enable_group.add_argument( '--enable', action='store_true', - help=_("Enable the flavor profile") + help=_("Enable the flavor profile"), ) enable_group.add_argument( '--disable', action='store_true', - help=_("Disable the flavor profile") + help=_("Disable the flavor profile"), ) parser.add_argument( '--driver', - help=_("Python module path to driver. This becomes " - "required if --metainfo is missing and vice versa") + help=_( + "Python module path to driver. This becomes " + "required if --metainfo is missing and vice-versa." + ), ) parser.add_argument( '--metainfo', - help=_("Metainfo for the flavor profile. 
This becomes " - "required if --driver is missing and vice versa") + help=_( + "Metainfo for the flavor profile. This becomes " + "required if --driver is missing and vice-versa." + ), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_service_profile(parsed_args.flavor_profile, - ignore_missing=False) + obj = client.find_service_profile( + parsed_args.flavor_profile, ignore_missing=False + ) attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) client.update_service_profile(obj, **attrs) @@ -241,18 +247,19 @@ class ShowNetworkFlavorProfile(command.ShowOne): _description = _("Display network flavor profile details") def get_parser(self, prog_name): - parser = super(ShowNetworkFlavorProfile, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'flavor_profile', metavar='', - help=_("Flavor profile to display (ID only)") + help=_("Flavor profile to display (ID only)"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_service_profile(parsed_args.flavor_profile, - ignore_missing=False) + obj = client.find_service_profile( + parsed_args.flavor_profile, ignore_missing=False + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) return (display_columns, data) diff --git a/openstackclient/network/v2/network_meter.py b/openstackclient/network/v2/network_meter.py index 99b0bdd453..bb3bf94a18 100644 --- a/openstackclient/network/v2/network_meter.py +++ b/openstackclient/network/v2/network_meter.py @@ -15,10 +15,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -32,9 +32,7 @@ def _get_columns(item): } hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) @@ -67,17 +65,17 @@ class CreateMeter(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create network meter") def get_parser(self, prog_name): - parser = super(CreateMeter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--description', metavar='', - help=_("Create description for meter") + help=_("Description for meter"), ) parser.add_argument( '--project', metavar='', - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) @@ -86,12 +84,12 @@ def get_parser(self, prog_name): '--share', action='store_true', default=None, - help=_("Share meter between projects") + help=_("Share meter between projects"), ) share_group.add_argument( '--no-share', action='store_true', - help=_("Do not share meter between projects") + help=_("Do not share meter between projects"), ) parser.add_argument( 'name', @@ -105,7 +103,8 @@ def take_action(self, parsed_args): client = self.app.client_manager.network attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = 
client.create_metering_label(**attrs) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) @@ -119,13 +118,13 @@ class DeleteMeter(command.Command): _description = _("Delete network meter") def get_parser(self, prog_name): - parser = super(DeleteMeter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'meter', metavar='', nargs='+', - help=_('Meter to delete (name or ID)') + help=_('Meter to delete (name or ID)'), ) return parser @@ -139,13 +138,16 @@ def take_action(self, parsed_args): client.delete_metering_label(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete meter with " - "ID '%(meter)s': %(e)s"), - {"meter": meter, "e": e}) + LOG.error( + _("Failed to delete meter with ID '%(meter)s': %(e)s"), + {"meter": meter, "e": e}, + ) if result > 0: total = len(parsed_args.meter) - msg = (_("%(result)s of %(total)s meters failed " - "to delete.") % {"result": result, "total": total}) + msg = _("%(result)s of %(total)s meters failed to delete.") % { + "result": result, + "total": total, + } raise exceptions.CommandError(msg) @@ -169,28 +171,33 @@ def take_action(self, parsed_args): ) data = client.metering_labels() - return (column_headers, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) class ShowMeter(command.ShowOne): _description = _("Show network meter") def get_parser(self, prog_name): - parser = super(ShowMeter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'meter', - metavar='', - help=_('Meter to display (name or ID)') + 'meter', metavar='', help=_('Meter to display (name or ID)') ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_metering_label(parsed_args.meter, - ignore_missing=False) + obj = client.find_metering_label( + parsed_args.meter, ignore_missing=False + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) return display_columns, data diff --git a/openstackclient/network/v2/network_meter_rule.py b/openstackclient/network/v2/network_meter_rule.py index 2c50e5a662..7a7c231dd7 100644 --- a/openstackclient/network/v2/network_meter_rule.py +++ b/openstackclient/network/v2/network_meter_rule.py @@ -14,11 +14,12 @@ """Meter Rule Implementations""" import logging +import typing as ty -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -27,17 +28,14 @@ def _get_columns(item): - column_map = {} hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) def _get_attrs(client_manager, parsed_args): - attrs = {} + attrs: dict[str, ty.Any] = {} if parsed_args.exclude: attrs['excluded'] = True @@ -71,35 +69,35 @@ class CreateMeterRule(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create a new meter rule") def get_parser(self, prog_name): - parser = super(CreateMeterRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--project', metavar='', - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or 
ID)"), ) identity_common.add_project_domain_option_to_parser(parser) exclude_group = parser.add_mutually_exclusive_group() exclude_group.add_argument( '--exclude', action='store_true', - help=_("Exclude remote IP prefix from traffic count") + help=_("Exclude remote IP prefix from traffic count"), ) exclude_group.add_argument( '--include', action='store_true', - help=_("Include remote IP prefix from traffic count (default)") + help=_("Include remote IP prefix from traffic count (default)"), ) direction_group = parser.add_mutually_exclusive_group() direction_group.add_argument( '--ingress', action='store_true', - help=_("Apply rule to incoming network traffic (default)") + help=_("Apply rule to incoming network traffic (default)"), ) direction_group.add_argument( '--egress', action='store_true', - help=_('Apply rule to outgoing network traffic') + help=_('Apply rule to outgoing network traffic'), ) parser.add_argument( '--remote-ip-prefix', @@ -129,12 +127,14 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network - _meter = client.find_metering_label(parsed_args.meter, - ignore_missing=False) + _meter = client.find_metering_label( + parsed_args.meter, ignore_missing=False + ) parsed_args.meter = _meter.id attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_metering_label_rule(**attrs) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) @@ -146,13 +146,13 @@ class DeleteMeterRule(command.Command): _description = _("Delete meter rule(s)") def get_parser(self, prog_name): - parser = super(DeleteMeterRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'meter_rule_id', metavar='', nargs='+', - help=_('Meter rule to delete (ID only)') + help=_('Meter rule to delete (ID only)'), ) return parser @@ -167,14 +167,16 @@ def take_action(self, parsed_args): client.delete_metering_label_rule(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete meter rule with " - "ID '%(id)s': %(e)s"), - {"id": id, "e": e}) + LOG.error( + _("Failed to delete meter rule with ID '%(id)s': %(e)s"), + {"id": id, "e": e}, + ) if result > 0: total = len(parsed_args.meter_rule_id) - msg = (_("%(result)s of %(total)s meter rules failed " - "to delete.") % {"result": result, "total": total}) + msg = _( + "%(result)s of %(total)s meter rules failed to delete." 
+ ) % {"result": result, "total": total} raise exceptions.CommandError(msg) @@ -201,28 +203,35 @@ def take_action(self, parsed_args): 'Destination IP Prefix', ) data = client.metering_label_rules() - return (column_headers, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) class ShowMeterRule(command.ShowOne): _description = _("Display meter rules details") def get_parser(self, prog_name): - parser = super(ShowMeterRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'meter_rule_id', metavar='', - help=_('Meter rule (ID only)') + help=_('Meter rule (ID only)'), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_metering_label_rule(parsed_args.meter_rule_id, - ignore_missing=False) + obj = client.find_metering_label_rule( + parsed_args.meter_rule_id, ignore_missing=False + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) return display_columns, data diff --git a/openstackclient/network/v2/network_qos_policy.py b/openstackclient/network/v2/network_qos_policy.py index d77e5db938..ab620b8490 100644 --- a/openstackclient/network/v2/network_qos_policy.py +++ b/openstackclient/network/v2/network_qos_policy.py @@ -14,11 +14,13 @@ # under the License. import logging +import typing as ty -from osc_lib.command import command +from cliff import columns as cliff_columns from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -26,15 +28,23 @@ LOG = logging.getLogger(__name__) +class RulesColumn(cliff_columns.FormattableColumn[ty.Any]): + def human_readable(self): + return '\n'.join(str(v) for v in self._value) + + +_formatters = { + 'rules': RulesColumn, +} + + def _get_columns(item): column_map = { 'is_shared': 'shared', } hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) @@ -70,39 +80,40 @@ def _get_attrs(client_manager, parsed_args): # TODO(abhiraut): Use the SDK resource mapped attribute names once the # OSC minimum requirements include SDK 1.0. 
-class CreateNetworkQosPolicy(command.ShowOne, - common.NeutronCommandWithExtraArgs): +class CreateNetworkQosPolicy( + command.ShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create a QoS policy") def get_parser(self, prog_name): - parser = super(CreateNetworkQosPolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar='', - help=_("Name of QoS policy to create") + 'name', metavar='', help=_("Name of QoS policy to create") ) parser.add_argument( '--description', metavar='', - help=_("Description of the QoS policy") + help=_("Description of the QoS policy"), ) share_group = parser.add_mutually_exclusive_group() share_group.add_argument( '--share', action='store_true', default=None, - help=_("Make the QoS policy accessible by other projects") + help=_("Make the QoS policy accessible by other projects"), ) share_group.add_argument( '--no-share', action='store_true', - help=_("Make the QoS policy not accessible by other projects " - "(default)") + help=_( + "Make the QoS policy not accessible by other projects " + "(default)" + ), ) parser.add_argument( '--project', metavar='', - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) default_group = parser.add_mutually_exclusive_group() @@ -122,7 +133,8 @@ def take_action(self, parsed_args): client = self.app.client_manager.network attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_qos_policy(**attrs) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) @@ -133,12 +145,12 @@ class DeleteNetworkQosPolicy(command.Command): _description = _("Delete Qos Policy(s)") def get_parser(self, prog_name): - parser = super(DeleteNetworkQosPolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'policy', metavar="", nargs="+", - help=_("QoS policy(s) to delete (name or ID)") + help=_("QoS policy(s) to delete (name or ID)"), ) return parser @@ -152,14 +164,19 @@ def take_action(self, parsed_args): client.delete_qos_policy(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete QoS policy " - "name or ID '%(qos_policy)s': %(e)s"), - {'qos_policy': policy, 'e': e}) + LOG.error( + _( + "Failed to delete QoS policy " + "name or ID '%(qos_policy)s': %(e)s" + ), + {'qos_policy': policy, 'e': e}, + ) if result > 0: total = len(parsed_args.policy) - msg = (_("%(result)s of %(total)s QoS policies failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s QoS policies failed to delete." 
+ ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -169,23 +186,26 @@ class ListNetworkQosPolicy(command.Lister): _description = _("List QoS policies") def get_parser(self, prog_name): - parser = super(ListNetworkQosPolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--project', metavar='', - help=_("List qos policies according to their project (name or ID)") + help=_( + "List only QoS policies with the specified project " + "(name or ID)" + ), ) identity_common.add_project_domain_option_to_parser(parser) shared_group = parser.add_mutually_exclusive_group() shared_group.add_argument( '--share', action='store_true', - help=_("List qos policies shared between projects") + help=_("List only QoS policies shared between projects"), ) shared_group.add_argument( '--no-share', action='store_true', - help=_("List qos policies not shared between projects") + help=_("List only QoS policies not shared between projects"), ) return parser @@ -207,10 +227,17 @@ def take_action(self, parsed_args): ) attrs = _get_attrs(self.app.client_manager, parsed_args) data = client.qos_policies(**attrs) - return (column_headers, - (utils.get_item_properties( - s, columns, formatters={}, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={}, + ) + for s in data + ), + ) # TODO(abhiraut): Use the SDK resource mapped attribute names once the @@ -219,21 +246,19 @@ class SetNetworkQosPolicy(common.NeutronCommandWithExtraArgs): _description = _("Set QoS policy properties") def get_parser(self, prog_name): - parser = super(SetNetworkQosPolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'policy', metavar="", - help=_("QoS policy to modify (name or ID)") + help=_("QoS policy to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar="", - help=_('Set QoS policy name') + '--name', metavar="", help=_('Set QoS policy name') ) parser.add_argument( '--description', metavar='', - help=_("Description of the QoS policy") + help=_("Description of the QoS policy"), ) enable_group = parser.add_mutually_exclusive_group() enable_group.add_argument( @@ -261,12 +286,11 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_qos_policy( - parsed_args.policy, - ignore_missing=False) + obj = client.find_qos_policy(parsed_args.policy, ignore_missing=False) attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) client.update_qos_policy(obj, **attrs) @@ -274,18 +298,17 @@ class ShowNetworkQosPolicy(command.ShowOne): _description = _("Display QoS policy details") def get_parser(self, prog_name): - parser = super(ShowNetworkQosPolicy, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'policy', metavar="", - help=_("QoS policy to display (name or ID)") + help=_("QoS policy to display (name or ID)"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_qos_policy(parsed_args.policy, - ignore_missing=False) + obj = client.find_qos_policy(parsed_args.policy, ignore_missing=False) display_columns, columns = _get_columns(obj) - data = utils.get_item_properties(obj, columns) + data = utils.get_item_properties(obj, columns, formatters=_formatters) 
return (display_columns, data) diff --git a/openstackclient/network/v2/network_qos_rule.py b/openstackclient/network/v2/network_qos_rule.py index cb2d23398e..b2ddd823ab 100644 --- a/openstackclient/network/v2/network_qos_rule.py +++ b/openstackclient/network/v2/network_qos_rule.py @@ -15,10 +15,10 @@ import itertools -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.network import common @@ -30,17 +30,40 @@ RULE_TYPE_MINIMUM_BANDWIDTH: {'min_kbps', 'direction'}, RULE_TYPE_MINIMUM_PACKET_RATE: {'min_kpps', 'direction'}, RULE_TYPE_DSCP_MARKING: {'dscp_mark'}, - RULE_TYPE_BANDWIDTH_LIMIT: {'max_kbps'}} + RULE_TYPE_BANDWIDTH_LIMIT: {'max_kbps'}, +} OPTIONAL_PARAMETERS = { RULE_TYPE_MINIMUM_BANDWIDTH: set(), RULE_TYPE_MINIMUM_PACKET_RATE: set(), RULE_TYPE_DSCP_MARKING: set(), - RULE_TYPE_BANDWIDTH_LIMIT: {'direction', 'max_burst_kbps'}} + RULE_TYPE_BANDWIDTH_LIMIT: {'direction', 'max_burst_kbps'}, +} DIRECTION_EGRESS = 'egress' DIRECTION_INGRESS = 'ingress' DIRECTION_ANY = 'any' -DSCP_VALID_MARKS = [0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, - 34, 36, 38, 40, 46, 48, 56] +DSCP_VALID_MARKS = [ + 0, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 46, + 48, + 56, +] ACTION_CREATE = 'create' ACTION_DELETE = 'delete' @@ -50,12 +73,9 @@ def _get_columns(item): - column_map = {} - hidden_columns = ['location', 'tenant_id'] + hidden_columns = ['location', 'name', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) @@ -63,31 +83,38 @@ def _check_type_parameters(attrs, type, is_create): req_params = MANDATORY_PARAMETERS[type] opt_params = OPTIONAL_PARAMETERS[type] type_params = req_params | opt_params - notreq_params = set(itertools.chain( - *[v for k, v in MANDATORY_PARAMETERS.items() if k != type])) + notreq_params = set( + itertools.chain( + *[v for k, v in MANDATORY_PARAMETERS.items() if k != type] + ) + ) notreq_params -= type_params if is_create and None in map(attrs.get, req_params): - msg = (_('"Create" rule command for type "%(rule_type)s" requires ' - 'arguments: %(args)s') % - {'rule_type': type, 'args': ", ".join(sorted(req_params))}) + msg = _( + '"Create" rule command for type "%(rule_type)s" requires ' + 'arguments: %(args)s' + ) % {'rule_type': type, 'args': ", ".join(sorted(req_params))} raise exceptions.CommandError(msg) if set(attrs.keys()) & notreq_params: - msg = (_('Rule type "%(rule_type)s" only requires arguments: %(args)s') - % {'rule_type': type, 'args': ", ".join(sorted(type_params))}) + msg = _( + 'Rule type "%(rule_type)s" only requires arguments: %(args)s' + ) % {'rule_type': type, 'args': ", ".join(sorted(type_params))} raise exceptions.CommandError(msg) def _get_attrs(network_client, parsed_args, is_create=False): attrs = {} - qos = network_client.find_qos_policy(parsed_args.qos_policy, - ignore_missing=False) + qos = network_client.find_qos_policy( + parsed_args.qos_policy, ignore_missing=False + ) attrs['qos_policy_id'] = qos.id if not is_create: attrs['id'] = parsed_args.id rule_type = _find_rule_type(qos, parsed_args.id) if not rule_type: - msg = (_('Rule ID %(rule_id)s not found') % - {'rule_id': parsed_args.id}) + msg = _('Rule ID %(rule_id)s not found') % { + 'rule_id': parsed_args.id + } raise exceptions.CommandError(msg) else: rule_type = parsed_args.type @@ -112,26 +139,18 @@ def 
_get_attrs(network_client, parsed_args, is_create=False): if rule_type == RULE_TYPE_MINIMUM_PACKET_RATE: attrs['direction'] = DIRECTION_ANY else: - msg = (_('Direction "any" can only be used with ' - '%(rule_type_min_pps)s rule type') % - {'rule_type_min_pps': RULE_TYPE_MINIMUM_PACKET_RATE}) + msg = _( + 'Direction "any" can only be used with ' + '%(rule_type_min_pps)s rule type' + ) % {'rule_type_min_pps': RULE_TYPE_MINIMUM_PACKET_RATE} raise exceptions.CommandError(msg) _check_type_parameters(attrs, rule_type, is_create) return attrs -def _get_item_properties(item, fields): - """Return a tuple containing the item properties.""" - row = [] - for field in fields: - row.append(item.get(field, '')) - return tuple(row) - - def _rule_action_call(client, action, rule_type): rule_type = rule_type.replace('-', '_') - func_name = '%(action)s_qos_%(rule_type)s_rule' % {'action': action, - 'rule_type': rule_type} + func_name = f'{action}_qos_{rule_type}_rule' return getattr(client, func_name) @@ -147,81 +166,91 @@ def _add_rule_arguments(parser): dest='max_kbps', metavar='', type=int, - help=_('Maximum bandwidth in kbps') + help=_('Maximum bandwidth in kbps'), ) parser.add_argument( '--max-burst-kbits', dest='max_burst_kbits', metavar='', type=int, - help=_('Maximum burst in kilobits, 0 or not specified means ' - 'automatic, which is 80%% of the bandwidth limit, which works ' - 'for typical TCP traffic. For details check the QoS user ' - 'workflow.') + help=_( + 'Maximum burst in kilobits, 0 or not specified means ' + 'automatic, which is 80%% of the bandwidth limit, which works ' + 'for typical TCP traffic. For details check the QoS user ' + 'workflow.' + ), ) parser.add_argument( '--dscp-mark', dest='dscp_mark', metavar='', type=int, - help=_('DSCP mark: value can be 0, even numbers from 8-56, ' - 'excluding 42, 44, 50, 52, and 54') + help=_( + 'DSCP mark: value can be 0, even numbers from 8-56, ' + 'excluding 42, 44, 50, 52, and 54' + ), ) parser.add_argument( '--min-kbps', dest='min_kbps', metavar='', type=int, - help=_('Minimum guaranteed bandwidth in kbps') + help=_('Minimum guaranteed bandwidth in kbps'), ) parser.add_argument( '--min-kpps', dest='min_kpps', metavar='', type=int, - help=_('Minimum guaranteed packet rate in kpps') + help=_('Minimum guaranteed packet rate in kpps'), ) direction_group = parser.add_mutually_exclusive_group() direction_group.add_argument( '--ingress', action='store_true', - help=_("Ingress traffic direction from the project point of view") + help=_("Ingress traffic direction from the project point of view"), ) direction_group.add_argument( '--egress', action='store_true', - help=_("Egress traffic direction from the project point of view") + help=_("Egress traffic direction from the project point of view"), ) direction_group.add_argument( '--any', action='store_true', - help=_("Any traffic direction from the project point of view. Can be " - "used only with minimum packet rate rule.") + help=_( + "Any traffic direction from the project point of view. Can be " + "used only with minimum packet rate rule." 
+ ), ) -class CreateNetworkQosRule(command.ShowOne, - common.NeutronCommandWithExtraArgs): +class CreateNetworkQosRule( + command.ShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create new Network QoS rule") def get_parser(self, prog_name): - parser = super(CreateNetworkQosRule, self).get_parser( - prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_policy', metavar='', - help=_('QoS policy that contains the rule (name or ID)') + help=_('QoS policy that contains the rule (name or ID)'), ) parser.add_argument( '--type', metavar='', required=True, - choices=[RULE_TYPE_MINIMUM_BANDWIDTH, - RULE_TYPE_MINIMUM_PACKET_RATE, - RULE_TYPE_DSCP_MARKING, - RULE_TYPE_BANDWIDTH_LIMIT], - help=(_('QoS rule type (%s)') % - ", ".join(MANDATORY_PARAMETERS.keys())) + choices=[ + RULE_TYPE_MINIMUM_BANDWIDTH, + RULE_TYPE_MINIMUM_PACKET_RATE, + RULE_TYPE_DSCP_MARKING, + RULE_TYPE_BANDWIDTH_LIMIT, + ], + help=( + _('QoS rule type (%s)') + % ", ".join(MANDATORY_PARAMETERS.keys()) + ), ) _add_rule_arguments(parser) return parser @@ -231,12 +260,13 @@ def take_action(self, parsed_args): try: attrs = _get_attrs(network_client, parsed_args, is_create=True) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = _rule_action_call( - network_client, ACTION_CREATE, parsed_args.type)( - attrs.pop('qos_policy_id'), **attrs) + network_client, ACTION_CREATE, parsed_args.type + )(attrs.pop('qos_policy_id'), **attrs) except Exception as e: - msg = (_('Failed to create Network QoS rule: %(e)s') % {'e': e}) + msg = _('Failed to create Network QoS rule: %(e)s') % {'e': e} raise exceptions.CommandError(msg) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) @@ -247,16 +277,16 @@ class DeleteNetworkQosRule(command.Command): _description = _("Delete Network QoS rule") def get_parser(self, prog_name): - parser = super(DeleteNetworkQosRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_policy', metavar='', - help=_('QoS policy that contains the rule (name or ID)') + help=_('QoS policy that contains the rule (name or ID)'), ) parser.add_argument( 'id', metavar='', - help=_('Network QoS rule to delete (ID)') + help=_('Network QoS rule to delete (ID)'), ) return parser @@ -264,16 +294,19 @@ def take_action(self, parsed_args): network_client = self.app.client_manager.network rule_id = parsed_args.id try: - qos = network_client.find_qos_policy(parsed_args.qos_policy, - ignore_missing=False) + qos = network_client.find_qos_policy( + parsed_args.qos_policy, ignore_missing=False + ) rule_type = _find_rule_type(qos, rule_id) if not rule_type: - raise Exception('Rule %s not found' % rule_id) + raise Exception(f'Rule {rule_id} not found') _rule_action_call(network_client, ACTION_DELETE, rule_type)( - rule_id, qos.id) + rule_id, qos.id + ) except Exception as e: - msg = (_('Failed to delete Network QoS rule ID "%(rule)s": %(e)s') - % {'rule': rule_id, 'e': e}) + msg = _( + 'Failed to delete Network QoS rule ID "%(rule)s": %(e)s' + ) % {'rule': rule_id, 'e': e} raise exceptions.CommandError(msg) @@ -281,11 +314,11 @@ class ListNetworkQosRule(command.Lister): _description = _("List Network QoS rules") def get_parser(self, prog_name): - parser = super(ListNetworkQosRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_policy', metavar='', - help=_('QoS policy that contains the rule 
(name or ID)') + help=_('QoS policy that contains the rule (name or ID)'), ) return parser @@ -313,27 +346,30 @@ def take_action(self, parsed_args): 'DSCP mark', 'Direction', ) - qos = client.find_qos_policy(parsed_args.qos_policy, - ignore_missing=False) - data = qos.rules - return (column_headers, - (_get_item_properties(s, columns) for s in data)) + qos = client.find_qos_policy( + parsed_args.qos_policy, ignore_missing=False + ) + + return ( + column_headers, + (utils.get_dict_properties(s, columns) for s in qos.rules), + ) class SetNetworkQosRule(common.NeutronCommandWithExtraArgs): _description = _("Set Network QoS rule properties") def get_parser(self, prog_name): - parser = super(SetNetworkQosRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_policy', metavar='', - help=_('QoS policy that contains the rule (name or ID)') + help=_('QoS policy that contains the rule (name or ID)'), ) parser.add_argument( 'id', metavar='', - help=_('Network QoS rule to delete (ID)') + help=_('Network QoS rule to set (ID)'), ) _add_rule_arguments(parser) return parser @@ -341,22 +377,28 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): network_client = self.app.client_manager.network try: - qos = network_client.find_qos_policy(parsed_args.qos_policy, - ignore_missing=False) + qos = network_client.find_qos_policy( + parsed_args.qos_policy, ignore_missing=False + ) rule_type = _find_rule_type(qos, parsed_args.id) if not rule_type: raise Exception('Rule not found') attrs = _get_attrs(network_client, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) qos_id = attrs.pop('qos_policy_id') - qos_rule = _rule_action_call(network_client, ACTION_FIND, - rule_type)(attrs.pop('id'), qos_id) + qos_rule = _rule_action_call( + network_client, ACTION_FIND, rule_type + )(attrs.pop('id'), qos_id) _rule_action_call(network_client, ACTION_SET, rule_type)( - qos_rule, qos_id, **attrs) + qos_rule, qos_id, **attrs + ) except Exception as e: - msg = (_('Failed to set Network QoS rule ID "%(rule)s": %(e)s') % - {'rule': parsed_args.id, 'e': e}) + msg = _('Failed to set Network QoS rule ID "%(rule)s": %(e)s') % { + 'rule': parsed_args.id, + 'e': e, + } raise exceptions.CommandError(msg) @@ -364,16 +406,16 @@ class ShowNetworkQosRule(command.ShowOne): _description = _("Display Network QoS rule details") def get_parser(self, prog_name): - parser = super(ShowNetworkQosRule, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_policy', metavar='', - help=_('QoS policy that contains the rule (name or ID)') + help=_('QoS policy that contains the rule (name or ID)'), ) parser.add_argument( 'id', metavar='', - help=_('Network QoS rule to delete (ID)') + help=_('Network QoS rule to show (ID)'), ) return parser @@ -381,16 +423,20 @@ def take_action(self, parsed_args): network_client = self.app.client_manager.network rule_id = parsed_args.id try: - qos = network_client.find_qos_policy(parsed_args.qos_policy, - ignore_missing=False) + qos = network_client.find_qos_policy( + parsed_args.qos_policy, ignore_missing=False + ) rule_type = _find_rule_type(qos, rule_id) if not rule_type: raise Exception('Rule not found') obj = _rule_action_call(network_client, ACTION_SHOW, rule_type)( - rule_id, qos.id) + rule_id, qos.id + ) except Exception as e: - msg = (_('Failed to set Network QoS rule ID "%(rule)s": %(e)s') % - {'rule': rule_id, 
'e': e}) + msg = _('Failed to show Network QoS rule ID "%(rule)s": %(e)s') % { + 'rule': rule_id, + 'e': e, + } raise exceptions.CommandError(msg) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) diff --git a/openstackclient/network/v2/network_qos_rule_type.py b/openstackclient/network/v2/network_qos_rule_type.py index 3f4f6a198b..c79c5fe017 100644 --- a/openstackclient/network/v2/network_qos_rule_type.py +++ b/openstackclient/network/v2/network_qos_rule_type.py @@ -13,9 +13,9 @@ # License for the specific language governing permissions and limitations # under the License. -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -26,7 +26,8 @@ def _get_columns(item): } hidden_columns = ["id", "location", "name", 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, hidden_columns) + item, column_map, hidden_columns + ) class ListNetworkQosRuleType(command.Lister): @@ -38,25 +39,25 @@ def get_parser(self, prog_name): supported.add_argument( '--all-supported', action='store_true', - help=_("List all the QoS rule types supported by any loaded " - "mechanism drivers (the union of all sets of supported " - "rules)") + help=_( + "List all the QoS rule types supported by any loaded " + "mechanism drivers (the union of all sets of supported " + "rules)" + ), ) supported.add_argument( '--all-rules', action='store_true', - help=_("List all QoS rule types implemented in Neutron QoS driver") + help=_( + "List all QoS rule types implemented in Neutron QoS driver" + ), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - columns = ( - 'type', - ) - column_headers = ( - 'Type', - ) + columns = ('type',) + column_headers = ('Type',) args = {} if parsed_args.all_supported: @@ -65,21 +66,28 @@ def take_action(self, parsed_args): args['all_rules'] = True data = client.qos_rule_types(**args) - return (column_headers, - (utils.get_item_properties( - s, columns, formatters={}, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={}, + ) + for s in data + ), + ) class ShowNetworkQosRuleType(command.ShowOne): _description = _("Show details about supported QoS rule type") def get_parser(self, prog_name): - parser = super(ShowNetworkQosRuleType, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'rule_type', metavar="", - help=_("Name of QoS rule type") + help=_("Name of QoS rule type"), ) return parser diff --git a/openstackclient/network/v2/network_rbac.py b/openstackclient/network/v2/network_rbac.py index fa4fca7c40..200175ac94 100644 --- a/openstackclient/network/v2/network_rbac.py +++ b/openstackclient/network/v2/network_rbac.py @@ -15,10 +15,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -32,9 +32,7 @@ def _get_columns(item): } hidden_columns = ['location', 'name', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) @@ -46,27 +44,28 @@ def _get_attrs(client_manager, parsed_args): network_client = client_manager.network if parsed_args.type == 'network': object_id = 
network_client.find_network( - parsed_args.rbac_object, ignore_missing=False).id + parsed_args.rbac_object, ignore_missing=False + ).id if parsed_args.type == 'qos_policy': object_id = network_client.find_qos_policy( - parsed_args.rbac_object, - ignore_missing=False).id + parsed_args.rbac_object, ignore_missing=False + ).id if parsed_args.type == 'security_group': object_id = network_client.find_security_group( - parsed_args.rbac_object, - ignore_missing=False).id + parsed_args.rbac_object, ignore_missing=False + ).id if parsed_args.type == 'address_scope': object_id = network_client.find_address_scope( - parsed_args.rbac_object, - ignore_missing=False).id + parsed_args.rbac_object, ignore_missing=False + ).id if parsed_args.type == 'subnetpool': object_id = network_client.find_subnet_pool( - parsed_args.rbac_object, - ignore_missing=False).id + parsed_args.rbac_object, ignore_missing=False + ).id if parsed_args.type == 'address_group': object_id = network_client.find_address_group( - parsed_args.rbac_object, - ignore_missing=False).id + parsed_args.rbac_object, ignore_missing=False + ).id attrs['object_id'] = object_id @@ -97,55 +96,72 @@ class CreateNetworkRBAC(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create network RBAC policy") def get_parser(self, prog_name): - parser = super(CreateNetworkRBAC, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'rbac_object', metavar="", - help=_("The object to which this RBAC policy affects (name or ID)") + help=_( + "The object to which this RBAC policy affects (name or ID)" + ), ) parser.add_argument( '--type', metavar="", required=True, - choices=['address_group', 'address_scope', 'security_group', - 'subnetpool', 'qos_policy', 'network'], - help=_('Type of the object that RBAC policy ' - 'affects ("address_group", "address_scope", ' - '"security_group", "subnetpool", "qos_policy" or ' - '"network")') + choices=[ + 'address_group', + 'address_scope', + 'security_group', + 'subnetpool', + 'qos_policy', + 'network', + ], + help=_( + 'Type of the object that RBAC policy ' + 'affects ("address_group", "address_scope", ' + '"security_group", "subnetpool", "qos_policy" or ' + '"network")' + ), ) parser.add_argument( '--action', metavar="", required=True, choices=['access_as_external', 'access_as_shared'], - help=_('Action for the RBAC policy ' - '("access_as_external" or "access_as_shared")') + help=_( + 'Action for the RBAC policy ' + '("access_as_external" or "access_as_shared")' + ), ) target_project_group = parser.add_mutually_exclusive_group( - required=True) + required=True + ) target_project_group.add_argument( '--target-project', metavar="", - help=_('The project to which the RBAC policy ' - 'will be enforced (name or ID)') + help=_( + 'The project to which the RBAC policy ' + 'will be enforced (name or ID)' + ), ) target_project_group.add_argument( '--target-all-projects', action='store_true', - help=_('Allow creating RBAC policy for all projects.') + help=_('Allow creating RBAC policy for all projects'), ) parser.add_argument( '--target-project-domain', metavar='', - help=_('Domain the target project belongs to (name or ID). ' - 'This can be used in case collisions between project names ' - 'exist.'), + help=_( + 'Domain the target project belongs to (name or ID). ' + 'This can be used in case collisions between project names ' + 'exist.' 
+ ), ) parser.add_argument( '--project', metavar="", - help=_('The owner project (name or ID)') + help=_('The owner project (name or ID)'), ) identity_common.add_project_domain_option_to_parser(parser) return parser @@ -154,7 +170,8 @@ def take_action(self, parsed_args): client = self.app.client_manager.network attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_rbac_policy(**attrs) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) @@ -165,12 +182,12 @@ class DeleteNetworkRBAC(command.Command): _description = _("Delete network RBAC policy(s)") def get_parser(self, prog_name): - parser = super(DeleteNetworkRBAC, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'rbac_policy', metavar="", nargs='+', - help=_("RBAC policy(s) to delete (ID only)") + help=_("RBAC policy(s) to delete (ID only)"), ) return parser @@ -184,14 +201,19 @@ def take_action(self, parsed_args): client.delete_rbac_policy(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete RBAC policy with " - "ID '%(rbac)s': %(e)s"), - {'rbac': rbac, 'e': e}) + LOG.error( + _( + "Failed to delete RBAC policy with " + "ID '%(rbac)s': %(e)s" + ), + {'rbac': rbac, 'e': e}, + ) if result > 0: total = len(parsed_args.rbac_policy) - msg = (_("%(result)s of %(total)s RBAC policies failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s RBAC policies failed to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -199,46 +221,59 @@ class ListNetworkRBAC(command.Lister): _description = _("List network RBAC policies") def get_parser(self, prog_name): - parser = super(ListNetworkRBAC, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--type', metavar='', - choices=['address_group', 'address_scope', 'security_group', - 'subnetpool', 'qos_policy', 'network'], - help=_('List network RBAC policies according to ' - 'given object type ("address_group", "address_scope", ' - '"security_group", "subnetpool", "qos_policy" or ' - '"network")') + choices=[ + 'address_group', + 'address_scope', + 'security_group', + 'subnetpool', + 'qos_policy', + 'network', + ], + help=_( + 'List only network RBAC policies with the specified ' + 'object type ("address_group", "address_scope", ' + '"security_group", "subnetpool", "qos_policy" or ' + '"network")' + ), ) parser.add_argument( '--action', metavar='', choices=['access_as_external', 'access_as_shared'], - help=_('List network RBAC policies according to given ' - 'action ("access_as_external" or "access_as_shared")') + help=_( + 'List only network RBAC policies with the specified ' + 'action ("access_as_external" or "access_as_shared")' + ), ) parser.add_argument( '--target-project', metavar='', - help=_('List network RBAC policies for a specific target project') + help=_( + 'List only network RBAC policies with the specified ' + 'target project (name or ID)' + ), ) parser.add_argument( '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - columns = ( + columns: tuple[str, ...] 
= ( 'id', 'object_type', 'object_id', ) - column_headers = ( + column_headers: tuple[str, ...] = ( 'ID', 'Object Type', 'Object ID', @@ -265,10 +300,16 @@ def take_action(self, parsed_args): data = client.rbac_policies(**query) - return (column_headers, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) # TODO(abhiraut): Use the SDK resource mapped attribute names once the @@ -277,31 +318,36 @@ class SetNetworkRBAC(common.NeutronCommandWithExtraArgs): _description = _("Set network RBAC policy properties") def get_parser(self, prog_name): - parser = super(SetNetworkRBAC, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'rbac_policy', metavar="", - help=_("RBAC policy to be modified (ID only)") + help=_("RBAC policy to be modified (ID only)"), ) parser.add_argument( '--target-project', metavar="", - help=_('The project to which the RBAC policy ' - 'will be enforced (name or ID)') + help=_( + 'The project to which the RBAC policy ' + 'will be enforced (name or ID)' + ), ) parser.add_argument( '--target-project-domain', metavar='', - help=_('Domain the target project belongs to (name or ID). ' - 'This can be used in case collisions between project names ' - 'exist.'), + help=_( + 'Domain the target project belongs to (name or ID). ' + 'This can be used in case collisions between project names ' + 'exist.' + ), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_rbac_policy(parsed_args.rbac_policy, - ignore_missing=False) + obj = client.find_rbac_policy( + parsed_args.rbac_policy, ignore_missing=False + ) attrs = {} if parsed_args.target_project: identity_client = self.app.client_manager.identity @@ -312,7 +358,8 @@ def take_action(self, parsed_args): ).id attrs['target_tenant'] = project_id attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) client.update_rbac_policy(obj, **attrs) @@ -320,18 +367,19 @@ class ShowNetworkRBAC(command.ShowOne): _description = _("Display network RBAC policy details") def get_parser(self, prog_name): - parser = super(ShowNetworkRBAC, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'rbac_policy', metavar="", - help=_("RBAC policy (ID only)") + help=_("RBAC policy (ID only)"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_rbac_policy(parsed_args.rbac_policy, - ignore_missing=False) + obj = client.find_rbac_policy( + parsed_args.rbac_policy, ignore_missing=False + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) return display_columns, data diff --git a/openstackclient/network/v2/network_segment.py b/openstackclient/network/v2/network_segment.py index c6c88e30c9..c2e7becfa8 100644 --- a/openstackclient/network/v2/network_segment.py +++ b/openstackclient/network/v2/network_segment.py @@ -15,10 +15,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.network import common @@ -26,25 +26,21 @@ def _get_columns(item): - column_map = {} hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - 
hidden_columns + item, {}, hidden_columns ) -class CreateNetworkSegment(command.ShowOne, - common.NeutronCommandWithExtraArgs): +class CreateNetworkSegment( + command.ShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create new network segment") def get_parser(self, prog_name): - parser = super(CreateNetworkSegment, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar='', - help=_('New network segment name') + 'name', metavar='', help=_('New network segment name') ) parser.add_argument( '--description', @@ -60,10 +56,12 @@ def get_parser(self, prog_name): '--segment', metavar='', type=int, - help=_('Segment identifier for this network segment which is ' - 'based on the network type, VLAN ID for vlan network ' - 'type and tunnel ID for geneve, gre and vxlan network ' - 'types'), + help=_( + 'Segment identifier for this network segment which is ' + 'based on the network type, VLAN ID for vlan network ' + 'type and tunnel ID for geneve, gre and vxlan network ' + 'types' + ), ) parser.add_argument( '--network', @@ -76,8 +74,10 @@ def get_parser(self, prog_name): metavar='', choices=['flat', 'geneve', 'gre', 'local', 'vlan', 'vxlan'], required=True, - help=_('Network type of this network segment ' - '(flat, geneve, gre, local, vlan or vxlan)'), + help=_( + 'Network type of this network segment ' + '(flat, geneve, gre, local, vlan or vxlan)' + ), ) return parser @@ -85,8 +85,9 @@ def take_action(self, parsed_args): client = self.app.client_manager.network attrs = {} attrs['name'] = parsed_args.name - attrs['network_id'] = client.find_network(parsed_args.network, - ignore_missing=False).id + attrs['network_id'] = client.find_network( + parsed_args.network, ignore_missing=False + ).id attrs['network_type'] = parsed_args.network_type if parsed_args.description is not None: attrs['description'] = parsed_args.description @@ -95,7 +96,8 @@ def take_action(self, parsed_args): if parsed_args.segment is not None: attrs['segmentation_id'] = parsed_args.segment attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_segment(**attrs) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) @@ -106,7 +108,7 @@ class DeleteNetworkSegment(command.Command): _description = _("Delete network segment(s)") def get_parser(self, prog_name): - parser = super(DeleteNetworkSegment, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network_segment', metavar='', @@ -121,19 +123,25 @@ def take_action(self, parsed_args): result = 0 for network_segment in parsed_args.network_segment: try: - obj = client.find_segment(network_segment, - ignore_missing=False) + obj = client.find_segment( + network_segment, ignore_missing=False + ) client.delete_segment(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete network segment with " - "ID '%(network_segment)s': %(e)s"), - {'network_segment': network_segment, 'e': e}) + LOG.error( + _( + "Failed to delete network segment with " + "ID '%(network_segment)s': %(e)s" + ), + {'network_segment': network_segment, 'e': e}, + ) if result > 0: total = len(parsed_args.network_segment) - msg = (_("%(result)s of %(total)s network segments failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s network segments failed to delete." 
+ ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -141,7 +149,7 @@ class ListNetworkSegment(command.Lister): _description = _("List network segments") def get_parser(self, prog_name): - parser = super(ListNetworkSegment, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', @@ -151,8 +159,10 @@ def get_parser(self, prog_name): parser.add_argument( '--network', metavar='', - help=_('List network segments that belong to this ' - 'network (name or ID)'), + help=_( + 'List only network segments associated with the specified ' + 'network (name or ID)' + ), ) return parser @@ -162,20 +172,19 @@ def take_action(self, parsed_args): filters = {} if parsed_args.network: _network = network_client.find_network( - parsed_args.network, - ignore_missing=False + parsed_args.network, ignore_missing=False ) filters = {'network_id': _network.id} data = network_client.segments(**filters) - headers = ( + headers: tuple[str, ...] = ( 'ID', 'Name', 'Network', 'Network Type', 'Segment', ) - columns = ( + columns: tuple[str, ...] = ( 'id', 'name', 'network_id', @@ -183,25 +192,27 @@ def take_action(self, parsed_args): 'segmentation_id', ) if parsed_args.long: - headers = headers + ( - 'Physical Network', - ) - columns = columns + ( - 'physical_network', - ) - - return (headers, - (utils.get_item_properties( - s, columns, + headers += ('Physical Network',) + columns += ('physical_network',) + + return ( + headers, + ( + utils.get_item_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SetNetworkSegment(common.NeutronCommandWithExtraArgs): _description = _("Set network segment properties") def get_parser(self, prog_name): - parser = super(SetNetworkSegment, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network_segment', metavar='', @@ -221,15 +232,17 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_segment(parsed_args.network_segment, - ignore_missing=False) + obj = client.find_segment( + parsed_args.network_segment, ignore_missing=False + ) attrs = {} if parsed_args.description is not None: attrs['description'] = parsed_args.description if parsed_args.name is not None: attrs['name'] = parsed_args.name attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) client.update_segment(obj, **attrs) @@ -237,7 +250,7 @@ class ShowNetworkSegment(command.ShowOne): _description = _("Display network segment details") def get_parser(self, prog_name): - parser = super(ShowNetworkSegment, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network_segment', metavar='', @@ -248,8 +261,7 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_segment( - parsed_args.network_segment, - ignore_missing=False + parsed_args.network_segment, ignore_missing=False ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) diff --git a/openstackclient/network/v2/network_segment_range.py b/openstackclient/network/v2/network_segment_range.py index 1291d9d815..c8b9a0e4c4 100644 --- a/openstackclient/network/v2/network_segment_range.py +++ b/openstackclient/network/v2/network_segment_range.py @@ -18,11 +18,12 @@ import itertools import logging 
+import typing as ty -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -32,21 +33,19 @@ def _get_columns(item): - column_map = {} hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) def _get_ranges(item): item = sorted([int(i) for i in item]) for a, b in itertools.groupby(enumerate(item), lambda xy: xy[1] - xy[0]): - b = list(b) - yield "%s-%s" % (b[0][1], b[-1][1]) if b[0][1] != b[-1][1] else \ - str(b[0][1]) + c = list(b) + yield ( + f"{c[0][1]}-{c[-1][1]}" if c[0][1] != c[-1][1] else str(c[0][1]) + ) def _hack_tuple_value_update_by_index(tup, index, value): @@ -60,7 +59,7 @@ def _is_prop_empty(columns, props, prop_name): def _exchange_dict_keys_with_values(orig_dict): - updated_dict = dict() + updated_dict: dict[str, ty.Any] = {} for k, v in orig_dict.items(): k = [k] if not updated_dict.get(v): @@ -73,7 +72,8 @@ def _exchange_dict_keys_with_values(orig_dict): def _update_available_from_props(columns, props): index_available = columns.index('available') props = _hack_tuple_value_update_by_index( - props, index_available, list(_get_ranges(props[index_available]))) + props, index_available, list(_get_ranges(props[index_available])) + ) return props @@ -82,8 +82,7 @@ def _update_used_from_props(columns, props): updated_used = _exchange_dict_keys_with_values(props[index_used]) for k, v in updated_used.items(): updated_used[k] = list(_get_ranges(v)) - props = _hack_tuple_value_update_by_index( - props, index_used, updated_used) + props = _hack_tuple_value_update_by_index(props, index_used, updated_used) return props @@ -93,19 +92,21 @@ def _update_additional_fields_from_props(columns, props): return props -class CreateNetworkSegmentRange(command.ShowOne, - common.NeutronCommandWithExtraArgs): +class CreateNetworkSegmentRange( + command.ShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create new network segment range") def get_parser(self, prog_name): - parser = super(CreateNetworkSegmentRange, self).get_parser(prog_name) + parser = super().get_parser(prog_name) shared_group = parser.add_mutually_exclusive_group() shared_group.add_argument( "--private", dest="private", action="store_true", - help=_('Network segment range is assigned specifically to the ' - 'project'), + help=_( + 'Network segment range is assigned specifically to the project' + ), ) shared_group.add_argument( "--shared", @@ -116,13 +117,15 @@ def get_parser(self, prog_name): parser.add_argument( 'name', metavar='', - help=_('Name of new network segment range') + help=_('Name of new network segment range'), ) parser.add_argument( '--project', metavar='', - help=_('Network segment range owner (name or ID). Optional when ' - 'the segment range is shared'), + help=_( + 'Network segment range owner (name or ID). Optional when ' + 'the segment range is shared.' 
+ ), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( @@ -130,8 +133,10 @@ def get_parser(self, prog_name): metavar='', choices=['geneve', 'gre', 'vlan', 'vxlan'], required=True, - help=_('Network type of this network segment range ' - '(geneve, gre, vlan or vxlan)'), + help=_( + 'Network type of this network segment range ' + '(geneve, gre, vlan or vxlan)' + ), ) parser.add_argument( '--physical-network', @@ -143,20 +148,24 @@ def get_parser(self, prog_name): metavar='', type=int, required=True, - help=_('Minimum segment identifier for this network segment ' - 'range which is based on the network type, VLAN ID for ' - 'vlan network type and tunnel ID for geneve, gre and vxlan ' - 'network types'), + help=_( + 'Minimum segment identifier for this network segment ' + 'range which is based on the network type, VLAN ID for ' + 'vlan network type and tunnel ID for geneve, gre and vxlan ' + 'network types' + ), ) parser.add_argument( '--maximum', metavar='', type=int, required=True, - help=_('Maximum segment identifier for this network segment ' - 'range which is based on the network type, VLAN ID for ' - 'vlan network type and tunnel ID for geneve, gre and vxlan ' - 'network types'), + help=_( + 'Maximum segment identifier for this network segment ' + 'range which is based on the network type, VLAN ID for ' + 'vlan network type and tunnel ID for geneve, gre and vxlan ' + 'network types' + ), ) return parser @@ -165,11 +174,14 @@ def take_action(self, parsed_args): network_client = self.app.client_manager.network try: # Verify that the extension exists. - network_client.find_extension('network-segment-range', - ignore_missing=False) + network_client.find_extension( + 'network-segment-range', ignore_missing=False + ) except Exception as e: - msg = (_('Network segment range create not supported by ' - 'Network API: %(e)s') % {'e': e}) + msg = _( + 'Network segment range create not supported by ' + 'Network API: %(e)s' + ) % {'e': e} raise exceptions.CommandError(msg) identity_client = self.app.client_manager.identity @@ -178,10 +190,13 @@ def take_action(self, parsed_args): msg = _("--project is only allowed with --private") raise exceptions.CommandError(msg) - if (parsed_args.network_type.lower() != 'vlan' and - parsed_args.physical_network): - msg = _("--physical-network is only allowed with --network-type " - "vlan") + if ( + parsed_args.network_type.lower() != 'vlan' + and parsed_args.physical_network + ): + msg = _( + "--physical-network is only allowed with --network-type vlan" + ) raise exceptions.CommandError(msg) attrs = {} @@ -205,8 +220,13 @@ def take_action(self, parsed_args): if project_id: attrs['project_id'] = project_id else: - msg = (_("Failed to create the network segment range for " - "project %(project_id)s") % parsed_args.project_id) + msg = ( + _( + "Failed to create the network segment range for " + "project %(project_id)s" + ) + % parsed_args.project_id + ) raise exceptions.CommandError(msg) elif not attrs['shared']: # default to the current project if no project specified and shared @@ -218,7 +238,8 @@ def take_action(self, parsed_args): attrs['physical_network'] = parsed_args.physical_network attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = network_client.create_network_segment_range(**attrs) display_columns, columns = _get_columns(obj) @@ -231,7 +252,7 @@ class DeleteNetworkSegmentRange(command.Command): _description = _("Delete network 
segment range(s)") def get_parser(self, prog_name): - parser = super(DeleteNetworkSegmentRange, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network_segment_range', metavar='', @@ -244,30 +265,39 @@ def take_action(self, parsed_args): network_client = self.app.client_manager.network try: # Verify that the extension exists. - network_client.find_extension('network-segment-range', - ignore_missing=False) + network_client.find_extension( + 'network-segment-range', ignore_missing=False + ) except Exception as e: - msg = (_('Network segment range delete not supported by ' - 'Network API: %(e)s') % {'e': e}) + msg = _( + 'Network segment range delete not supported by ' + 'Network API: %(e)s' + ) % {'e': e} raise exceptions.CommandError(msg) result = 0 for network_segment_range in parsed_args.network_segment_range: try: obj = network_client.find_network_segment_range( - network_segment_range, ignore_missing=False) + network_segment_range, ignore_missing=False + ) network_client.delete_network_segment_range(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete network segment range with " - "ID '%(network_segment_range)s': %(e)s"), - {'network_segment_range': network_segment_range, - 'e': e}) + LOG.error( + _( + "Failed to delete network segment range with " + "ID '%(network_segment_range)s': %(e)s" + ), + {'network_segment_range': network_segment_range, 'e': e}, + ) if result > 0: total = len(parsed_args.network_segment_range) - msg = (_("%(result)s of %(total)s network segment ranges failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s network segment ranges failed " + "to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -275,7 +305,7 @@ class ListNetworkSegmentRange(command.Lister): _description = _("List network segment ranges") def get_parser(self, prog_name): - parser = super(ListNetworkSegmentRange, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', @@ -285,24 +315,32 @@ def get_parser(self, prog_name): used_group.add_argument( '--used', action='store_true', - help=_('List network segment ranges that have segments in use'), + help=_( + 'List only network segment ranges that have segments in use' + ), ) used_group.add_argument( '--unused', action='store_true', - help=_('List network segment ranges that have segments ' - 'not in use'), + help=_( + 'List only network segment ranges that have segments ' + 'not in use' + ), ) available_group = parser.add_mutually_exclusive_group() available_group.add_argument( '--available', action='store_true', - help=_('List network segment ranges that have available segments'), + help=_( + 'List only network segment ranges that have available segments' + ), ) available_group.add_argument( '--unavailable', action='store_true', - help=_('List network segment ranges without available segments'), + help=_( + 'List only network segment ranges without available segments' + ), ) return parser @@ -310,17 +348,19 @@ def take_action(self, parsed_args): network_client = self.app.client_manager.network try: # Verify that the extension exists. 
- network_client.find_extension('network-segment-range', - ignore_missing=False) + network_client.find_extension( + 'network-segment-range', ignore_missing=False + ) except Exception as e: - msg = (_('Network segment ranges list not supported by ' - 'Network API: %(e)s') % {'e': e}) + msg = _( + 'Network segment ranges list not supported by ' + 'Network API: %(e)s' + ) % {'e': e} raise exceptions.CommandError(msg) - filters = {} - data = network_client.network_segment_ranges(**filters) + data = network_client.network_segment_ranges() - headers = ( + headers: tuple[str, ...] = ( 'ID', 'Name', 'Default', @@ -329,9 +369,9 @@ def take_action(self, parsed_args): 'Network Type', 'Physical Network', 'Minimum ID', - 'Maximum ID' + 'Maximum ID', ) - columns = ( + columns: tuple[str, ...] = ( 'id', 'name', 'default', @@ -342,32 +382,39 @@ def take_action(self, parsed_args): 'minimum', 'maximum', ) - if parsed_args.available or parsed_args.unavailable or \ - parsed_args.used or parsed_args.unused: + if ( + parsed_args.available + or parsed_args.unavailable + or parsed_args.used + or parsed_args.unused + ): # If one of `--available`, `--unavailable`, `--used`, # `--unused` is specified, we assume that additional fields # should be listed in output. parsed_args.long = True if parsed_args.long: - headers = headers + ( + headers += ( 'Used', 'Available', ) - columns = columns + ( + columns += ( 'used', 'available', ) - display_props = tuple() + display_props: tuple[ty.Any, ...] = tuple() for s in data: props = utils.get_item_properties(s, columns) - if parsed_args.available and \ - _is_prop_empty(columns, props, 'available') or \ - parsed_args.unavailable and \ - not _is_prop_empty(columns, props, 'available') or \ - parsed_args.used and _is_prop_empty(columns, props, 'used') or \ - parsed_args.unused and \ - not _is_prop_empty(columns, props, 'used'): + if ( + parsed_args.available + and _is_prop_empty(columns, props, 'available') + or parsed_args.unavailable + and not _is_prop_empty(columns, props, 'available') + or parsed_args.used + and _is_prop_empty(columns, props, 'used') + or parsed_args.unused + and not _is_prop_empty(columns, props, 'used') + ): continue if parsed_args.long: props = _update_additional_fields_from_props(columns, props) @@ -380,7 +427,7 @@ class SetNetworkSegmentRange(common.NeutronCommandWithExtraArgs): _description = _("Set network segment range properties") def get_parser(self, prog_name): - parser = super(SetNetworkSegmentRange, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network_segment_range', metavar='', @@ -409,20 +456,24 @@ def take_action(self, parsed_args): network_client = self.app.client_manager.network try: # Verify that the extension exists. 
- network_client.find_extension('network-segment-range', - ignore_missing=False) + network_client.find_extension( + 'network-segment-range', ignore_missing=False + ) except Exception as e: - msg = (_('Network segment range set not supported by ' - 'Network API: %(e)s') % {'e': e}) + msg = _( + 'Network segment range set not supported by Network API: %(e)s' + ) % {'e': e} raise exceptions.CommandError(msg) - if (parsed_args.minimum and not parsed_args.maximum) or \ - (parsed_args.maximum and not parsed_args.minimum): + if (parsed_args.minimum and not parsed_args.maximum) or ( + parsed_args.maximum and not parsed_args.minimum + ): msg = _("--minimum and --maximum are both required") raise exceptions.CommandError(msg) obj = network_client.find_network_segment_range( - parsed_args.network_segment_range, ignore_missing=False) + parsed_args.network_segment_range, ignore_missing=False + ) attrs = {} if parsed_args.name: attrs['name'] = parsed_args.name @@ -431,7 +482,8 @@ def take_action(self, parsed_args): if parsed_args.maximum: attrs['maximum'] = parsed_args.maximum attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) network_client.update_network_segment_range(obj, **attrs) @@ -439,7 +491,7 @@ class ShowNetworkSegmentRange(command.ShowOne): _description = _("Display network segment range details") def get_parser(self, prog_name): - parser = super(ShowNetworkSegmentRange, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'network_segment_range', metavar='', @@ -451,16 +503,18 @@ def take_action(self, parsed_args): network_client = self.app.client_manager.network try: # Verify that the extension exists. - network_client.find_extension('network-segment-range', - ignore_missing=False) + network_client.find_extension( + 'network-segment-range', ignore_missing=False + ) except Exception as e: - msg = (_('Network segment range show not supported by ' - 'Network API: %(e)s') % {'e': e}) + msg = _( + 'Network segment range show not supported by ' + 'Network API: %(e)s' + ) % {'e': e} raise exceptions.CommandError(msg) obj = network_client.find_network_segment_range( - parsed_args.network_segment_range, - ignore_missing=False + parsed_args.network_segment_range, ignore_missing=False ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) diff --git a/openstackclient/network/v2/network_service_provider.py b/openstackclient/network/v2/network_service_provider.py index 157948cc4b..1433c097ae 100644 --- a/openstackclient/network/v2/network_service_provider.py +++ b/openstackclient/network/v2/network_service_provider.py @@ -13,9 +13,9 @@ """Network Service Providers Implementation""" -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -26,18 +26,24 @@ def take_action(self, parsed_args): client = self.app.client_manager.network columns = ( - 'service_type', - 'name', - 'is_default', + "service_type", + "name", + "is_default", ) column_headers = ( - 'Service Type', - 'Name', - 'Default', + "Service Type", + "Name", + "Default", ) data = client.service_providers() - return(column_headers, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) diff --git a/openstackclient/network/v2/network_trunk.py b/openstackclient/network/v2/network_trunk.py index 
c5f629018e..974a997636 100644 --- a/openstackclient/network/v2/network_trunk.py +++ b/openstackclient/network/v2/network_trunk.py @@ -15,16 +15,18 @@ # """Network trunk and subports action implementations""" + import logging +import typing as ty from cliff import columns as cliff_columns from osc_lib.cli import format_columns from osc_lib.cli import identity as identity_utils from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils as osc_utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) @@ -34,7 +36,7 @@ SUB_PORTS = 'sub_ports' -class AdminStateColumn(cliff_columns.FormattableColumn): +class AdminStateColumn(cliff_columns.FormattableColumn[bool]): def human_readable(self): return 'UP' if self._value else 'DOWN' @@ -43,57 +45,59 @@ class CreateNetworkTrunk(command.ShowOne): """Create a network trunk for a given project""" def get_parser(self, prog_name): - parser = super(CreateNetworkTrunk, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar='', - help=_("Name of the trunk to create") + 'name', metavar='', help=_("Name of the trunk to create") ) parser.add_argument( '--description', metavar='', - help=_("A description of the trunk") + help=_("A description of the trunk"), ) parser.add_argument( '--parent-port', metavar='', required=True, - help=_("Parent port belonging to this trunk (name or ID)") + help=_("Parent port belonging to this trunk (name or ID)"), ) parser.add_argument( '--subport', metavar='', - action=parseractions.MultiKeyValueAction, dest='add_subports', + action=parseractions.MultiKeyValueAction, + dest='add_subports', optional_keys=['segmentation-id', 'segmentation-type'], required_keys=['port'], - help=_("Subport to add. Subport is of form " - "\'port=,segmentation-type=," - "segmentation-id=\' (--subport) option " - "can be repeated") + help=_( + "Subport to add. 
Subport is of form " + "'port=,segmentation-type=," + "segmentation-id=' (repeat option " + "to add multiple subports)" + ), ) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( '--enable', action='store_true', default=True, - help=_("Enable trunk (default)") + help=_("Enable trunk (default)"), ) admin_group.add_argument( - '--disable', - action='store_true', - help=_("Disable trunk") + '--disable', action='store_true', help=_("Disable trunk") ) identity_utils.add_project_owner_option_to_parser(parser) return parser def take_action(self, parsed_args): - client = self.app.client_manager.network - attrs = _get_attrs_for_trunk(self.app.client_manager, - parsed_args) - obj = client.create_trunk(**attrs) + network_client = self.app.client_manager.network + identity_client = self.app.client_manager.identity + attrs = _get_attrs_for_trunk( + network_client, identity_client, parsed_args + ) + obj = network_client.create_trunk(**attrs) display_columns, columns = _get_columns(obj) - data = osc_utils.get_dict_properties(obj, columns, - formatters=_formatters) + data = osc_utils.get_dict_properties( + obj, columns, formatters=_formatters + ) return display_columns, data @@ -101,31 +105,40 @@ class DeleteNetworkTrunk(command.Command): """Delete a given network trunk""" def get_parser(self, prog_name): - parser = super(DeleteNetworkTrunk, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'trunk', metavar="", nargs="+", - help=_("Trunk(s) to delete (name or ID)") + help=_("Trunk(s) to delete (name or ID)"), ) return parser def take_action(self, parsed_args): - client = self.app.client_manager.network + network_client = self.app.client_manager.network result = 0 for trunk in parsed_args.trunk: try: - trunk_id = client.find_trunk(trunk).id - client.delete_trunk(trunk_id) + trunk_id = network_client.find_trunk( + trunk, + ignore_missing=False, + ).id + network_client.delete_trunk(trunk_id) except Exception as e: result += 1 - LOG.error(_("Failed to delete trunk with name " - "or ID '%(trunk)s': %(e)s"), - {'trunk': trunk, 'e': e}) + LOG.error( + _( + "Failed to delete trunk with name " + "or ID '%(trunk)s': %(e)s" + ), + {'trunk': trunk, 'e': e}, + ) if result > 0: total = len(parsed_args.trunk) - msg = (_("%(result)s of %(total)s trunks failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s trunks failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -133,30 +146,20 @@ class ListNetworkTrunk(command.Lister): """List all network trunks""" def get_parser(self, prog_name): - parser = super(ListNetworkTrunk, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) return parser def take_action(self, parsed_args): - client = self.app.client_manager.network - data = client.trunks() - headers = ( - 'ID', - 'Name', - 'Parent Port', - 'Description' - ) - columns = ( - 'id', - 'name', - 'port_id', - 'description' - ) + network_client = self.app.client_manager.network + data = network_client.trunks() + headers: tuple[str, ...] = ('ID', 'Name', 'Parent Port', 'Description') + columns: tuple[str, ...] 
= ('id', 'name', 'port_id', 'description') if parsed_args.long: headers += ( 'Status', @@ -164,102 +167,112 @@ def take_action(self, parsed_args): 'Created At', 'Updated At', ) - columns += ( - 'status', - 'admin_state_up', - 'created_at', - 'updated_at' - ) - return (headers, - (osc_utils.get_item_properties( - s, columns, + columns += ('status', 'admin_state_up', 'created_at', 'updated_at') + return ( + headers, + ( + osc_utils.get_item_properties( + s, + columns, formatters=_formatters, - ) for s in data)) + ) + for s in data + ), + ) class SetNetworkTrunk(command.Command): """Set network trunk properties""" def get_parser(self, prog_name): - parser = super(SetNetworkTrunk, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'trunk', - metavar="", - help=_("Trunk to modify (name or ID)") + 'trunk', metavar="", help=_("Trunk to modify (name or ID)") ) parser.add_argument( - '--name', - metavar="", - help=_("Set trunk name") + '--name', metavar="", help=_("Set trunk name") ) parser.add_argument( '--description', metavar='', - help=_("A description of the trunk") + help=_("A description of the trunk"), ) parser.add_argument( '--subport', metavar='', - action=parseractions.MultiKeyValueAction, dest='set_subports', + action=parseractions.MultiKeyValueAction, + dest='set_subports', optional_keys=['segmentation-id', 'segmentation-type'], required_keys=['port'], - help=_("Subport to add. Subport is of form " - "\'port=,segmentation-type=" - ",segmentation-id=\' (--subport) option " - "can be repeated") + help=_( + "Subport to add. Subport is of form " + "'port=,segmentation-type=," + "segmentation-id=' (repeat option " + "to add multiple subports)" + ), ) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( - '--enable', - action='store_true', - help=_("Enable trunk") + '--enable', action='store_true', help=_("Enable trunk") ) admin_group.add_argument( - '--disable', - action='store_true', - help=_("Disable trunk") + '--disable', action='store_true', help=_("Disable trunk") ) return parser def take_action(self, parsed_args): - client = self.app.client_manager.network - trunk_id = client.find_trunk(parsed_args.trunk) - attrs = _get_attrs_for_trunk(self.app.client_manager, parsed_args) + network_client = self.app.client_manager.network + identity_client = self.app.client_manager.identity + trunk_id = network_client.find_trunk( + parsed_args.trunk, + ignore_missing=False, + ) + attrs = _get_attrs_for_trunk( + network_client, identity_client, parsed_args + ) try: - client.update_trunk(trunk_id, **attrs) + network_client.update_trunk(trunk_id, **attrs) except Exception as e: - msg = (_("Failed to set trunk '%(t)s': %(e)s") - % {'t': parsed_args.trunk, 'e': e}) + msg = _("Failed to set trunk '%(t)s': %(e)s") % { + 't': parsed_args.trunk, + 'e': e, + } raise exceptions.CommandError(msg) if parsed_args.set_subports: - subport_attrs = _get_attrs_for_subports(self.app.client_manager, - parsed_args) + subport_attrs = _get_attrs_for_subports( + network_client, parsed_args + ) try: - client.add_trunk_subports(trunk_id, subport_attrs) + network_client.add_trunk_subports(trunk_id, subport_attrs) except Exception as e: - msg = (_("Failed to add subports to trunk '%(t)s': %(e)s") - % {'t': parsed_args.trunk, 'e': e}) + msg = _("Failed to add subports to trunk '%(t)s': %(e)s") % { + 't': parsed_args.trunk, + 'e': e, + } raise exceptions.CommandError(msg) class ShowNetworkTrunk(command.ShowOne): """Show information of a given network trunk""" + def 
get_parser(self, prog_name): - parser = super(ShowNetworkTrunk, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'trunk', - metavar="", - help=_("Trunk to display (name or ID)") + 'trunk', metavar="", help=_("Trunk to display (name or ID)") ) return parser def take_action(self, parsed_args): - client = self.app.client_manager.network - trunk_id = client.find_trunk(parsed_args.trunk).id - obj = client.get_trunk(trunk_id) + network_client = self.app.client_manager.network + trunk_id = network_client.find_trunk( + parsed_args.trunk, + ignore_missing=False, + ).id + obj = network_client.get_trunk(trunk_id) display_columns, columns = _get_columns(obj) - data = osc_utils.get_dict_properties(obj, columns, - formatters=_formatters) + data = osc_utils.get_dict_properties( + obj, columns, formatters=_formatters + ) return display_columns, data @@ -267,52 +280,75 @@ class ListNetworkSubport(command.Lister): """List all subports for a given network trunk""" def get_parser(self, prog_name): - parser = super(ListNetworkSubport, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--trunk', required=True, metavar="", - help=_("List subports belonging to this trunk (name or ID)") + help=_("List only subports belonging to this trunk (name or ID)"), ) return parser def take_action(self, parsed_args): - client = self.app.client_manager.network - trunk_id = client.find_trunk(parsed_args.trunk) - data = client.get_trunk_subports(trunk_id) - headers = ('Port', 'Segmentation Type', 'Segmentation ID') - columns = ('port_id', 'segmentation_type', 'segmentation_id') - return (headers, - (osc_utils.get_dict_properties( - s, columns, - ) for s in data[SUB_PORTS])) + network_client = self.app.client_manager.network + trunk_id = network_client.find_trunk( + parsed_args.trunk, + ignore_missing=False, + ) + data = network_client.get_trunk_subports(trunk_id) + headers: tuple[str, ...] = ( + 'Port', + 'Segmentation Type', + 'Segmentation ID', + ) + columns: tuple[str, ...] 
= ( + 'port_id', + 'segmentation_type', + 'segmentation_id', + ) + return ( + headers, + ( + osc_utils.get_dict_properties( + s, + columns, + ) + for s in data[SUB_PORTS] + ), + ) class UnsetNetworkTrunk(command.Command): """Unset subports from a given network trunk""" def get_parser(self, prog_name): - parser = super(UnsetNetworkTrunk, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'trunk', metavar="", - help=_("Unset subports from this trunk (name or ID)") + help=_("Unset subports from this trunk (name or ID)"), ) parser.add_argument( '--subport', metavar="", required=True, - action='append', dest='unset_subports', - help=_("Subport to delete (name or ID of the port) " - "(--subport) option can be repeated") + action='append', + dest='unset_subports', + help=_( + "Subport to unset (name or ID of the port) " + "(repeat option to unset multiple subports)" + ), ) return parser def take_action(self, parsed_args): - client = self.app.client_manager.network - attrs = _get_attrs_for_subports(self.app.client_manager, parsed_args) - trunk_id = client.find_trunk(parsed_args.trunk) - client.delete_trunk_subports(trunk_id, attrs) + network_client = self.app.client_manager.network + attrs = _get_attrs_for_subports(network_client, parsed_args) + trunk_id = network_client.find_trunk( + parsed_args.trunk, + ignore_missing=False, + ) + network_client.delete_trunk_subports(trunk_id, attrs) _formatters = { @@ -322,17 +358,14 @@ def take_action(self, parsed_args): def _get_columns(item): - column_map = {} hidden_columns = ['location', 'tenant_id'] return osc_utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) -def _get_attrs_for_trunk(client_manager, parsed_args): - attrs = {} +def _get_attrs_for_trunk(network_client, identity_client, parsed_args): + attrs: dict[str, ty.Any] = {} if parsed_args.name is not None: attrs['name'] = str(parsed_args.name) if parsed_args.description is not None: @@ -342,16 +375,18 @@ def _get_attrs_for_trunk(client_manager, parsed_args): if parsed_args.disable: attrs['admin_state_up'] = False if 'parent_port' in parsed_args and parsed_args.parent_port is not None: - port_id = client_manager.network.find_port( - parsed_args.parent_port)['id'] + port_id = network_client.find_port( + parsed_args.parent_port, + ignore_missing=False, + ).id attrs['port_id'] = port_id if 'add_subports' in parsed_args and parsed_args.add_subports is not None: - attrs[SUB_PORTS] = _format_subports(client_manager, - parsed_args.add_subports) + attrs[SUB_PORTS] = _format_subports( + network_client, parsed_args.add_subports + ) # "trunk set" command doesn't support setting project. 
if 'project' in parsed_args and parsed_args.project is not None: - identity_client = client_manager.identity project_id = identity_utils.find_project( identity_client, parsed_args.project, @@ -362,20 +397,26 @@ def _get_attrs_for_trunk(client_manager, parsed_args): return attrs -def _format_subports(client_manager, subports): +def _format_subports(network_client, subports): attrs = [] for subport in subports: subport_attrs = {} if subport.get('port'): - port_id = client_manager.network.find_port(subport['port'])['id'] + port_id = network_client.find_port( + subport['port'], + ignore_missing=False, + ).id subport_attrs['port_id'] = port_id if subport.get('segmentation-id'): try: subport_attrs['segmentation_id'] = int( - subport['segmentation-id']) + subport['segmentation-id'] + ) except ValueError: - msg = (_("Segmentation-id '%s' is not an integer") % - subport['segmentation-id']) + msg = ( + _("Segmentation-id '%s' is not an integer") + % subport['segmentation-id'] + ) raise exceptions.CommandError(msg) if subport.get('segmentation-type'): subport_attrs['segmentation_type'] = subport['segmentation-type'] @@ -383,20 +424,20 @@ def _format_subports(client_manager, subports): return attrs -def _get_attrs_for_subports(client_manager, parsed_args): - attrs = {} +def _get_attrs_for_subports(network_client, parsed_args): + attrs = [] if 'set_subports' in parsed_args and parsed_args.set_subports is not None: - attrs = _format_subports(client_manager, - parsed_args.set_subports) - if ('unset_subports' in parsed_args and - parsed_args.unset_subports is not None): + attrs = _format_subports(network_client, parsed_args.set_subports) + if ( + 'unset_subports' in parsed_args + and parsed_args.unset_subports is not None + ): subports_list = [] for subport in parsed_args.unset_subports: - port_id = client_manager.network.find_port(subport)['id'] + port_id = network_client.find_port( + subport, + ignore_missing=False, + )['id'] subports_list.append({'port_id': port_id}) attrs = subports_list return attrs - - -def _get_id(client, id_or_name, resource): - return client.find_resource(resource, str(id_or_name))['id'] diff --git a/openstackclient/network/v2/port.py b/openstackclient/network/v2/port.py index 8bf14d6a73..e1205153e6 100644 --- a/openstackclient/network/v2/port.py +++ b/openstackclient/network/v2/port.py @@ -17,15 +17,16 @@ import copy import json import logging +import typing as ty from cliff import columns as cliff_columns from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils from osc_lib.utils import tags as _tag +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -33,11 +34,27 @@ LOG = logging.getLogger(__name__) -class AdminStateColumn(cliff_columns.FormattableColumn): +class AdminStateColumn(cliff_columns.FormattableColumn[bool]): def human_readable(self): return 'UP' if self._value else 'DOWN' +class SubPortColumn(format_columns.ListDictColumn): + _value: ty.Any + + def _retrieve_subports(self): + if isinstance(self._value, dict): + self._value = self._value['sub_ports'] + + def human_readable(self): + self._retrieve_subports() + return super().human_readable() + + def machine_readable(self): + self._retrieve_subports() + return super().machine_readable() + + _formatters = { 'admin_state_up': AdminStateColumn, 'is_admin_state_up': AdminStateColumn, 
@@ -52,23 +69,55 @@ def human_readable(self): 'security_group_ids': format_columns.ListColumn, 'tags': format_columns.ListColumn, } +_list_formatters = copy.deepcopy(_formatters) +_list_formatters.update({'trunk_details': SubPortColumn}) def _get_columns(item): - column_map = { - 'binding:host_id': 'binding_host_id', - 'binding:profile': 'binding_profile', - 'binding:vif_details': 'binding_vif_details', - 'binding:vif_type': 'binding_vif_type', - 'binding:vnic_type': 'binding_vnic_type', - 'is_admin_state_up': 'admin_state_up', - 'is_port_security_enabled': 'port_security_enabled', + column_data_mapping = { + 'admin_state_up': 'is_admin_state_up', + 'allowed_address_pairs': 'allowed_address_pairs', + 'binding_host_id': 'binding_host_id', + 'binding_profile': 'binding_profile', + 'binding_vif_details': 'binding_vif_details', + 'binding_vif_type': 'binding_vif_type', + 'binding_vnic_type': 'binding_vnic_type', + 'created_at': 'created_at', + 'data_plane_status': 'data_plane_status', + 'description': 'description', + 'device_id': 'device_id', + 'device_owner': 'device_owner', + 'device_profile': 'device_profile', + 'dns_assignment': 'dns_assignment', + 'dns_domain': 'dns_domain', + 'dns_name': 'dns_name', + 'extra_dhcp_opts': 'extra_dhcp_opts', + 'fixed_ips': 'fixed_ips', + 'hardware_offload_type': 'hardware_offload_type', + 'hints': 'hints', + 'id': 'id', + 'ip_allocation': 'ip_allocation', + 'mac_address': 'mac_address', + 'name': 'name', + 'network_id': 'network_id', + 'numa_affinity_policy': 'numa_affinity_policy', + 'port_security_enabled': 'is_port_security_enabled', + 'project_id': 'project_id', + 'propagate_uplink_status': 'propagate_uplink_status', + 'resource_request': 'resource_request', + 'revision_number': 'revision_number', + 'qos_network_policy_id': 'qos_network_policy_id', + 'qos_policy_id': 'qos_policy_id', + 'security_group_ids': 'security_group_ids', + 'status': 'status', + 'tags': 'tags', + 'trunk_details': 'trunk_details', + 'trusted': 'trusted', + 'updated_at': 'updated_at', } - hidden_columns = ['location', 'tenant_id'] - return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + return ( + tuple(column_data_mapping.keys()), + tuple(column_data_mapping.values()), ) @@ -79,7 +128,6 @@ class JSONKeyValueAction(argparse.Action): """ def __call__(self, parser, namespace, values, option_string=None): - # Make sure we have an empty dict rather than None if getattr(namespace, self.dest, None) is None: setattr(namespace, self.dest, {}) @@ -92,10 +140,12 @@ def __call__(self, parser, namespace, values, option_string=None): if '=' in values: current_dest.update([values.split('=', 1)]) else: - msg = _("Expected '=' or JSON data for option " - "%(option)s, but encountered JSON parsing error: " - "%(error)s") % {"option": option_string, "error": e} - raise argparse.ArgumentTypeError(msg) + msg = _( + "Expected '=' or JSON data for option " + "%(option)s, but encountered JSON parsing error: " + "%(error)s" + ) % {"option": option_string, "error": e} + raise argparse.ArgumentError(self, msg) def _get_attrs(client_manager, parsed_args): @@ -151,28 +201,52 @@ def _get_attrs(client_manager, parsed_args): if parsed_args.qos_policy: attrs['qos_policy_id'] = client_manager.network.find_qos_policy( - parsed_args.qos_policy, ignore_missing=False).id + parsed_args.qos_policy, ignore_missing=False + ).id - if ('enable_uplink_status_propagation' in parsed_args and - parsed_args.enable_uplink_status_propagation): + if ( + 'enable_uplink_status_propagation' in 
parsed_args + and parsed_args.enable_uplink_status_propagation + ): attrs['propagate_uplink_status'] = True - if ('disable_uplink_status_propagation' in parsed_args and - parsed_args.disable_uplink_status_propagation): + if ( + 'disable_uplink_status_propagation' in parsed_args + and parsed_args.disable_uplink_status_propagation + ): attrs['propagate_uplink_status'] = False - if ('numa_policy_required' in parsed_args and - parsed_args.numa_policy_required): + if ( + 'numa_policy_required' in parsed_args + and parsed_args.numa_policy_required + ): attrs['numa_affinity_policy'] = 'required' - elif ('numa_policy_preferred' in parsed_args and - parsed_args.numa_policy_preferred): + elif ( + 'numa_policy_preferred' in parsed_args + and parsed_args.numa_policy_preferred + ): attrs['numa_affinity_policy'] = 'preferred' - elif ('numa_policy_legacy' in parsed_args and - parsed_args.numa_policy_legacy): + elif ( + 'numa_policy_socket' in parsed_args and parsed_args.numa_policy_socket + ): + attrs['numa_affinity_policy'] = 'socket' + elif ( + 'numa_policy_legacy' in parsed_args and parsed_args.numa_policy_legacy + ): attrs['numa_affinity_policy'] = 'legacy' if 'device_profile' in parsed_args and parsed_args.device_profile: attrs['device_profile'] = parsed_args.device_profile + if ( + 'hardware_offload_type' in parsed_args + and parsed_args.hardware_offload_type + ): + attrs['hardware_offload_type'] = parsed_args.hardware_offload_type + if parsed_args.not_trusted: + attrs['trusted'] = False + if parsed_args.trusted: + attrs['trusted'] = True + return attrs @@ -191,8 +265,9 @@ def _prepare_fixed_ips(client_manager, parsed_args): if 'subnet' in ip_spec: subnet_name_id = ip_spec['subnet'] if subnet_name_id: - _subnet = client.find_subnet(subnet_name_id, - ignore_missing=False) + _subnet = client.find_subnet( + subnet_name_id, ignore_missing=False + ) ip_spec['subnet_id'] = _subnet.id del ip_spec['subnet'] @@ -220,86 +295,140 @@ def _prepare_filter_fixed_ips(client_manager, parsed_args): if 'subnet' in ip_spec: subnet_name_id = ip_spec['subnet'] if subnet_name_id: - _subnet = client.find_subnet(subnet_name_id, - ignore_missing=False) - ips.append('subnet_id=%s' % _subnet.id) + _subnet = client.find_subnet( + subnet_name_id, ignore_missing=False + ) + ips.append(f'subnet_id={_subnet.id}') if 'ip-address' in ip_spec: - ips.append('ip_address=%s' % ip_spec['ip-address']) + ips.append('ip_address={}'.format(ip_spec['ip-address'])) if 'ip-substring' in ip_spec: - ips.append('ip_address_substr=%s' % ip_spec['ip-substring']) + ips.append('ip_address_substr={}'.format(ip_spec['ip-substring'])) return ips -def _add_updatable_args(parser): +def _add_updatable_args(parser, create=False): parser.add_argument( '--description', metavar='', - help=_("Description of this port") + help=_("Description of this port"), ) parser.add_argument( - '--device', - metavar='', - help=_("Port device ID") + '--device', metavar='', help=_("Port device ID") ) parser.add_argument( '--mac-address', metavar='', - help=_("MAC address of this port (admin only)") + help=( + _("MAC address of this port") + if create + else _("MAC address of this port (admin only)") + ), ) parser.add_argument( '--device-owner', metavar='', - help=_("Device owner of this port. This is the entity that uses " - "the port (for example, network:dhcp).") + help=_( + "Device owner of this port. This is the entity that uses " + "the port (for example, network:dhcp)." 
+ ), ) parser.add_argument( '--vnic-type', metavar='', choices=( - 'direct', 'direct-physical', 'macvtap', - 'normal', 'baremetal', 'virtio-forwarder', 'vdpa', 'remote-managed' + 'direct', + 'direct-physical', + 'macvtap', + 'normal', + 'baremetal', + 'virtio-forwarder', + 'vdpa', + 'remote-managed', ), help=_( "VNIC type for this port (direct | direct-physical | " "macvtap | normal | baremetal | virtio-forwarder | vdpa | " - "remote-managed, " - "default: normal)" + "remote-managed) " + "(default: normal)" ), ) parser.add_argument( '--host', metavar='', - help=_("Allocate port on host (ID only)") + help=_("Allocate port on host (ID only)"), ) parser.add_argument( '--dns-domain', metavar='dns-domain', - help=_("Set DNS domain to this port " - "(requires dns_domain extension for ports)") + help=_( + "Set DNS domain to this port " + "(requires dns_domain extension for ports)" + ), ) parser.add_argument( '--dns-name', metavar='', - help=_("Set DNS name for this port " - "(requires DNS integration extension)") + help=_( + "Set DNS name for this port (requires DNS integration extension)" + ), ) numa_affinity_policy_group = parser.add_mutually_exclusive_group() numa_affinity_policy_group.add_argument( '--numa-policy-required', action='store_true', - help=_("NUMA affinity policy required to schedule this port") + help=_("NUMA affinity policy required to schedule this port"), ) numa_affinity_policy_group.add_argument( '--numa-policy-preferred', action='store_true', - help=_("NUMA affinity policy preferred to schedule this port") + help=_("NUMA affinity policy preferred to schedule this port"), + ) + numa_affinity_policy_group.add_argument( + '--numa-policy-socket', + action='store_true', + help=_("NUMA affinity policy socket to schedule this port"), ) numa_affinity_policy_group.add_argument( '--numa-policy-legacy', action='store_true', - help=_("NUMA affinity policy using legacy mode to schedule this port") + help=_("NUMA affinity policy using legacy mode to schedule this port"), + ) + parser.add_argument( + '--hint', + metavar='', + action=JSONKeyValueAction, + default={}, + help=_( + 'Port hints as ALIAS=VALUE or as JSON. ' + 'Valid hint aliases/values: ' + 'ovs-tx-steering=thread, ovs-tx-steering=hash. ' + 'Valid JSON values are as specified by the Neutron API. ' + '(requires port-hints extension) ' + '(requires port-hint-ovs-tx-steering extension for alias: ' + 'ovs-tx-steering) ' + '(repeat option to set multiple hints).' + ), + ) + port_trusted = parser.add_mutually_exclusive_group() + port_trusted.add_argument( + '--trusted', + action='store_true', + help=_( + "Set port to be trusted. This will be populated into the " + "'binding:profile' dictionary and passed to the services " + "which expect it in this dictionary (for example, Nova)." + ), + ) + port_trusted.add_argument( + '--not-trusted', + action='store_true', + help=_( + "Set port to be not trusted. This will be populated into the " + "'binding:profile' dictionary and passed to the services " + "which expect it in this dictionary (for example, Nova)." + ), ) @@ -329,75 +458,103 @@ def _convert_extra_dhcp_options(parsed_args): return dhcp_options +# When we have multiple hints, we'll need to refactor this to allow +# arbitrary combinations. But until then let's have it as simple as possible. 
+def _validate_port_hints(hints): + if hints not in ( + {}, + # by hint alias + {'ovs-tx-steering': 'thread'}, + {'ovs-tx-steering': 'hash'}, + # by fully specified value of the port's hints field + {'openvswitch': {'other_config': {'tx-steering': 'thread'}}}, + {'openvswitch': {'other_config': {'tx-steering': 'hash'}}}, + ): + msg = _("Invalid value to --hints, see --help for valid values.") + raise exceptions.CommandError(msg) + + +# When we have multiple hints, we'll need to refactor this to expand aliases +# without losing other hints. But until then let's have it as simple as +# possible. +def _expand_port_hint_aliases(hints): + if hints == {'ovs-tx-steering': 'thread'}: + return {'openvswitch': {'other_config': {'tx-steering': 'thread'}}} + elif hints == {'ovs-tx-steering': 'hash'}: + return {'openvswitch': {'other_config': {'tx-steering': 'hash'}}} + else: + return hints + + class CreatePort(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create a new port") def get_parser(self, prog_name): - parser = super(CreatePort, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--network', metavar='', required=True, - help=_("Network this port belongs to (name or ID)") + help=_("Network this port belongs to (name or ID)"), ) - _add_updatable_args(parser) + _add_updatable_args(parser, create=True) fixed_ip = parser.add_mutually_exclusive_group() fixed_ip.add_argument( '--fixed-ip', metavar='subnet=,ip-address=', action=parseractions.MultiKeyValueAction, optional_keys=['subnet', 'ip-address'], - help=_("Desired IP and/or subnet for this port (name or ID): " - "subnet=,ip-address= " - "(repeat option to set multiple fixed IP addresses)") + help=_( + "Desired IP and/or subnet for this port (name or ID): " + "subnet=,ip-address= " + "(repeat option to set multiple fixed IP addresses)" + ), ) fixed_ip.add_argument( '--no-fixed-ip', action='store_true', - help=_("No IP or subnet for this port.") + help=_("No IP or subnet set for this port"), ) parser.add_argument( '--binding-profile', metavar='', action=JSONKeyValueAction, - help=_("Custom data to be passed as binding:profile. Data may " - "be passed as = or JSON. " - "(repeat option to set multiple binding:profile data)") + help=_( + "Custom data to be passed as binding:profile. Data may " + "be passed as = or JSON " + "(repeat option to set multiple binding:profile data)." 
+ ), ) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( '--enable', action='store_true', default=True, - help=_("Enable port (default)") + help=_("Enable port (default)"), ) admin_group.add_argument( - '--disable', - action='store_true', - help=_("Disable port") + '--disable', action='store_true', help=_("Disable port") ) uplink_status_group = parser.add_mutually_exclusive_group() uplink_status_group.add_argument( '--enable-uplink-status-propagation', action='store_true', - help=_("Enable uplink status propagate") + help=_("Enable uplink status propagation (default)"), ) uplink_status_group.add_argument( '--disable-uplink-status-propagation', action='store_true', - help=_("Disable uplink status propagate (default)") + help=_("Disable uplink status propagation"), ) parser.add_argument( '--project', metavar='', - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( - 'name', - metavar='', - help=_("Name of this port") + 'name', metavar='', help=_("Name of this port") ) parser.add_argument( '--extra-dhcp-option', @@ -407,40 +564,46 @@ def get_parser(self, prog_name): dest='extra_dhcp_options', required_keys=['name'], optional_keys=['value', "ip-version"], - help=_('Extra DHCP options to be assigned to this port: ' - 'name=[,value=,ip-version={4,6}] ' - '(repeat option to set multiple extra DHCP options)')) + help=_( + 'Extra DHCP options to be assigned to this port: ' + 'name=[,value=,ip-version={4,6}] ' + '(repeat option to set multiple extra DHCP options)' + ), + ) secgroups = parser.add_mutually_exclusive_group() secgroups.add_argument( '--security-group', metavar='', action='append', - dest='security_group', - help=_("Security group to associate with this port (name or ID) " - "(repeat option to set multiple security groups)") + dest='security_groups', + help=_( + "Security group to associate with this port (name or ID) " + "(repeat option to set multiple security groups)" + ), ) secgroups.add_argument( '--no-security-group', - dest='no_security_group', - action='store_true', - help=_("Associate no security groups with this port") + action='store_const', + const=[], + dest='security_groups', + help=_("Associate no security groups with this port"), ) parser.add_argument( '--qos-policy', metavar='', - help=_("Attach QoS policy to this port (name or ID)") + help=_("Attach QoS policy to this port (name or ID)"), ) port_security = parser.add_mutually_exclusive_group() port_security.add_argument( '--enable-port-security', action='store_true', - help=_("Enable port security for this port (Default)") + help=_("Enable port security for this port (default)"), ) port_security.add_argument( '--disable-port-security', action='store_true', - help=_("Disable port security for this port") + help=_("Disable port security for this port"), ) parser.add_argument( '--allowed-address', @@ -449,23 +612,35 @@ def get_parser(self, prog_name): dest='allowed_address_pairs', required_keys=['ip-address'], optional_keys=['mac-address'], - help=_("Add allowed-address pair associated with this port: " - "ip-address=[,mac-address=] " - "(repeat option to set multiple allowed-address pairs)") + help=_( + "Add allowed-address pair associated with this port: " + "ip-address=[,mac-address=] " + "(repeat option to set multiple allowed-address pairs)" + ), ) parser.add_argument( '--device-profile', metavar='', - help=_('Cyborg port device profile') + help=_('Port device profile'), + ) + 
parser.add_argument( + '--hardware-offload-type', + metavar='', + dest='hardware_offload_type', + help=_( + 'Hardware offload type this port will request when ' + 'attached to the network backend' + ), ) _tag.add_tag_option_to_parser_for_create(parser, _('port')) return parser def take_action(self, parsed_args): - client = self.app.client_manager.network - _network = client.find_network(parsed_args.network, - ignore_missing=False) - parsed_args.network = _network.id + network_client = self.app.client_manager.network + network = network_client.find_network( + parsed_args.network, ignore_missing=False + ) + parsed_args.network = network.id _prepare_fixed_ips(self.app.client_manager, parsed_args) attrs = _get_attrs(self.app.client_manager, parsed_args) @@ -477,27 +652,55 @@ def take_action(self, parsed_args): elif parsed_args.no_fixed_ip: attrs['fixed_ips'] = [] - if parsed_args.security_group: - attrs['security_group_ids'] = [client.find_security_group( - sg, ignore_missing=False).id - for sg in - parsed_args.security_group] - elif parsed_args.no_security_group: - attrs['security_group_ids'] = [] + if parsed_args.security_groups is not None: + attrs['security_group_ids'] = [ + network_client.find_security_group(sg, ignore_missing=False).id + for sg in parsed_args.security_groups + ] if parsed_args.allowed_address_pairs: - attrs['allowed_address_pairs'] = ( - _convert_address_pairs(parsed_args)) + attrs['allowed_address_pairs'] = _convert_address_pairs( + parsed_args + ) if parsed_args.extra_dhcp_options: attrs["extra_dhcp_opts"] = _convert_extra_dhcp_options(parsed_args) if parsed_args.qos_policy: - attrs['qos_policy_id'] = client.find_qos_policy( - parsed_args.qos_policy, ignore_missing=False).id + attrs['qos_policy_id'] = network_client.find_qos_policy( + parsed_args.qos_policy, ignore_missing=False + ).id + + if parsed_args.hint: + _validate_port_hints(parsed_args.hint) + expanded_hints = _expand_port_hint_aliases(parsed_args.hint) + try: + network_client.find_extension( + 'port-hints', ignore_missing=False + ) + except Exception as e: + msg = _('Not supported by Network API: %(e)s') % {'e': e} + raise exceptions.CommandError(msg) + if ( + 'openvswitch' in expanded_hints + and 'other_config' in expanded_hints['openvswitch'] + and 'tx-steering' + in expanded_hints['openvswitch']['other_config'] + ): + try: + network_client.find_extension( + 'port-hint-ovs-tx-steering', ignore_missing=False + ) + except Exception as e: + msg = _('Not supported by Network API: %(e)s') % {'e': e} + raise exceptions.CommandError(msg) + attrs['hints'] = expanded_hints set_tags_in_post = bool( - client.find_extension('tag-ports-during-bulk-creation')) + network_client.find_extension( + 'tag-ports-during-bulk-creation', ignore_missing=True + ) + ) if set_tags_in_post: if parsed_args.no_tag: attrs['tags'] = [] @@ -505,15 +708,15 @@ def take_action(self, parsed_args): attrs['tags'] = list(set(parsed_args.tags)) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) - with common.check_missing_extension_if_error( - self.app.client_manager.network, attrs): - obj = client.create_port(**attrs) + with common.check_missing_extension_if_error(network_client, attrs): + obj = network_client.create_port(**attrs) if not set_tags_in_post: # tags cannot be set when created, so tags need to be set later. 
- _tag.update_tags_for_set(client, obj, parsed_args) + _tag.update_tags_for_set(network_client, obj, parsed_args) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters=_formatters) @@ -525,12 +728,12 @@ class DeletePort(command.Command): _description = _("Delete port(s)") def get_parser(self, prog_name): - parser = super(DeletePort, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'port', metavar="", nargs="+", - help=_("Port(s) to delete (name or ID)") + help=_("Port(s) to delete (name or ID)"), ) return parser @@ -544,14 +747,20 @@ def take_action(self, parsed_args): client.delete_port(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete port with " - "name or ID '%(port)s': %(e)s"), - {'port': port, 'e': e}) + LOG.error( + _( + "Failed to delete port with " + "name or ID '%(port)s': %(e)s" + ), + {'port': port, 'e': e}, + ) if result > 0: total = len(parsed_args.port) - msg = (_("%(result)s of %(total)s ports failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s ports failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -561,28 +770,32 @@ class ListPort(command.Lister): _description = _("List ports") def get_parser(self, prog_name): - parser = super(ListPort, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--device-owner', metavar='', - help=_("List only ports with the specified device owner. " - "This is the entity that uses the port (for example, " - "network:dhcp).") + help=_( + "List only ports with the specified device owner. " + "This is the entity that uses the port (for example, " + "network:dhcp)." 
+ ), ) parser.add_argument( '--host', metavar='', - help=_("List only ports bound to this host ID")) + help=_("List only ports bound to this host ID"), + ) parser.add_argument( '--network', metavar='', - help=_("List only ports connected to this network (name or ID)")) + help=_("List only ports connected to this network (name or ID)"), + ) device_group = parser.add_mutually_exclusive_group() device_group.add_argument( '--router', metavar='', dest='router', - help=_("List only ports attached to this router (name or ID)") + help=_("List only ports attached to this router (name or ID)"), ) device_group.add_argument( '--server', @@ -592,47 +805,64 @@ def get_parser(self, prog_name): device_group.add_argument( '--device-id', metavar='', - help=_("List only ports with the specified device ID") + help=_("List only ports with the specified device ID"), ) parser.add_argument( '--mac-address', metavar='', - help=_("List only ports with this MAC address") + help=_("List only ports with the specified MAC address"), ) parser.add_argument( '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) parser.add_argument( '--project', metavar='', - help=_("List ports according to their project (name or ID)") + help=_("List only ports with the specified project (name or ID)"), ) parser.add_argument( '--name', metavar='', - help=_("List ports according to their name") + help=_("List only ports with the specified name"), ) parser.add_argument( '--security-group', action='append', dest='security_groups', metavar='', - help=_("List only ports associated with this security group") + help=_("List only ports associated with this security group"), + ) + # the API sadly reports these in upper case and while it would be + # wonderful to plaster over this ugliness client-side, there are + # already users in the wild doing this in upper case that we need to + # support + parser.add_argument( + '--status', + metavar='', + choices=('ACTIVE', 'BUILD', 'DOWN', 'ERROR'), + help=_( + "List only ports with the specified status " + "('ACTIVE', 'BUILD', 'DOWN', 'ERROR')" + ), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--fixed-ip', - metavar=('subnet=,ip-address=,' - 'ip-substring='), + metavar=( + 'subnet=,ip-address=,' + 'ip-substring=' + ), action=parseractions.MultiKeyValueAction, optional_keys=['subnet', 'ip-address', 'ip-substring'], - help=_("Desired IP and/or subnet for filtering ports " - "(name or ID): subnet=,ip-address=," - "ip-substring= " - "(repeat option to set multiple fixed IP addresses)"), + help=_( + "Desired IP and/or subnet for filtering ports " + "(name or ID): subnet=,ip-address=," + "ip-substring= " + "(repeat option to filter multiple fixed IP addresses)" + ), ) _tag.add_tag_filtering_option_to_parser(parser, _('ports')) return parser @@ -641,46 +871,57 @@ def take_action(self, parsed_args): network_client = self.app.client_manager.network identity_client = self.app.client_manager.identity - columns = ( + columns = [ 'id', 'name', 'mac_address', 'fixed_ips', 'status', - ) - column_headers = ( + ] + column_headers = [ 'ID', 'Name', 'MAC Address', 'Fixed IP Addresses', 'Status', - ) + ] filters = {} if parsed_args.long: - columns += ('security_group_ids', 'device_owner', 'tags') - column_headers += ('Security Groups', 'Device Owner', 'Tags') + columns.extend( + ['security_groups', 'device_owner', 'tags', 'trunk_details'] + ) + column_headers.extend( + ['Security Groups', 'Device Owner', 
'Tags', 'Trunk subports'] + ) + if parsed_args.device_owner is not None: filters['device_owner'] = parsed_args.device_owner if parsed_args.device_id is not None: filters['device_id'] = parsed_args.device_id if parsed_args.router: - _router = network_client.find_router(parsed_args.router, - ignore_missing=False) + _router = network_client.find_router( + parsed_args.router, ignore_missing=False + ) filters['device_id'] = _router.id if parsed_args.server: compute_client = self.app.client_manager.compute - server = utils.find_resource(compute_client.servers, - parsed_args.server) + server = compute_client.find_server( + parsed_args.server, + ignore_missing=False, + ) filters['device_id'] = server.id if parsed_args.host: filters['binding:host_id'] = parsed_args.host if parsed_args.network: - network = network_client.find_network(parsed_args.network, - ignore_missing=False) + network = network_client.find_network( + parsed_args.network, ignore_missing=False + ) filters['network_id'] = network.id if parsed_args.mac_address: filters['mac_address'] = parsed_args.mac_address + if parsed_args.status: + filters['status'] = parsed_args.status if parsed_args.project: project_id = identity_common.find_project( identity_client, @@ -692,21 +933,32 @@ def take_action(self, parsed_args): filters['name'] = parsed_args.name if parsed_args.fixed_ip: filters['fixed_ips'] = _prepare_filter_fixed_ips( - self.app.client_manager, parsed_args) + self.app.client_manager, parsed_args + ) if parsed_args.security_groups: - filters['security_groups'] = parsed_args.security_groups + filters['security_group_ids'] = parsed_args.security_groups _tag.get_tag_filtering_args(parsed_args, filters) data = network_client.ports(fields=columns, **filters) - headers, attrs = utils.calculate_header_and_attrs( - column_headers, columns, parsed_args) - return (headers, - (utils.get_item_properties( - s, attrs, - formatters=_formatters, - ) for s in data)) + if parsed_args.long: + columns = [ + 'security_group_ids' if item == 'security_groups' else item + for item in columns + ] + + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters=_list_formatters, + ) + for s in data + ), + ) # TODO(abhiraut): Use the SDK resource mapped attribute names once the @@ -715,90 +967,94 @@ class SetPort(common.NeutronCommandWithExtraArgs): _description = _("Set port properties") def get_parser(self, prog_name): - parser = super(SetPort, self).get_parser(prog_name) + parser = super().get_parser(prog_name) _add_updatable_args(parser) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( '--enable', action='store_true', default=None, - help=_("Enable port") + help=_("Enable port"), ) admin_group.add_argument( - '--disable', - action='store_true', - help=_("Disable port") + '--disable', action='store_true', help=_("Disable port") ) parser.add_argument( - '--name', - metavar="", - help=_("Set port name") + '--name', metavar="", help=_("Set port name") ) parser.add_argument( '--fixed-ip', metavar='subnet=,ip-address=', action=parseractions.MultiKeyValueAction, optional_keys=['subnet', 'ip-address'], - help=_("Desired IP and/or subnet for this port (name or ID): " - "subnet=,ip-address= " - "(repeat option to set multiple fixed IP addresses)") + help=_( + "Desired IP and/or subnet for this port (name or ID): " + "subnet=,ip-address= " + "(repeat option to set multiple fixed IP addresses)" + ), ) parser.add_argument( '--no-fixed-ip', action='store_true', - help=_("Clear existing information of fixed IP 
addresses." - "Specify both --fixed-ip and --no-fixed-ip " - "to overwrite the current fixed IP addresses.") + help=_( + "Clear existing information of fixed IP addresses. " + "Specify both --fixed-ip and --no-fixed-ip " + "to overwrite the current fixed IP addresses." + ), ) parser.add_argument( '--binding-profile', metavar='', action=JSONKeyValueAction, - help=_("Custom data to be passed as binding:profile. Data may " - "be passed as = or JSON. " - "(repeat option to set multiple binding:profile data)") + help=_( + "Custom data to be passed as binding:profile. Data may " + "be passed as = or JSON " + "(repeat option to set multiple binding:profile data)." + ), ) parser.add_argument( '--no-binding-profile', action='store_true', - help=_("Clear existing information of binding:profile. " - "Specify both --binding-profile and --no-binding-profile " - "to overwrite the current binding:profile information.") + help=_( + "Clear existing information of binding:profile. " + "Specify both --binding-profile and --no-binding-profile " + "to overwrite the current binding:profile information." + ), ) parser.add_argument( '--qos-policy', metavar='', - help=_("Attach QoS policy to this port (name or ID)") + help=_("Attach QoS policy to this port (name or ID)"), ) parser.add_argument( - 'port', - metavar="", - help=_("Port to modify (name or ID)") + 'port', metavar="", help=_("Port to modify (name or ID)") ) parser.add_argument( '--security-group', metavar='', action='append', - dest='security_group', - help=_("Security group to associate with this port (name or ID) " - "(repeat option to set multiple security groups)") + dest='security_groups', + help=_( + "Security group to associate with this port (name or ID) " + "(repeat option to set multiple security groups)" + ), ) parser.add_argument( '--no-security-group', dest='no_security_group', action='store_true', - help=_("Clear existing security groups associated with this port") + help=_("Clear existing security groups associated with this port"), ) port_security = parser.add_mutually_exclusive_group() port_security.add_argument( '--enable-port-security', action='store_true', - help=_("Enable port security for this port") + help=_("Enable port security for this port"), ) port_security.add_argument( '--disable-port-security', action='store_true', - help=_("Disable port security for this port") + help=_("Disable port security for this port"), ) parser.add_argument( '--allowed-address', @@ -807,18 +1063,22 @@ def get_parser(self, prog_name): dest='allowed_address_pairs', required_keys=['ip-address'], optional_keys=['mac-address'], - help=_("Add allowed-address pair associated with this port: " - "ip-address=[,mac-address=] " - "(repeat option to set multiple allowed-address pairs)") + help=_( + "Add allowed-address pair associated with this port: " + "ip-address=[,mac-address=] " + "(repeat option to set multiple allowed-address pairs)" + ), ) parser.add_argument( '--no-allowed-address', dest='no_allowed_address_pair', action='store_true', - help=_("Clear existing allowed-address pairs associated " - "with this port. " - "(Specify both --allowed-address and --no-allowed-address " - "to overwrite the current allowed-address pairs)") + help=_( + "Clear existing allowed-address pairs associated " + "with this port. " + "Specify both --allowed-address and --no-allowed-address " + "to overwrite the current allowed-address pairs." 
+ ), ) parser.add_argument( '--extra-dhcp-option', @@ -828,17 +1088,34 @@ def get_parser(self, prog_name): dest='extra_dhcp_options', required_keys=['name'], optional_keys=['value', "ip-version"], - help=_('Extra DHCP options to be assigned to this port: ' - 'name=[,value=,ip-version={4,6}] ' - '(repeat option to set multiple extra DHCP options)')) + help=_( + 'Extra DHCP options to be assigned to this port: ' + 'name=[,value=,ip-version={4,6}] ' + '(repeat option to set multiple extra DHCP options)' + ), + ) parser.add_argument( '--data-plane-status', metavar='', choices=['ACTIVE', 'DOWN'], - help=_("Set data plane status of this port (ACTIVE | DOWN). " - "Unset it to None with the 'port unset' command " - "(requires data plane status extension)") + help=_( + "Set data plane status of this port (ACTIVE | DOWN). " + "Unset it to None with the 'port unset' command " + "(requires data plane status extension)." + ), ) + uplink_status_group = parser.add_mutually_exclusive_group() + uplink_status_group.add_argument( + '--enable-uplink-status-propagation', + action='store_true', + help=_('Enable uplink status propagation'), + ) + uplink_status_group.add_argument( + '--disable-uplink-status-propagation', + action='store_true', + help=_('Disable uplink status propagation'), + ) + _tag.add_tag_option_to_parser_for_set(parser, _('port')) return parser @@ -869,7 +1146,7 @@ def take_action(self, parsed_args): if parsed_args.no_security_group: attrs['security_group_ids'] = [] - if parsed_args.security_group: + if parsed_args.security_groups: if 'security_group_ids' not in attrs: # NOTE(dtroyer): Get existing security groups, iterate the # list to force a new list object to be @@ -880,16 +1157,16 @@ def take_action(self, parsed_args): ] attrs['security_group_ids'].extend( client.find_security_group(sg, ignore_missing=False).id - for sg in parsed_args.security_group + for sg in parsed_args.security_groups ) if parsed_args.no_allowed_address_pair: attrs['allowed_address_pairs'] = [] if parsed_args.allowed_address_pairs: if 'allowed_address_pairs' not in attrs: - attrs['allowed_address_pairs'] = ( - [addr for addr in obj.allowed_address_pairs if addr] - ) + attrs['allowed_address_pairs'] = [ + addr for addr in obj.allowed_address_pairs if addr + ] attrs['allowed_address_pairs'].extend( _convert_address_pairs(parsed_args) ) @@ -900,12 +1177,42 @@ def take_action(self, parsed_args): if parsed_args.data_plane_status: attrs['data_plane_status'] = parsed_args.data_plane_status + if parsed_args.hint: + _validate_port_hints(parsed_args.hint) + expanded_hints = _expand_port_hint_aliases(parsed_args.hint) + try: + client.find_extension('port-hints', ignore_missing=False) + except Exception as e: + msg = _('Not supported by Network API: %(e)s') % {'e': e} + raise exceptions.CommandError(msg) + if ( + 'openvswitch' in expanded_hints + and 'other_config' in expanded_hints['openvswitch'] + and 'tx-steering' + in expanded_hints['openvswitch']['other_config'] + ): + try: + client.find_extension( + 'port-hint-ovs-tx-steering', ignore_missing=False + ) + except Exception as e: + msg = _('Not supported by Network API: %(e)s') % {'e': e} + raise exceptions.CommandError(msg) + attrs['hints'] = expanded_hints + + if parsed_args.not_trusted: + attrs['trusted'] = False + if parsed_args.trusted: + attrs['trusted'] = True + attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: with common.check_missing_extension_if_error( - 
self.app.client_manager.network, attrs): + self.app.client_manager.network, attrs + ): client.update_port(obj, **attrs) # tags is a subresource and it needs to be updated separately. @@ -916,11 +1223,9 @@ class ShowPort(command.ShowOne): _description = _("Display port details") def get_parser(self, prog_name): - parser = super(ShowPort, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'port', - metavar="", - help=_("Port to display (name or ID)") + 'port', metavar="", help=_("Port to display (name or ID)") ) return parser @@ -938,36 +1243,38 @@ class UnsetPort(common.NeutronUnsetCommandWithExtraArgs): _description = _("Unset port properties") def get_parser(self, prog_name): - parser = super(UnsetPort, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--fixed-ip', metavar='subnet=,ip-address=', action=parseractions.MultiKeyValueAction, optional_keys=['subnet', 'ip-address'], - help=_("Desired IP and/or subnet which should be " - "removed from this port (name or ID): subnet=," - "ip-address= (repeat option to unset multiple " - "fixed IP addresses)")) + help=_( + "Desired IP and/or subnet which should be " - "removed from this port (name or ID): subnet=," + "ip-address= (repeat option to unset multiple " + "fixed IP addresses)" + ), + ) parser.add_argument( '--binding-profile', metavar='', action='append', - help=_("Desired key which should be removed from binding:profile " - "(repeat option to unset multiple binding:profile data)")) + help=_( + "Desired key which should be removed from binding:profile " + "(repeat option to unset multiple binding:profile keys)" + ), + ) parser.add_argument( '--security-group', metavar='', action='append', - dest='security_group_ids', - help=_("Security group which should be removed this port (name " - "or ID) (repeat option to unset multiple security groups)") - ) - - parser.add_argument( - 'port', - metavar="", - help=_("Port to modify (name or ID)") + dest='security_groups', + help=_( + "Security group which should be removed from this port (name " + "or ID) (repeat option to unset multiple security groups)" + ), ) parser.add_argument( '--allowed-address', @@ -976,35 +1283,59 @@ def get_parser(self, prog_name): dest='allowed_address_pairs', required_keys=['ip-address'], optional_keys=['mac-address'], - help=_("Desired allowed-address pair which should be removed " - "from this port: ip-address=" - "[,mac-address=] (repeat option to unset " - "multiple allowed-address pairs)") + help=_( + "Desired allowed-address pair which should be removed " + "from this port: ip-address=" + "[,mac-address=] (repeat option to unset " + "multiple allowed-address pairs)" + ), ) parser.add_argument( '--qos-policy', action='store_true', default=False, - help=_("Remove the QoS policy attached to the port") + help=_("Remove the QoS policy attached to the port"), ) parser.add_argument( '--data-plane-status', action='store_true', - help=_("Clear existing information of data plane status") + help=_("Clear existing data plane status information"), ) parser.add_argument( '--numa-policy', action='store_true', - help=_("Clear existing NUMA affinity policy") + help=_("Clear existing NUMA affinity policy"), ) parser.add_argument( '--host', action='store_true', default=False, - help=_("Clear host binding for the port.") + help=_("Clear host binding for the port"), + ) + parser.add_argument( + '--hints', + action='store_true', + default=False, + help=_("Clear hints for the port"), + ) + parser.add_argument( +
'--device', + action='store_true', + default=False, + help=_("Clear device ID for the port."), + ) + parser.add_argument( + '--device-owner', + action='store_true', + default=False, + help=_("Clear device owner for the port."), ) - _tag.add_tag_option_to_parser_for_unset(parser, _('port')) + parser.add_argument( + 'port', + metavar="", + help=_("Port to modify (name or ID)"), + ) return parser @@ -1036,11 +1367,12 @@ def take_action(self, parsed_args): msg = _("Port does not contain binding-profile %s") % key raise exceptions.CommandError(msg) attrs['binding:profile'] = tmp_binding_profile - if parsed_args.security_group_ids: + if parsed_args.security_groups: try: - for sg in parsed_args.security_group_ids: + for sg in parsed_args.security_groups: sg_id = client.find_security_group( - sg, ignore_missing=False).id + sg, ignore_missing=False + ).id tmp_secgroups.remove(sg_id) except ValueError: msg = _("Port does not contain security group %s") % sg @@ -1062,9 +1394,16 @@ def take_action(self, parsed_args): attrs['numa_affinity_policy'] = None if parsed_args.host: attrs['binding:host_id'] = None + if parsed_args.hints: + attrs['hints'] = None + if parsed_args.device: + attrs['device_id'] = '' + if parsed_args.device_owner: + attrs['device_owner'] = '' attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: client.update_port(obj, **attrs) diff --git a/openstackclient/network/v2/router.py b/openstackclient/network/v2/router.py index 8302ee0152..939167d850 100644 --- a/openstackclient/network/v2/router.py +++ b/openstackclient/network/v2/router.py @@ -13,18 +13,21 @@ """Router action implementations""" +import argparse +import collections import copy import json import logging +import typing as ty from cliff import columns as cliff_columns from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils from osc_lib.utils import tags as _tag +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -32,12 +35,12 @@ LOG = logging.getLogger(__name__) -class AdminStateColumn(cliff_columns.FormattableColumn): +class AdminStateColumn(cliff_columns.FormattableColumn[bool]): def human_readable(self): return 'UP' if self._value else 'DOWN' -class RouterInfoColumn(cliff_columns.FormattableColumn): +class RouterInfoColumn(cliff_columns.FormattableColumn[ty.Any]): def human_readable(self): try: return json.dumps(self._value) @@ -45,7 +48,7 @@ def human_readable(self): return '' -class RoutesColumn(cliff_columns.FormattableColumn): +class RoutesColumn(cliff_columns.FormattableColumn[ty.Any]): def human_readable(self): # Map the route keys to match --route option. 
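+ # For example, a route returned by the SDK as
+ # {'destination': '10.0.0.0/24', 'nexthop': '10.0.0.1'} is presumably shown
+ # with a 'gateway' key in place of 'nexthop', matching the
+ # --route destination=,gateway= syntax (the loop body falls outside this
+ # hunk, so the exact key names are assumed here).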
for route in self._value or []: @@ -73,7 +76,7 @@ def _get_columns(item): } if hasattr(item, 'interfaces_info'): column_map['interfaces_info'] = 'interfaces_info' - invisible_columns = ['location'] + invisible_columns = ['location', 'tenant_id'] if item.is_ha is None: invisible_columns.append('is_ha') column_map.pop('is_ha') @@ -81,11 +84,132 @@ def _get_columns(item): invisible_columns.append('is_distributed') column_map.pop('is_distributed') return utils.get_osc_show_columns_for_sdk_resource( - item, column_map, invisible_columns) + item, column_map, invisible_columns + ) + + +def is_multiple_gateways_supported(n_client): + return ( + n_client.find_extension( + "external-gateway-multihoming", ignore_missing=True + ) + is not None + ) + + +def _passed_multiple_gateways(extension_supported, external_gateways): + passed_multiple_gws = len(external_gateways) > 1 + if passed_multiple_gws and not extension_supported: + msg = _( + 'Supplying --external-gateway option multiple times is not ' + 'supported due to the lack of external-gateway-multihoming ' + 'extension at the Neutron side.' + ) + raise exceptions.CommandError(msg) + return passed_multiple_gws + + +def _get_external_gateway_attrs(client_manager, parsed_args): + attrs: dict[str, ty.Any] = {} + + if parsed_args.external_gateways: + external_gateways: collections.defaultdict[str, list[dict]] = ( + collections.defaultdict(list) + ) + n_client = client_manager.network + first_network_id = '' + + for gw_net_name_or_id in parsed_args.external_gateways: + gateway_info = {} + gw_net = n_client.find_network( + gw_net_name_or_id, ignore_missing=False + ) + if not first_network_id: + first_network_id = gw_net.id + gateway_info['network_id'] = gw_net.id + if 'disable_snat' in parsed_args and parsed_args.disable_snat: + gateway_info['enable_snat'] = False + if 'enable_snat' in parsed_args and parsed_args.enable_snat: + gateway_info['enable_snat'] = True + + # This option was added before multiple gateways were supported, so + # it does not have a per-gateway port granularity so just pass it + # along in gw info in case it is specified. + if 'qos_policy' in parsed_args and parsed_args.qos_policy: + qos_id = n_client.find_qos_policy( + parsed_args.qos_policy, ignore_missing=False + ).id + gateway_info['qos_policy_id'] = qos_id + if 'no_qos_policy' in parsed_args and parsed_args.no_qos_policy: + gateway_info['qos_policy_id'] = None + + external_gateways[gw_net.id].append(gateway_info) + + multiple_gws_supported = is_multiple_gateways_supported(n_client) + # Parse the external fixed IP specs and match them to specific gateway + # ports if needed. + if parsed_args.fixed_ips: + for ip_spec in parsed_args.fixed_ips: + # If there is only one gateway, this value will represent the + # network ID for it, otherwise it will be overridden. + ip_net_id: str = first_network_id + + if ip_spec.get('subnet', False): + subnet_name_id = ip_spec.pop('subnet') + if subnet_name_id: + subnet = n_client.find_subnet( + subnet_name_id, ignore_missing=False + ) + ip_spec['subnet_id'] = subnet.id + ip_net_id = subnet.network_id + if ip_spec.get('ip-address', False): + ip_spec['ip_address'] = ip_spec.pop('ip-address') + # Finally, add an ip_spec to the specific gateway identified + # by a network from the spec. + if ( + 'subnet_id' in ip_spec + and ip_net_id not in external_gateways + ): + msg = ( + _( + 'Subnet %s does not belong to any of the networks ' + 'provided for --external-gateway.' 
+ ) + % (ip_spec['subnet_id']) + ) + raise exceptions.CommandError(msg) + for gw_info in external_gateways[ip_net_id]: + if 'external_fixed_ips' not in gw_info: + gw_info['external_fixed_ips'] = [ip_spec] + break + else: + # The end user has requested more fixed IPs than there are + # gateways, add multiple fixed IPs to single gateway to + # retain current behavior. + for gw_info in external_gateways[ip_net_id]: + gw_info['external_fixed_ips'].append(ip_spec) + break + + # Use the newer API whenever it is supported regardless of whether one + # or multiple gateways are passed as arguments. + if multiple_gws_supported: + gateway_list = [] + # Now merge the per-network-id lists of external gateway info + # dicts into one list. + for gw_info_list in external_gateways.values(): + gateway_list.extend(gw_info_list) + attrs['external_gateways'] = gateway_list + else: + attrs['external_gateway_info'] = external_gateways[ + first_network_id + ][0] + return attrs def _get_attrs(client_manager, parsed_args): attrs = {} + n_client = client_manager.network + if parsed_args.name is not None: attrs['name'] = parsed_args.name if parsed_args.enable: @@ -96,8 +220,10 @@ def _get_attrs(client_manager, parsed_args): attrs['distributed'] = False if parsed_args.distributed: attrs['distributed'] = True - if ('availability_zone_hints' in parsed_args and - parsed_args.availability_zone_hints is not None): + if ( + 'availability_zone_hints' in parsed_args + and parsed_args.availability_zone_hints is not None + ): attrs['availability_zone_hints'] = parsed_args.availability_zone_hints if parsed_args.description is not None: attrs['description'] = parsed_args.description @@ -110,95 +236,147 @@ def _get_attrs(client_manager, parsed_args): parsed_args.project_domain, ).id attrs['project_id'] = project_id - if parsed_args.external_gateway: - gateway_info = {} - n_client = client_manager.network - network = n_client.find_network( - parsed_args.external_gateway, ignore_missing=False) - gateway_info['network_id'] = network.id - if parsed_args.disable_snat: - gateway_info['enable_snat'] = False - if parsed_args.enable_snat: - gateway_info['enable_snat'] = True - if parsed_args.fixed_ip: - ips = [] - for ip_spec in parsed_args.fixed_ip: - if ip_spec.get('subnet', False): - subnet_name_id = ip_spec.pop('subnet') - if subnet_name_id: - subnet = n_client.find_subnet(subnet_name_id, - ignore_missing=False) - ip_spec['subnet_id'] = subnet.id - if ip_spec.get('ip-address', False): - ip_spec['ip_address'] = ip_spec.pop('ip-address') - ips.append(ip_spec) - gateway_info['external_fixed_ips'] = ips - attrs['external_gateway_info'] = gateway_info + + attrs.update(_get_external_gateway_attrs(client_manager, parsed_args)) + + # "router set" command doesn't support setting flavor_id. 
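+ # Both --flavor and the hidden --flavor-id spelling are resolved to an ID
+ # through find_flavor() below; for example (flavor name is illustrative),
+ # "openstack router create r1 --flavor gold" results in
+ # attrs['flavor_id'] being set to the ID of the 'gold' network flavor.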
+ if 'flavor_id' in parsed_args and parsed_args.flavor_id is not None: + flavor = n_client.find_flavor( + parsed_args.flavor_id, ignore_missing=False + ) + attrs['flavor_id'] = flavor.id + elif 'flavor' in parsed_args and parsed_args.flavor is not None: + flavor = n_client.find_flavor(parsed_args.flavor, ignore_missing=False) + attrs['flavor_id'] = flavor.id + + for attr in ('enable_default_route_bfd', 'enable_default_route_ecmp'): + value = getattr(parsed_args, attr, None) + if value is not None: + attrs[attr] = value return attrs +def _parser_add_bfd_ecmp_arguments(parser): + """Helper to add BFD and ECMP args for CreateRouter and SetRouter.""" + parser.add_argument( + '--enable-default-route-bfd', + dest='enable_default_route_bfd', + default=None, + action='store_true', + help=_( + "Enable BFD sessions for default routes inferred from " + "the external gateway port subnets for this router" + ), + ) + parser.add_argument( + '--disable-default-route-bfd', + dest='enable_default_route_bfd', + default=None, + action='store_false', + help=_( + "Disable BFD sessions for default routes inferred from " + "the external gateway port subnets for this router" + ), + ) + parser.add_argument( + '--enable-default-route-ecmp', + dest='enable_default_route_ecmp', + default=None, + action='store_true', + help=_( + "Add ECMP default routes if multiple are available via " + "different gateway ports" + ), + ) + parser.add_argument( + '--disable-default-route-ecmp', + dest='enable_default_route_ecmp', + default=None, + action='store_false', + help=_("Add default route only for first gateway port"), + ) + + +def _command_check_bfd_ecmp_supported(attrs, client): + """Helper to check for server side support when bfd/ecmp attrs provided. + + :raises: exceptions.CommandError + """ + if ( + 'enable_default_route_bfd' in attrs + or 'enable_default_route_ecmp' in attrs + ) and not is_multiple_gateways_supported(client): + msg = _( + 'The external-gateway-multihoming extension is not enabled at ' + 'the Neutron side, cannot use --enable-default-route-bfd or ' + '--enable-default-route-ecmp arguments.' 
+ ) + raise exceptions.CommandError(msg) + + class AddPortToRouter(command.Command): _description = _("Add a port to a router") def get_parser(self, prog_name): - parser = super(AddPortToRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_("Router to which port will be added (name or ID)") + help=_("Router to which port will be added (name or ID)"), ) parser.add_argument( - 'port', - metavar='', - help=_("Port to be added (name or ID)") + 'port', metavar='', help=_("Port to be added (name or ID)") ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network port = client.find_port(parsed_args.port, ignore_missing=False) - client.add_interface_to_router(client.find_router( - parsed_args.router, ignore_missing=False), port_id=port.id) + client.add_interface_to_router( + client.find_router(parsed_args.router, ignore_missing=False), + port_id=port.id, + ) class AddSubnetToRouter(command.Command): _description = _("Add a subnet to a router") def get_parser(self, prog_name): - parser = super(AddSubnetToRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_("Router to which subnet will be added (name or ID)") + help=_("Router to which subnet will be added (name or ID)"), ) parser.add_argument( 'subnet', metavar='', - help=_("Subnet to be added (name or ID)") + help=_("Subnet to be added (name or ID)"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - subnet = client.find_subnet(parsed_args.subnet, - ignore_missing=False) + subnet = client.find_subnet(parsed_args.subnet, ignore_missing=False) client.add_interface_to_router( - client.find_router(parsed_args.router, - ignore_missing=False), - subnet_id=subnet.id) + client.find_router(parsed_args.router, ignore_missing=False), + subnet_id=subnet.id, + ) class AddExtraRoutesToRouter(command.ShowOne): _description = _("Add extra static routes to a router's routing table.") def get_parser(self, prog_name): - parser = super(AddExtraRoutesToRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_("Router to which extra static routes " - "will be added (name or ID).") + help=_( + "Router to which extra static routes " + "will be added (name or ID)" + ), ) parser.add_argument( '--route', @@ -207,14 +385,16 @@ def get_parser(self, prog_name): dest='routes', default=[], required_keys=['destination', 'gateway'], - help=_("Add extra static route to the router. " - "destination: destination subnet (in CIDR notation), " - "gateway: nexthop IP address. " - "Repeat option to add multiple routes. " - "Trying to add a route that's already present " - "(exactly, including destination and nexthop) " - "in the routing table is allowed and is considered " - "a successful operation.") + help=_( + "Add extra static route to the router. " + "destination: destination subnet (in CIDR notation), " + "gateway: nexthop IP address. " + "Repeat option to add multiple routes. " + "Trying to add a route that is already present " + "(exactly, including destination and nexthop) " + "in the routing table is allowed and is considered " + "a successful operation." 
+ ), ) return parser @@ -225,24 +405,29 @@ def take_action(self, parsed_args): client = self.app.client_manager.network router_obj = client.add_extra_routes_to_router( client.find_router(parsed_args.router, ignore_missing=False), - body={'router': {'routes': parsed_args.routes}}) + body={'router': {'routes': parsed_args.routes}}, + ) display_columns, columns = _get_columns(router_obj) data = utils.get_item_properties( - router_obj, columns, formatters=_formatters) + router_obj, columns, formatters=_formatters + ) return (display_columns, data) class RemoveExtraRoutesFromRouter(command.ShowOne): _description = _( - "Remove extra static routes from a router's routing table.") + "Remove extra static routes from a router's routing table." + ) def get_parser(self, prog_name): - parser = super(RemoveExtraRoutesFromRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_("Router from which extra static routes " - "will be removed (name or ID).") + help=_( + "Router from which extra static routes " + "will be removed (name or ID)" + ), ) parser.add_argument( '--route', @@ -251,14 +436,16 @@ def get_parser(self, prog_name): dest='routes', default=[], required_keys=['destination', 'gateway'], - help=_("Remove extra static route from the router. " - "destination: destination subnet (in CIDR notation), " - "gateway: nexthop IP address. " - "Repeat option to remove multiple routes. " - "Trying to remove a route that's already missing " - "(fully, including destination and nexthop) " - "from the routing table is allowed and is considered " - "a successful operation.") + help=_( + "Remove extra static route from the router. " + "destination: destination subnet (in CIDR notation), " + "gateway: nexthop IP address. " + "Repeat option to remove multiple routes. " + "Trying to remove a route that is already missing " + "(fully, including destination and nexthop) " + "from the routing table is allowed and is considered " + "a successful operation." 
+ ), ) return parser @@ -269,10 +456,12 @@ def take_action(self, parsed_args): client = self.app.client_manager.network router_obj = client.remove_extra_routes_from_router( client.find_router(parsed_args.router, ignore_missing=False), - body={'router': {'routes': parsed_args.routes}}) + body={'router': {'routes': parsed_args.routes}}, + ) display_columns, columns = _get_columns(router_obj) data = utils.get_item_properties( - router_obj, columns, formatters=_formatters) + router_obj, columns, formatters=_formatters + ) return (display_columns, data) @@ -282,55 +471,49 @@ class CreateRouter(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create a new router") def get_parser(self, prog_name): - parser = super(CreateRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar='', - help=_("New router name") + 'name', metavar='', help=_("New router name") ) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( '--enable', action='store_true', default=True, - help=_("Enable router (default)") + help=_("Enable router (default)"), ) admin_group.add_argument( - '--disable', - action='store_true', - help=_("Disable router") + '--disable', action='store_true', help=_("Disable router") ) distribute_group = parser.add_mutually_exclusive_group() distribute_group.add_argument( '--distributed', action='store_true', - help=_("Create a distributed router") + help=_("Create a distributed router"), ) distribute_group.add_argument( '--centralized', action='store_true', - help=_("Create a centralized router") + help=_("Create a centralized router"), ) ha_group = parser.add_mutually_exclusive_group() ha_group.add_argument( '--ha', action='store_true', - help=_("Create a highly available router") + help=_("Create a highly available router"), ) ha_group.add_argument( - '--no-ha', - action='store_true', - help=_("Create a legacy router") + '--no-ha', action='store_true', help=_("Create a legacy router") ) parser.add_argument( '--description', metavar='', - help=_("Set router description") + help=_("Set router description"), ) parser.add_argument( '--project', metavar='', - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( @@ -338,36 +521,47 @@ def get_parser(self, prog_name): metavar='', action='append', dest='availability_zone_hints', - help=_("Availability Zone in which to create this router " - "(Router Availability Zone extension required, " - "repeat option to set multiple availability zones)") + help=_( + "Availability Zone in which to create this router " + "(Router Availability Zone extension required, " + "repeat option to set multiple availability zones)" + ), ) _tag.add_tag_option_to_parser_for_create(parser, _('router')) parser.add_argument( '--external-gateway', metavar="", - help=_("External Network used as router's gateway (name or ID)") + action='append', + help=_( + "External Network used as router's gateway (name or ID) " + "(repeat option to set multiple gateways per router " + "if the L3 service plugin in use supports it)" + ), + dest='external_gateways', ) parser.add_argument( '--fixed-ip', metavar='subnet=,ip-address=', action=parseractions.MultiKeyValueAction, optional_keys=['subnet', 'ip-address'], - help=_("Desired IP and/or subnet (name or ID) " - "on external gateway: " - "subnet=,ip-address= " - "(repeat option to set multiple fixed IP addresses)") + dest='fixed_ips', + 
help=_( + "Desired IP and/or subnet (name or ID) " + "on external gateway: " + "subnet=,ip-address= " + "(repeat option to set multiple fixed IP addresses)" + ), ) snat_group = parser.add_mutually_exclusive_group() snat_group.add_argument( '--enable-snat', action='store_true', - help=_("Enable Source NAT on external gateway") + help=_("Enable Source NAT on external gateway"), ) snat_group.add_argument( '--disable-snat', action='store_true', - help=_("Disable Source NAT on external gateway") + help=_("Disable Source NAT on external gateway"), ) ndp_proxy_group = parser.add_mutually_exclusive_group() ndp_proxy_group.add_argument( @@ -375,14 +569,30 @@ def get_parser(self, prog_name): dest='enable_ndp_proxy', default=None, action='store_true', - help=_("Enable IPv6 NDP proxy on external gateway") + help=_("Enable IPv6 NDP proxy on external gateway"), ) ndp_proxy_group.add_argument( '--disable-ndp-proxy', dest='enable_ndp_proxy', default=None, action='store_false', - help=_("Disable IPv6 NDP proxy on external gateway") + help=_("Disable IPv6 NDP proxy on external gateway"), + ) + parser.add_argument( + '--flavor', + metavar='', + help=_("Associate the router with a flavor (by name or ID)"), + ) + parser.add_argument( + '--flavor-id', + metavar='', + help=argparse.SUPPRESS, + ) + _parser_add_bfd_ecmp_arguments(parser) + parser.add_argument( + '--qos-policy', + metavar='', + help=_('Attach QoS policy to router gateway IPs'), ) return parser @@ -396,24 +606,50 @@ def take_action(self, parsed_args): if parsed_args.no_ha: attrs['ha'] = False attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) - if parsed_args.enable_ndp_proxy and not parsed_args.external_gateway: - msg = (_("You must specify '--external-gateway' in order " - "to enable router's NDP proxy")) + if parsed_args.enable_ndp_proxy and not parsed_args.external_gateways: + msg = _( + "You must specify '--external-gateway' in order " + "to enable router's NDP proxy" + ) + raise exceptions.CommandError(msg) + + if parsed_args.qos_policy and not parsed_args.external_gateways: + msg = _( + "You must specify '--external-gateway' in order " + "to define a QoS policy" + ) raise exceptions.CommandError(msg) if parsed_args.enable_ndp_proxy is not None: attrs['enable_ndp_proxy'] = parsed_args.enable_ndp_proxy + _command_check_bfd_ecmp_supported(attrs, client) + + external_gateways = attrs.pop('external_gateways', None) obj = client.create_router(**attrs) # tags cannot be set when created, so tags need to be set later. _tag.update_tags_for_set(client, obj, parsed_args) - if (parsed_args.disable_snat or parsed_args.enable_snat or - parsed_args.fixed_ip) and not parsed_args.external_gateway: - msg = (_("You must specify '--external-gateway' in order " - "to specify SNAT or fixed-ip values")) + # If the multiple external gateways API is intended to be used, + # do a separate API call to set the desired external gateways as the + # router creation API supports adding only one.
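+ # For example (network names are illustrative), "openstack router create r1
+ # --external-gateway net1 --external-gateway net2" produces
+ # external_gateways = [{'network_id': <net1 ID>}, {'network_id': <net2 ID>}],
+ # which is then sent through update_external_gateways() below instead of
+ # create_router().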
+ if external_gateways: + client.update_external_gateways( + obj, body={'router': {'external_gateways': external_gateways}} + ) + + if ( + parsed_args.disable_snat + or parsed_args.enable_snat + or parsed_args.fixed_ips + ) and not parsed_args.external_gateways: + msg = _( + "You must specify '--external-gateway' in order " + "to specify SNAT or fixed-ip values" + ) raise exceptions.CommandError(msg) display_columns, columns = _get_columns(obj) @@ -426,12 +662,12 @@ class DeleteRouter(command.Command): _description = _("Delete router(s)") def get_parser(self, prog_name): - parser = super(DeleteRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar="", nargs="+", - help=_("Router(s) to delete (name or ID)") + help=_("Router(s) to delete (name or ID)"), ) return parser @@ -445,14 +681,20 @@ def take_action(self, parsed_args): client.delete_router(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete router with " - "name or ID '%(router)s': %(e)s"), - {'router': router, 'e': e}) + LOG.error( + _( + "Failed to delete router with " + "name or ID '%(router)s': %(e)s" + ), + {'router': router, 'e': e}, + ) if result > 0: total = len(parsed_args.router) - msg = (_("%(result)s of %(total)s routers failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s routers failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -462,39 +704,39 @@ class ListRouter(command.Lister): _description = _("List routers") def get_parser(self, prog_name): - parser = super(ListRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--name', metavar='', - help=_("List routers according to their name") + help=_("List routers according to their name"), ) admin_state_group = parser.add_mutually_exclusive_group() admin_state_group.add_argument( - '--enable', - action='store_true', - help=_("List enabled routers") + '--enable', action='store_true', help=_("List enabled routers") ) admin_state_group.add_argument( - '--disable', - action='store_true', - help=_("List disabled routers") + '--disable', action='store_true', help=_("List disabled routers") ) parser.add_argument( '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) parser.add_argument( '--project', metavar='', - help=_("List routers according to their project (name or ID)") + help=_( + "List only routers with the specified project (name or ID)" + ), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--agent', metavar='', - help=_("List routers hosted by an agent (ID only)") + help=_( + "List only routers hosted by the specified agent (ID only)" + ), ) _tag.add_tag_filtering_option_to_parser(parser, _('routers')) @@ -504,14 +746,14 @@ def take_action(self, parsed_args): identity_client = self.app.client_manager.identity client = self.app.client_manager.network - columns = ( + columns: tuple[str, ...] = ( 'id', 'name', 'status', 'is_admin_state_up', 'project_id', ) - column_headers = ( + column_headers: tuple[str, ...] 
= ( 'ID', 'Name', 'Status', @@ -553,39 +795,43 @@ def take_action(self, parsed_args): # check if "HA" and "Distributed" columns should be displayed also data = list(data) for d in data: - if (d.is_distributed is not None and - 'is_distributed' not in columns): - columns = columns + ('is_distributed',) - column_headers = column_headers + ('Distributed',) + if ( + d.is_distributed is not None + and 'is_distributed' not in columns + ): + columns += ('is_distributed',) + column_headers += ('Distributed',) if d.is_ha is not None and 'is_ha' not in columns: - columns = columns + ('is_ha',) - column_headers = column_headers + ('HA',) + columns += ('is_ha',) + column_headers += ('HA',) if parsed_args.long: - columns = columns + ( + columns += ( 'routes', 'external_gateway_info', ) - column_headers = column_headers + ( + column_headers += ( 'Routes', 'External gateway info', ) # availability zone will be available only when # router_availability_zone extension is enabled if client.find_extension("router_availability_zone"): - columns = columns + ( - 'availability_zones', - ) - column_headers = column_headers + ( - 'Availability zones', - ) - columns = columns + ('tags',) - column_headers = column_headers + ('Tags',) - - return (column_headers, - (utils.get_item_properties( - s, columns, + columns += ('availability_zones',) + column_headers += ('Availability zones',) + columns += ('tags',) + column_headers += ('Tags',) + + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters=_formatters, - ) for s in data)) + ) + for s in data + ), + ) @staticmethod def _filter_match(data, conditions): @@ -606,51 +852,54 @@ class RemovePortFromRouter(command.Command): _description = _("Remove a port from a router") def get_parser(self, prog_name): - parser = super(RemovePortFromRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_("Router from which port will be removed (name or ID)") + help=_("Router from which port will be removed (name or ID)"), ) parser.add_argument( 'port', metavar='', - help=_("Port to be removed and deleted (name or ID)") + help=_("Port to be removed and deleted (name or ID)"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network port = client.find_port(parsed_args.port, ignore_missing=False) - client.remove_interface_from_router(client.find_router( - parsed_args.router, ignore_missing=False), port_id=port.id) + client.remove_interface_from_router( + client.find_router(parsed_args.router, ignore_missing=False), + port_id=port.id, + ) class RemoveSubnetFromRouter(command.Command): _description = _("Remove a subnet from a router") def get_parser(self, prog_name): - parser = super(RemoveSubnetFromRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar='', - help=_("Router from which the subnet will be removed (name or ID)") + help=_( + "Router from which the subnet will be removed (name or ID)" + ), ) parser.add_argument( 'subnet', metavar='', - help=_("Subnet to be removed (name or ID)") + help=_("Subnet to be removed (name or ID)"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - subnet = client.find_subnet(parsed_args.subnet, - ignore_missing=False) + subnet = client.find_subnet(parsed_args.subnet, ignore_missing=False) client.remove_interface_from_router( - client.find_router(parsed_args.router, - ignore_missing=False), - 
subnet_id=subnet.id) + client.find_router(parsed_args.router, ignore_missing=False), + subnet_id=subnet.id, + ) # TODO(yanxing'an): Use the SDK resource mapped attribute names once the @@ -659,44 +908,40 @@ class SetRouter(common.NeutronCommandWithExtraArgs): _description = _("Set router properties") def get_parser(self, prog_name): - parser = super(SetRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar="", - help=_("Router to modify (name or ID)") + help=_("Router to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar='', - help=_("Set router name") + '--name', metavar='', help=_("Set router name") ) parser.add_argument( '--description', metavar='', - help=_('Set router description') + help=_('Set router description'), ) admin_group = parser.add_mutually_exclusive_group() admin_group.add_argument( '--enable', action='store_true', default=None, - help=_("Enable router") + help=_("Enable router"), ) admin_group.add_argument( - '--disable', - action='store_true', - help=_("Disable router") + '--disable', action='store_true', help=_("Disable router") ) distribute_group = parser.add_mutually_exclusive_group() distribute_group.add_argument( '--distributed', action='store_true', - help=_("Set router to distributed mode (disabled router only)") + help=_("Set router to distributed mode (disabled router only)"), ) distribute_group.add_argument( '--centralized', action='store_true', - help=_("Set router to centralized mode (disabled router only)") + help=_("Set router to centralized mode (disabled router only)"), ) parser.add_argument( '--route', @@ -705,60 +950,76 @@ def get_parser(self, prog_name): dest='routes', default=None, required_keys=['destination', 'gateway'], - help=_("Add routes to the router " - "destination: destination subnet (in CIDR notation) " - "gateway: nexthop IP address " - "(repeat option to add multiple routes). " - "This is deprecated in favor of 'router add/remove route' " - "since it is prone to race conditions between concurrent " - "clients when not used together with --no-route to " - "overwrite the current value of 'routes'.") + help=_( + "Add routes to the router. " + "destination: destination subnet (in CIDR notation) " + "gateway: nexthop IP address " + "(repeat option to add multiple routes). " + "This is deprecated in favor of 'router add/remove route' " + "since it is prone to race conditions between concurrent " + "clients when not used together with --no-route to " + "overwrite the current value of 'routes'." + ), ) parser.add_argument( '--no-route', action='store_true', - help=_("Clear routes associated with the router. " - "Specify both --route and --no-route to overwrite " - "current value of routes.") + help=_( + "Clear routes associated with the router. " + "Specify both --route and --no-route to overwrite " + "current value of routes." 
+ ), ) routes_ha = parser.add_mutually_exclusive_group() routes_ha.add_argument( '--ha', action='store_true', - help=_("Set the router as highly available " - "(disabled router only)") + help=_( + "Set the router as highly available (disabled router only)" + ), ) routes_ha.add_argument( '--no-ha', action='store_true', - help=_("Clear high availability attribute of the router " - "(disabled router only)") + help=_( + "Clear high availability attribute of the router " + "(disabled router only)" + ), ) parser.add_argument( '--external-gateway', metavar="", - help=_("External Network used as router's gateway (name or ID)") + action='append', + help=_( + "External Network used as router's gateway (name or ID) " + "(repeat option to set multiple gateways per router " + "if the L3 service plugin in use supports it)." + ), + dest='external_gateways', ) parser.add_argument( '--fixed-ip', metavar='subnet=,ip-address=', action=parseractions.MultiKeyValueAction, optional_keys=['subnet', 'ip-address'], - help=_("Desired IP and/or subnet (name or ID) " - "on external gateway: " - "subnet=,ip-address= " - "(repeat option to set multiple fixed IP addresses)") + dest='fixed_ips', + help=_( + "Desired IP and/or subnet (name or ID) " + "on external gateway: " + "subnet=,ip-address= " + "(repeat option to set multiple fixed IP addresses)" + ), ) snat_group = parser.add_mutually_exclusive_group() snat_group.add_argument( '--enable-snat', action='store_true', - help=_("Enable Source NAT on external gateway") + help=_("Enable Source NAT on external gateway"), ) snat_group.add_argument( '--disable-snat', action='store_true', - help=_("Disable Source NAT on external gateway") + help=_("Disable Source NAT on external gateway"), ) ndp_proxy_group = parser.add_mutually_exclusive_group() ndp_proxy_group.add_argument( @@ -766,27 +1027,28 @@ def get_parser(self, prog_name): dest='enable_ndp_proxy', default=None, action='store_true', - help=_("Enable IPv6 NDP proxy on external gateway") + help=_("Enable IPv6 NDP proxy on external gateway"), ) ndp_proxy_group.add_argument( '--disable-ndp-proxy', dest='enable_ndp_proxy', default=None, action='store_false', - help=_("Disable IPv6 NDP proxy on external gateway") + help=_("Disable IPv6 NDP proxy on external gateway"), ) qos_policy_group = parser.add_mutually_exclusive_group() qos_policy_group.add_argument( '--qos-policy', metavar='', - help=_("Attach QoS policy to router gateway IPs") + help=_("Attach QoS policy to router gateway IPs"), ) qos_policy_group.add_argument( '--no-qos-policy', action='store_true', - help=_("Remove QoS policy from router gateway IPs") + help=_("Remove QoS policy from router gateway IPs"), ) _tag.add_tag_option_to_parser_for_set(parser, _('router')) + _parser_add_bfd_ecmp_arguments(parser) return parser def take_action(self, parsed_args): @@ -812,41 +1074,66 @@ def take_action(self, parsed_args): attrs['routes'] += obj.routes elif parsed_args.no_route: attrs['routes'] = [] - if (parsed_args.disable_snat or parsed_args.enable_snat or - parsed_args.fixed_ip) and not parsed_args.external_gateway: - msg = (_("You must specify '--external-gateway' in order " - "to update the SNAT or fixed-ip values")) + if ( + parsed_args.disable_snat + or parsed_args.enable_snat + or parsed_args.fixed_ips + ) and not parsed_args.external_gateways: + msg = _( + "You must specify '--external-gateway' in order " + "to update the SNAT or fixed-ip values" + ) raise exceptions.CommandError(msg) - if ((parsed_args.qos_policy or parsed_args.no_qos_policy) and - not 
parsed_args.external_gateway): + if ( + parsed_args.qos_policy or parsed_args.no_qos_policy + ) and not parsed_args.external_gateways: try: original_net_id = obj.external_gateway_info['network_id'] except (KeyError, TypeError): - msg = (_("You must specify '--external-gateway' or the router " - "must already have an external network in order to " - "set router gateway IP QoS")) + msg = _( + "You must specify '--external-gateway' or the router " + "must already have an external network in order to " + "set router gateway IP QoS" + ) raise exceptions.CommandError(msg) else: - if not attrs.get('external_gateway_info'): + if not attrs.get('external_gateway_info') and not attrs.get( + 'external_gateways' + ): attrs['external_gateway_info'] = {} attrs['external_gateway_info']['network_id'] = original_net_id if parsed_args.qos_policy: check_qos_id = client.find_qos_policy( - parsed_args.qos_policy, ignore_missing=False).id - attrs['external_gateway_info']['qos_policy_id'] = check_qos_id + parsed_args.qos_policy, ignore_missing=False + ).id + if not attrs.get('external_gateways'): + attrs['external_gateway_info']['qos_policy_id'] = check_qos_id if 'no_qos_policy' in parsed_args and parsed_args.no_qos_policy: - attrs['external_gateway_info']['qos_policy_id'] = None + if not attrs.get('external_gateways'): + attrs['external_gateway_info']['qos_policy_id'] = None attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if parsed_args.enable_ndp_proxy is not None: attrs['enable_ndp_proxy'] = parsed_args.enable_ndp_proxy + _command_check_bfd_ecmp_supported(attrs, client) + if attrs: + external_gateways = attrs.pop('external_gateways', None) client.update_router(obj, **attrs) + # If the multiple external gateways API is intended to be used, + # do a separate API call to set external gateways. + if external_gateways: + client.update_external_gateways( + obj, + body={'router': {'external_gateways': external_gateways}}, + ) + # tags is a subresource and it needs to be updated separately. _tag.update_tags_for_set(client, obj, parsed_args) @@ -855,11 +1142,11 @@ class ShowRouter(command.ShowOne): _description = _("Display router details") def get_parser(self, prog_name): - parser = super(ShowRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'router', metavar="", - help=_("Router to display (name or ID)") + help=_("Router to display (name or ID)"), ) return parser @@ -875,7 +1162,7 @@ def take_action(self, parsed_args): int_info = { 'port_id': port.id, 'ip_address': ip_spec.get('ip_address'), - 'subnet_id': ip_spec.get('subnet_id') + 'subnet_id': ip_spec.get('subnet_id'), } interfaces_info.append(int_info) @@ -891,7 +1178,7 @@ class UnsetRouter(common.NeutronUnsetCommandWithExtraArgs): _description = _("Unset router properties") def get_parser(self, prog_name): - parser = super(UnsetRouter, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--route', metavar='destination=,gateway=', @@ -899,25 +1186,33 @@ def get_parser(self, prog_name): dest='routes', default=None, required_keys=['destination', 'gateway'], - help=_("Routes to be removed from the router " - "destination: destination subnet (in CIDR notation) " - "gateway: nexthop IP address " - "(repeat option to unset multiple routes)")) + help=_( + "Routes to be removed from the router. 
" + "destination: destination subnet (in CIDR notation) " + "gateway: nexthop IP address " + "(repeat option to unset multiple routes)" + ), + ) + # NOTE(dmitriis): This was not extended to support selective removal + # of external gateways due to a cpython bug in argparse: + # https://github.com/python/cpython/issues/53584 parser.add_argument( '--external-gateway', action='store_true', default=False, - help=_("Remove external gateway information from the router")) + help=_("Remove external gateway information from the router"), + dest='external_gateways', + ) parser.add_argument( '--qos-policy', action='store_true', default=False, - help=_("Remove QoS policy from router gateway IPs") + help=_("Remove QoS policy from router gateway IPs"), ) parser.add_argument( 'router', metavar="", - help=_("Router to modify (name or ID)") + help=_("Router to modify (name or ID)"), ) _tag.add_tag_option_to_parser_for_unset(parser, _('router')) return parser @@ -934,30 +1229,174 @@ def take_action(self, parsed_args): route['nexthop'] = route.pop('gateway') tmp_routes.remove(route) except ValueError: - msg = (_("Router does not contain route %s") % route) + msg = _("Router does not contain route %s") % route raise exceptions.CommandError(msg) attrs['routes'] = tmp_routes if parsed_args.qos_policy: try: - if (tmp_external_gateway_info['network_id'] and - tmp_external_gateway_info['qos_policy_id']): + if ( + tmp_external_gateway_info['network_id'] + and tmp_external_gateway_info['qos_policy_id'] + ): pass except (KeyError, TypeError): - msg = _("Router does not have external network or qos policy") + msg = _("Router does not have external network or QoS policy") raise exceptions.CommandError(msg) else: attrs['external_gateway_info'] = { 'network_id': tmp_external_gateway_info['network_id'], - 'qos_policy_id': None + 'qos_policy_id': None, } - if parsed_args.external_gateway: + if parsed_args.external_gateways: attrs['external_gateway_info'] = {} attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: + # If removing multiple gateways per router are supported, + # use the relevant API to remove them all. + if is_multiple_gateways_supported(client): + client.remove_external_gateways( + obj, + body={'router': {'external_gateways': {}}}, + ) + client.update_router(obj, **attrs) # tags is a subresource and it needs to be updated separately. _tag.update_tags_for_unset(client, obj, parsed_args) + + +class AddGatewayToRouter(command.ShowOne): + _description = _("Add router gateway") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'router', + metavar="", + help=_("Router to modify (name or ID)"), + ) + parser.add_argument( + metavar="", + help=_( + "External Network to a attach a router gateway to (name or ID)" + ), + dest='external_gateways', + # The argument is stored in a list in order to reuse the + # common attribute parsing code. 
+ nargs=1, + ) + parser.add_argument( + '--fixed-ip', + metavar='subnet=,ip-address=', + action=parseractions.MultiKeyValueAction, + optional_keys=['subnet', 'ip-address'], + dest='fixed_ips', + help=_( + "Desired IP and/or subnet (name or ID) " + "on external gateway: " + "subnet=,ip-address= " + "(repeat option to set multiple fixed IP addresses)" + ), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + if not is_multiple_gateways_supported(client): + msg = _( + 'The external-gateway-multihoming extension is not enabled at ' + 'the Neutron side.' + ) + raise exceptions.CommandError(msg) + + router_obj = client.find_router( + parsed_args.router, ignore_missing=False + ) + + # Get the common attributes. + attrs = _get_external_gateway_attrs( + self.app.client_manager, parsed_args + ) + + if attrs: + external_gateways = attrs.pop('external_gateways') + router_obj = client.add_external_gateways( + router_obj, + body={'router': {'external_gateways': external_gateways}}, + ) + + display_columns, columns = _get_columns(router_obj) + data = utils.get_item_properties( + router_obj, columns, formatters=_formatters + ) + return (display_columns, data) + + +class RemoveGatewayFromRouter(command.ShowOne): + _description = _("Remove router gateway") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'router', + metavar="", + help=_("Router to modify (name or ID)."), + ) + parser.add_argument( + metavar="", + help=_( + "External Network to remove a router gateway from (name or ID)" + ), + dest='external_gateways', + # The argument is stored in a list in order to reuse the + # common attribute parsing code. + nargs=1, + ) + parser.add_argument( + '--fixed-ip', + metavar='subnet=,ip-address=', + action=parseractions.MultiKeyValueAction, + optional_keys=['subnet', 'ip-address'], + dest='fixed_ips', + help=_( + "IP and/or subnet (name or ID) on the external gateway " + "which is used to identify a particular gateway if multiple " + "are attached to the same network: subnet=," + "ip-address=" + ), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + if not is_multiple_gateways_supported(client): + msg = _( + 'The external-gateway-multihoming extension is not enabled at ' + 'the Neutron side.' + ) + raise exceptions.CommandError(msg) + + router_obj = client.find_router( + parsed_args.router, ignore_missing=False + ) + + # Get the common attributes. 
+ attrs = _get_external_gateway_attrs( + self.app.client_manager, parsed_args + ) + if attrs: + external_gateways = attrs.pop('external_gateways') + router_obj = client.remove_external_gateways( + router_obj, + body={'router': {'external_gateways': external_gateways}}, + ) + + display_columns, columns = _get_columns(router_obj) + data = utils.get_item_properties( + router_obj, columns, formatters=_formatters + ) + return (display_columns, data) diff --git a/openstackclient/network/v2/security_group.py b/openstackclient/network/v2/security_group.py index ffad99882c..c6930de78d 100644 --- a/openstackclient/network/v2/security_group.py +++ b/openstackclient/network/v2/security_group.py @@ -14,12 +14,14 @@ """Security Group action implementations""" import argparse +import typing as ty from cliff import columns as cliff_columns -from osc_lib.command import command from osc_lib import utils from osc_lib.utils import tags as _tag +from openstackclient.api import compute_v2 +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -64,12 +66,12 @@ def _format_compute_security_group_rules(sg_rules): return utils.format_list(rules, separator='\n') -class NetworkSecurityGroupRulesColumn(cliff_columns.FormattableColumn): +class NetworkSecurityGroupRulesColumn(cliff_columns.FormattableColumn[ty.Any]): def human_readable(self): return _format_network_security_group_rules(self._value) -class ComputeSecurityGroupRulesColumn(cliff_columns.FormattableColumn): +class ComputeSecurityGroupRulesColumn(cliff_columns.FormattableColumn[ty.Any]): def human_readable(self): return _format_compute_security_group_rules(self._value) @@ -88,32 +90,28 @@ def _get_columns(item): # We still support Nova managed security groups, where we have tenant_id. column_map = { 'security_group_rules': 'rules', - 'tenant_id': 'project_id', } - hidden_columns = ['location'] + hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) # TODO(abhiraut): Use the SDK resource mapped attribute names once the # OSC minimum requirements include SDK 1.0. 
-class CreateSecurityGroup(common.NetworkAndComputeShowOne, - common.NeutronCommandWithExtraArgs): +class CreateSecurityGroup( + common.NetworkAndComputeShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create a new security group") def update_parser_common(self, parser): parser.add_argument( - "name", - metavar="", - help=_("New security group name") + "name", metavar="", help=_("New security group name") ) parser.add_argument( "--description", metavar="", - help=_("Security group description") + help=_("Security group description"), ) return parser @@ -121,26 +119,27 @@ def update_parser_network(self, parser): parser.add_argument( '--project', metavar='', - help=self.enhance_help_neutron(_("Owner's project (name or ID)")) + help=self.enhance_help_neutron(_("Owner's project (name or ID)")), ) stateful_group = parser.add_mutually_exclusive_group() stateful_group.add_argument( "--stateful", action='store_true', default=None, - help=_("Security group is stateful (Default)") + help=_("Security group is stateful (default)"), ) stateful_group.add_argument( "--stateless", action='store_true', default=None, - help=_("Security group is stateless") + help=_("Security group is stateless"), ) identity_common.add_project_domain_option_to_parser( - parser, enhance_help=self.enhance_help_neutron) + parser, enhance_help=self.enhance_help_neutron + ) _tag.add_tag_option_to_parser_for_create( - parser, _('security group'), - enhance_help=self.enhance_help_neutron) + parser, _('security group'), enhance_help=self.enhance_help_neutron + ) return parser def _get_description(self, parsed_args): @@ -167,7 +166,8 @@ def take_action_network(self, client, parsed_args): ).id attrs['project_id'] = project_id attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) # Create the security group and display the results. obj = client.create_security_group(**attrs) @@ -175,23 +175,21 @@ def take_action_network(self, client, parsed_args): _tag.update_tags_for_set(client, obj, parsed_args) display_columns, property_columns = _get_columns(obj) data = utils.get_item_properties( - obj, - property_columns, - formatters=_formatters_network + obj, property_columns, formatters=_formatters_network ) return (display_columns, data) def take_action_compute(self, client, parsed_args): description = self._get_description(parsed_args) - obj = client.api.security_group_create( + obj = compute_v2.create_security_group( + client, parsed_args.name, description, ) - display_columns, property_columns = _get_columns(obj) + display_columns = ('description', 'id', 'name', 'project_id', 'rules') + property_columns = ('description', 'id', 'name', 'tenant_id', 'rules') data = utils.get_dict_properties( - obj, - property_columns, - formatters=_formatters_compute + obj, property_columns, formatters=_formatters_compute ) return (display_columns, data) @@ -217,14 +215,22 @@ def take_action_network(self, client, parsed_args): client.delete_security_group(obj) def take_action_compute(self, client, parsed_args): - client.api.security_group_delete(self.r) + security_group = compute_v2.find_security_group(client, self.r) + compute_v2.delete_security_group(client, security_group['id']) # TODO(rauta): Use the SDK resource mapped attribute names once # the OSC minimum requirements include SDK 1.0. 
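For the legacy nova-network path, these commands now go through the openstackclient.api.compute_v2 helpers (find_security_group, create_security_group, delete_security_group, update_security_group, list_security_groups) instead of the old client.api.* calls. The following is a hedged usage sketch only: the compute client handle and the dict-shaped results ('id', 'name', 'description', 'rules', 'tenant_id') are inferred from how the surrounding code consumes them, not from a documented contract, and rename_group_sketch itself is a hypothetical helper, not part of this change:

from openstackclient.api import compute_v2


def rename_group_sketch(compute_client, group_name_or_id, new_name):
    # Look the group up by name or ID, then push only the changed field,
    # mirroring the find/update calls used by SetSecurityGroup further down.
    group = compute_v2.find_security_group(compute_client, group_name_or_id)
    compute_v2.update_security_group(compute_client, group['id'], name=new_name)
    return group['id']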
class ListSecurityGroup(common.NetworkAndComputeLister): _description = _("List security groups") - FIELDS_TO_RETRIEVE = ['id', 'name', 'description', 'project_id', 'tags'] + FIELDS_TO_RETRIEVE = [ + 'id', + 'name', + 'description', + 'project_id', + 'tags', + 'shared', + ] def update_parser_network(self, parser): if not self.is_docs_build: @@ -241,14 +247,35 @@ def update_parser_network(self, parser): '--project', metavar='', help=self.enhance_help_neutron( - _("List security groups according to the project (name or " - "ID)")) + _( + "List only security groups with the specified project " + "(name or ID)" + ) + ), ) identity_common.add_project_domain_option_to_parser( - parser, enhance_help=self.enhance_help_neutron) + parser, enhance_help=self.enhance_help_neutron + ) + + shared_group = parser.add_mutually_exclusive_group() + shared_group.add_argument( + '--share', + action='store_true', + dest='shared', + default=None, + help=_("List only security groups shared between projects"), + ) + shared_group.add_argument( + '--no-share', + action='store_false', + dest='shared', + default=None, + help=_("List only security groups not shared between projects"), + ) + _tag.add_tag_filtering_option_to_parser( - parser, _('security group'), - enhance_help=self.enhance_help_neutron) + parser, _('security group'), enhance_help=self.enhance_help_neutron + ) return parser def update_parser_compute(self, parser): @@ -257,7 +284,8 @@ def update_parser_compute(self, parser): action='store_true', default=False, help=self.enhance_help_nova_network( - _("Display information from all projects (admin only)")) + _("Display information from all projects (admin only)") + ), ) return parser @@ -272,95 +300,109 @@ def take_action_network(self, client, parsed_args): ).id filters['project_id'] = project_id + if parsed_args.shared is not None: + filters['shared'] = parsed_args.shared + _tag.get_tag_filtering_args(parsed_args, filters) - data = client.security_groups(fields=self.FIELDS_TO_RETRIEVE, - **filters) + data = client.security_groups( + fields=self.FIELDS_TO_RETRIEVE, **filters + ) columns = ( - "ID", - "Name", - "Description", - "Project ID", - "tags" + "id", + "name", + "description", + "project_id", + "tags", + "is_shared", ) column_headers = ( "ID", "Name", "Description", "Project", - "Tags" + "Tags", + "Shared", + ) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), ) - return (column_headers, - (utils.get_item_properties( - s, columns, - ) for s in data)) def take_action_compute(self, client, parsed_args): - search = {'all_tenants': parsed_args.all_projects} - data = client.api.security_group_list( + data = compute_v2.list_security_groups( # TODO(dtroyer): add limit, marker - search_opts=search, + client, + all_projects=parsed_args.all_projects, ) - columns = ( - "ID", - "Name", - "Description", - ) - column_headers = columns + columns: tuple[str, ...] = ("id", "name", "description") + column_headers: tuple[str, ...] 
= ("ID", "Name", "Description") if parsed_args.all_projects: - columns = columns + ('Tenant ID',) - column_headers = column_headers + ('Project',) - return (column_headers, - (utils.get_dict_properties( - s, columns, - ) for s in data)) + columns += ('tenant_id',) + column_headers += ('Project',) + return ( + column_headers, + ( + utils.get_dict_properties( + s, + columns, + ) + for s in data + ), + ) -class SetSecurityGroup(common.NetworkAndComputeCommand, - common.NeutronCommandWithExtraArgs): +class SetSecurityGroup( + common.NetworkAndComputeCommand, common.NeutronCommandWithExtraArgs +): _description = _("Set security group properties") def update_parser_common(self, parser): parser.add_argument( 'group', metavar='', - help=_("Security group to modify (name or ID)") + help=_("Security group to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar='', - help=_("New security group name") + '--name', metavar='', help=_("New security group name") ) parser.add_argument( "--description", metavar="", - help=_("New security group description") + help=_("New security group description"), ) stateful_group = parser.add_mutually_exclusive_group() stateful_group.add_argument( "--stateful", action='store_true', default=None, - help=_("Security group is stateful (Default)") + help=_("Security group is stateful (default)"), ) stateful_group.add_argument( "--stateless", action='store_true', default=None, - help=_("Security group is stateless") + help=_("Security group is stateless"), ) return parser def update_parser_network(self, parser): _tag.add_tag_option_to_parser_for_set( - parser, _('security group'), - enhance_help=self.enhance_help_neutron) + parser, _('security group'), enhance_help=self.enhance_help_neutron + ) return parser def take_action_network(self, client, parsed_args): - obj = client.find_security_group(parsed_args.group, - ignore_missing=False) + obj = client.find_security_group( + parsed_args.group, ignore_missing=False + ) attrs = {} if parsed_args.name is not None: attrs['name'] = parsed_args.name @@ -371,7 +413,8 @@ def take_action_network(self, client, parsed_args): if parsed_args.stateless: attrs['stateful'] = False attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) # NOTE(rtheis): Previous behavior did not raise a CommandError # if there were no updates. Maintain this behavior and issue # the update. @@ -381,20 +424,21 @@ def take_action_network(self, client, parsed_args): _tag.update_tags_for_set(client, obj, parsed_args) def take_action_compute(self, client, parsed_args): - data = client.api.security_group_find(parsed_args.group) + security_group = compute_v2.find_security_group( + client, parsed_args.group + ) + params = {} if parsed_args.name is not None: - data['name'] = parsed_args.name + params['name'] = parsed_args.name if parsed_args.description is not None: - data['description'] = parsed_args.description + params['description'] = parsed_args.description # NOTE(rtheis): Previous behavior did not raise a CommandError # if there were no updates. Maintain this behavior and issue # the update. 
- client.api.security_group_set( - data, - data['name'], - data['description'], + compute_v2.update_security_group( + client, security_group['id'], **params ) @@ -405,28 +449,26 @@ def update_parser_common(self, parser): parser.add_argument( 'group', metavar='', - help=_("Security group to display (name or ID)") + help=_("Security group to display (name or ID)"), ) return parser def take_action_network(self, client, parsed_args): - obj = client.find_security_group(parsed_args.group, - ignore_missing=False) + obj = client.find_security_group( + parsed_args.group, ignore_missing=False + ) display_columns, property_columns = _get_columns(obj) data = utils.get_item_properties( - obj, - property_columns, - formatters=_formatters_network + obj, property_columns, formatters=_formatters_network ) return (display_columns, data) def take_action_compute(self, client, parsed_args): - obj = client.api.security_group_find(parsed_args.group) - display_columns, property_columns = _get_columns(obj) + obj = compute_v2.find_security_group(client, parsed_args.group) + display_columns = ('description', 'id', 'name', 'project_id', 'rules') + property_columns = ('description', 'id', 'name', 'tenant_id', 'rules') data = utils.get_dict_properties( - obj, - property_columns, - formatters=_formatters_compute + obj, property_columns, formatters=_formatters_compute ) return (display_columns, data) @@ -435,19 +477,20 @@ class UnsetSecurityGroup(command.Command): _description = _("Unset security group properties") def get_parser(self, prog_name): - parser = super(UnsetSecurityGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'group', metavar="", - help=_("Security group to modify (name or ID)") + help=_("Security group to modify (name or ID)"), ) _tag.add_tag_option_to_parser_for_unset(parser, _('security group')) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_security_group(parsed_args.group, - ignore_missing=False) + obj = client.find_security_group( + parsed_args.group, ignore_missing=False + ) # tags is a subresource and it needs to be updated separately. _tag.update_tags_for_unset(client, obj, parsed_args) diff --git a/openstackclient/network/v2/security_group_rule.py b/openstackclient/network/v2/security_group_rule.py index 2179f33d8e..f6baac3cb7 100644 --- a/openstackclient/network/v2/security_group_rule.py +++ b/openstackclient/network/v2/security_group_rule.py @@ -20,6 +20,7 @@ from osc_lib import exceptions from osc_lib import utils +from openstackclient.api import compute_v2 from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -28,100 +29,35 @@ LOG = logging.getLogger(__name__) -def _format_security_group_rule_show(obj): - data = network_utils.transform_compute_security_group_rule(obj) - return zip(*sorted(data.items())) - - -def _format_network_port_range(rule): - # Display port range or ICMP type and code. 
For example: - # - ICMP type: 'type=3' - # - ICMP type and code: 'type=3:code=0' - # - ICMP code: Not supported - # - Matching port range: '443:443' - # - Different port range: '22:24' - # - Single port: '80:80' - # - No port range: '' - port_range = '' - if _is_icmp_protocol(rule['protocol']): - if rule['port_range_min']: - port_range += 'type=' + str(rule['port_range_min']) - if rule['port_range_max']: - port_range += ':code=' + str(rule['port_range_max']) - elif rule['port_range_min'] or rule['port_range_max']: - port_range_min = str(rule['port_range_min']) - port_range_max = str(rule['port_range_max']) - if rule['port_range_min'] is None: - port_range_min = port_range_max - if rule['port_range_max'] is None: - port_range_max = port_range_min - port_range = port_range_min + ':' + port_range_max - return port_range - - -def _format_remote_ip_prefix(rule): - remote_ip_prefix = rule['remote_ip_prefix'] - if remote_ip_prefix is None: - ethertype = rule['ether_type'] - if ethertype == 'IPv4': - remote_ip_prefix = '0.0.0.0/0' - elif ethertype == 'IPv6': - remote_ip_prefix = '::/0' - return remote_ip_prefix - - def _get_columns(item): - column_map = {} - hidden_columns = ['location', 'tenant_id'] + hidden_columns = ['location', 'name', 'tenant_id', 'tags'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, {}, hidden_columns ) -def _convert_to_lowercase(string): - return string.lower() - - -def _convert_ipvx_case(string): - if string.lower() == 'ipv4': - return 'IPv4' - if string.lower() == 'ipv6': - return 'IPv6' - return string - - -def _is_icmp_protocol(protocol): - # NOTE(rtheis): Neutron has deprecated protocol icmpv6. - # However, while the OSC CLI doesn't document the protocol, - # the code must still handle it. In addition, handle both - # protocol names and numbers. - if protocol in ['icmp', 'icmpv6', 'ipv6-icmp', '1', '58']: - return True - else: - return False - - # TODO(abhiraut): Use the SDK resource mapped attribute names once the # OSC minimum requirements include SDK 1.0. -class CreateSecurityGroupRule(common.NetworkAndComputeShowOne, - common.NeutronCommandWithExtraArgs): +class CreateSecurityGroupRule( + common.NetworkAndComputeShowOne, common.NeutronCommandWithExtraArgs +): _description = _("Create a new security group rule") def update_parser_common(self, parser): parser.add_argument( 'group', metavar='', - help=_("Create rule in this security group (name or ID)") + help=_("Create rule in this security group (name or ID)"), ) remote_group = parser.add_mutually_exclusive_group() remote_group.add_argument( "--remote-ip", metavar="", - help=_("Remote IP address block (may use CIDR notation; " - "default for IPv4 rule: 0.0.0.0/0, " - "default for IPv6 rule: ::/0)"), + help=_( + "Remote IP address block (may use CIDR notation; " + "default for IPv4 rule: 0.0.0.0/0, " + "default for IPv6 rule: ::/0)" + ), ) remote_group.add_argument( "--remote-group", @@ -150,10 +86,12 @@ def update_parser_common(self, parser): '--dst-port', metavar='', action=parseractions.RangeAction, - help=_("Destination port, may be a single port or a starting and " - "ending port range: 137:139. Required for IP protocols TCP " - "and UDP. Ignored for ICMP IP protocols."), - **dst_port_default + help=_( + "Destination port, may be a single port or a starting and " + "ending port range: 137:139. Required for IP protocols TCP " + "and UDP. Ignored for ICMP IP protocols." + ), + **dst_port_default, ) # NOTE(rtheis): Support either protocol option name for now. 
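The local helpers removed above (_format_security_group_rule_show, _format_network_port_range, _format_remote_ip_prefix, _convert_to_lowercase, _convert_ipvx_case, _is_icmp_protocol) are replaced by the network_utils equivalents that the listing and show code below now calls. As a rough, illustrative sketch only, re-stated from the removed helper and assuming the relocated network_utils.format_network_port_range keeps the same rule-dict inputs ('protocol', 'port_range_min', 'port_range_max'), the port-range strings it produces amount to:

def format_port_range_sketch(rule):
    # ICMP rules reuse the min/max fields for ICMP type and code,
    # e.g. 'type=3' or 'type=3:code=0'; other protocols render 'min:max'.
    port_range = ''
    if rule['protocol'] in ('icmp', 'icmpv6', 'ipv6-icmp', '1', '58'):
        if rule['port_range_min']:
            port_range += 'type=' + str(rule['port_range_min'])
        if rule['port_range_max']:
            port_range += ':code=' + str(rule['port_range_max'])
    elif rule['port_range_min'] or rule['port_range_max']:
        port_range_min = str(rule['port_range_min'])
        port_range_max = str(rule['port_range_max'])
        if rule['port_range_min'] is None:
            port_range_min = port_range_max
        if rule['port_range_max'] is None:
            port_range_max = port_range_min
        port_range = port_range_min + ':' + port_range_max
    return port_range

# For example:
#   {'protocol': 'tcp', 'port_range_min': 137, 'port_range_max': 139} -> '137:139'
#   {'protocol': 'icmp', 'port_range_min': 3, 'port_range_max': None} -> 'type=3'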
@@ -170,7 +108,8 @@ def update_parser_common(self, parser): "IP protocol (ah, dccp, egp, esp, gre, icmp, igmp, ipv6-encap, " "ipv6-frag, ipv6-icmp, ipv6-nonxt, ipv6-opts, ipv6-route, ospf, " "pgm, rsvp, sctp, tcp, udp, udplite, vrrp and integer " - "representations [0-255] or any; default: any (all protocols))") + "representations [0-255] or any; default: any (all protocols))" + ) if self.is_nova_network: protocol_help = protocol_help_compute elif self.is_neutron: @@ -178,22 +117,23 @@ def update_parser_common(self, parser): else: # Docs build: compose help for both nova-network and neutron protocol_help = self.split_help( - protocol_help_network, protocol_help_compute) + protocol_help_network, protocol_help_compute + ) protocol_group.add_argument( '--protocol', metavar='', - type=_convert_to_lowercase, + type=network_utils.convert_to_lowercase, help=protocol_help, - **proto_choices + **proto_choices, ) if not self.is_docs_build: protocol_group.add_argument( '--proto', metavar='', - type=_convert_to_lowercase, + type=network_utils.convert_to_lowercase, help=argparse.SUPPRESS, - **proto_choices + **proto_choices, ) return parser @@ -203,92 +143,71 @@ def update_parser_network(self, parser): '--description', metavar='', help=self.enhance_help_neutron( - _("Set security group rule description")) + _("Set security group rule description") + ), ) parser.add_argument( '--icmp-type', metavar='', type=int, help=self.enhance_help_neutron( - _("ICMP type for ICMP IP protocols")) + _("ICMP type for ICMP IP protocols") + ), ) parser.add_argument( '--icmp-code', metavar='', type=int, help=self.enhance_help_neutron( - _("ICMP code for ICMP IP protocols")) + _("ICMP code for ICMP IP protocols") + ), ) direction_group = parser.add_mutually_exclusive_group() direction_group.add_argument( '--ingress', action='store_true', help=self.enhance_help_neutron( - _("Rule applies to incoming network traffic (default)")) + _("Rule applies to incoming network traffic (default)") + ), ) direction_group.add_argument( '--egress', action='store_true', help=self.enhance_help_neutron( - _("Rule applies to outgoing network traffic")) + _("Rule applies to outgoing network traffic") + ), ) parser.add_argument( '--ethertype', metavar='', choices=['IPv4', 'IPv6'], - type=_convert_ipvx_case, + type=network_utils.convert_ipvx_case, help=self.enhance_help_neutron( - _("Ethertype of network traffic " - "(IPv4, IPv6; default: based on IP protocol)")) + _( + "Ethertype of network traffic " + "(IPv4, IPv6; default: based on IP protocol)" + ) + ), ) parser.add_argument( '--project', metavar='', - help=self.enhance_help_neutron(_("Owner's project (name or ID)")) + help=self.enhance_help_neutron(_("Owner's project (name or ID)")), ) identity_common.add_project_domain_option_to_parser( - parser, enhance_help=self.enhance_help_neutron) + parser, enhance_help=self.enhance_help_neutron + ) return parser - def _get_protocol(self, parsed_args, default_protocol='any'): - protocol = default_protocol - if parsed_args.protocol is not None: - protocol = parsed_args.protocol - if parsed_args.proto is not None: - protocol = parsed_args.proto - if protocol == 'any': - protocol = None - return protocol - - def _get_ethertype(self, parsed_args, protocol): - ethertype = 'IPv4' - if parsed_args.ethertype is not None: - ethertype = parsed_args.ethertype - elif self._is_ipv6_protocol(protocol): - ethertype = 'IPv6' - return ethertype - - def _is_ipv6_protocol(self, protocol): - # NOTE(rtheis): Neutron has deprecated protocol icmpv6. 
- # However, while the OSC CLI doesn't document the protocol, - # the code must still handle it. In addition, handle both - # protocol names and numbers. - if (protocol is not None and protocol.startswith('ipv6-') or - protocol in ['icmpv6', '41', '43', '44', '58', '59', '60']): - return True - else: - return False - def take_action_network(self, client, parsed_args): # Get the security group ID to hold the rule. security_group_id = client.find_security_group( - parsed_args.group, - ignore_missing=False + parsed_args.group, ignore_missing=False ).id # Build the create attributes. attrs = {} - attrs['protocol'] = self._get_protocol(parsed_args) + attrs['protocol'] = network_utils.get_protocol(parsed_args) if parsed_args.description is not None: attrs['description'] = parsed_args.description @@ -302,24 +221,31 @@ def take_action_network(self, client, parsed_args): # NOTE(rtheis): Use ethertype specified else default based # on IP protocol. - attrs['ethertype'] = self._get_ethertype(parsed_args, - attrs['protocol']) + attrs['ethertype'] = network_utils.get_ethertype( + parsed_args, attrs['protocol'] + ) # NOTE(rtheis): Validate the port range and ICMP type and code. # It would be ideal if argparse could do this. - if parsed_args.dst_port and (parsed_args.icmp_type or - parsed_args.icmp_code): - msg = _('Argument --dst-port not allowed with arguments ' - '--icmp-type and --icmp-code') + if parsed_args.dst_port and ( + parsed_args.icmp_type or parsed_args.icmp_code + ): + msg = _( + 'Argument --dst-port not allowed with arguments ' + '--icmp-type and --icmp-code' + ) raise exceptions.CommandError(msg) if parsed_args.icmp_type is None and parsed_args.icmp_code is not None: msg = _('Argument --icmp-type required with argument --icmp-code') raise exceptions.CommandError(msg) - is_icmp_protocol = _is_icmp_protocol(attrs['protocol']) - if not is_icmp_protocol and (parsed_args.icmp_type or - parsed_args.icmp_code): - msg = _('ICMP IP protocol required with arguments ' - '--icmp-type and --icmp-code') + is_icmp_protocol = network_utils.is_icmp_protocol(attrs['protocol']) + if not is_icmp_protocol and ( + parsed_args.icmp_type or parsed_args.icmp_code + ): + msg = _( + 'ICMP IP protocol required with arguments ' + '--icmp-type and --icmp-code' + ) raise exceptions.CommandError(msg) # NOTE(rtheis): For backwards compatibility, continue ignoring # the destination port range when an ICMP IP protocol is specified. @@ -333,13 +259,11 @@ def take_action_network(self, client, parsed_args): if parsed_args.remote_group is not None: attrs['remote_group_id'] = client.find_security_group( - parsed_args.remote_group, - ignore_missing=False + parsed_args.remote_group, ignore_missing=False ).id elif parsed_args.remote_address_group is not None: attrs['remote_address_group_id'] = client.find_address_group( - parsed_args.remote_address_group, - ignore_missing=False + parsed_args.remote_address_group, ignore_missing=False ).id elif parsed_args.remote_ip is not None: attrs['remote_ip_prefix'] = parsed_args.remote_ip @@ -358,7 +282,8 @@ def take_action_network(self, client, parsed_args): attrs['project_id'] = project_id attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) # Create and show the security group rule. 
obj = client.create_security_group_rule(**attrs) @@ -367,8 +292,10 @@ def take_action_network(self, client, parsed_args): return (display_columns, data) def take_action_compute(self, client, parsed_args): - group = client.api.security_group_find(parsed_args.group) - protocol = self._get_protocol(parsed_args, default_protocol='tcp') + group = compute_v2.find_security_group(client, parsed_args.group) + protocol = network_utils.get_protocol( + parsed_args, default_protocol='tcp' + ) if protocol == 'icmp': from_port, to_port = -1, -1 else: @@ -376,15 +303,16 @@ def take_action_compute(self, client, parsed_args): remote_ip = None if parsed_args.remote_group is not None: - parsed_args.remote_group = client.api.security_group_find( - parsed_args.remote_group, + parsed_args.remote_group = compute_v2.find_security_group( + client, parsed_args.remote_group )['id'] if parsed_args.remote_ip is not None: remote_ip = parsed_args.remote_ip else: remote_ip = '0.0.0.0/0' - obj = client.api.security_group_rule_create( + obj = compute_v2.create_security_group_rule( + client, security_group_id=group['id'], ip_protocol=protocol, from_port=from_port, @@ -392,7 +320,7 @@ def take_action_compute(self, client, parsed_args): remote_ip=remote_ip, remote_group=parsed_args.remote_group, ) - return _format_security_group_rule_show(obj) + return network_utils.format_security_group_rule_show(obj) class DeleteSecurityGroupRule(common.NetworkAndComputeDelete): @@ -407,17 +335,16 @@ def update_parser_common(self, parser): 'rule', metavar='', nargs="+", - help=_("Security group rule(s) to delete (ID only)") + help=_("Security group rule(s) to delete (ID only)"), ) return parser def take_action_network(self, client, parsed_args): - obj = client.find_security_group_rule( - self.r, ignore_missing=False) + obj = client.find_security_group_rule(self.r, ignore_missing=False) client.delete_security_group_rule(obj) def take_action_compute(self, client, parsed_args): - client.api.security_group_rule_delete(self.r) + compute_v2.delete_security_group_rule(client, self.r) class ListSecurityGroupRule(common.NetworkAndComputeLister): @@ -430,8 +357,8 @@ def _format_network_security_group_rule(self, rule): Create port_range column from port_range_min and port_range_max """ rule = rule.to_dict() - rule['port_range'] = _format_network_port_range(rule) - rule['remote_ip_prefix'] = _format_remote_ip_prefix(rule) + rule['port_range'] = network_utils.format_network_port_range(rule) + rule['remote_ip_prefix'] = network_utils.format_remote_ip_prefix(rule) return rule def update_parser_common(self, parser): @@ -439,7 +366,7 @@ def update_parser_common(self, parser): 'group', metavar='', nargs='?', - help=_("List all rules in this security group (name or ID)") + help=_("List all rules in this security group (name or ID)"), ) return parser @@ -451,46 +378,68 @@ def update_parser_network(self, parser): '--all-projects', action='store_true', default=False, - help=argparse.SUPPRESS + help=argparse.SUPPRESS, ) parser.add_argument( '--protocol', metavar='', - type=_convert_to_lowercase, + type=network_utils.convert_to_lowercase, help=self.enhance_help_neutron( - _("List rules by the IP protocol (ah, dhcp, egp, esp, gre, " - "icmp, igmp, ipv6-encap, ipv6-frag, ipv6-icmp, ipv6-nonxt, " - "ipv6-opts, ipv6-route, ospf, pgm, rsvp, sctp, tcp, udp, " - "udplite, vrrp and integer representations [0-255] or any; " - "default: any (all protocols))")) + _( + "List only rules with the specified IP protocol " + "(ah, dhcp, egp, esp, gre, " + "icmp, igmp, ipv6-encap, 
ipv6-frag, ipv6-icmp, " + "ipv6-nonxt, ipv6-opts, ipv6-route, ospf, pgm, rsvp, " + "sctp, tcp, udp, udplite, vrrp and integer " + "representations [0-255] or any; " + "default: any (all protocols))" + ) + ), ) parser.add_argument( '--ethertype', metavar='', - type=_convert_to_lowercase, + type=network_utils.convert_to_lowercase, help=self.enhance_help_neutron( - _("List rules by the Ethertype (IPv4 or IPv6)")) + _( + "List only rules with the specified Ethertype " + "(IPv4 or IPv6)" + ) + ), ) direction_group = parser.add_mutually_exclusive_group() direction_group.add_argument( '--ingress', action='store_true', help=self.enhance_help_neutron( - _("List rules applied to incoming network traffic")) + _("List only rules applied to incoming network traffic") + ), ) direction_group.add_argument( '--egress', action='store_true', help=self.enhance_help_neutron( - _("List rules applied to outgoing network traffic")) + _("List only rules applied to outgoing network traffic") + ), ) parser.add_argument( '--long', action='store_true', default=False, help=self.enhance_help_neutron( - _("**Deprecated** This argument is no longer needed")) + _("**Deprecated** This argument is no longer needed") + ), + ) + parser.add_argument( + '--project', + metavar='', + help=self.enhance_help_neutron( + _("List only rules with the specified project (name or ID)") + ), + ) + identity_common.add_project_domain_option_to_parser( + parser, enhance_help=self.enhance_help_neutron ) return parser @@ -500,7 +449,8 @@ def update_parser_compute(self, parser): action='store_true', default=False, help=self.enhance_help_nova_network( - _("Display information from all projects (admin only)")) + _("Display information from all projects (admin only)") + ), ) if not self.is_docs_build: # Accept but hide the argument for consistency with network. @@ -509,12 +459,12 @@ def update_parser_compute(self, parser): '--long', action='store_false', default=False, - help=argparse.SUPPRESS + help=argparse.SUPPRESS, ) return parser def _get_column_headers(self, parsed_args): - column_headers = ( + column_headers: tuple[str, ...] = ( 'ID', 'IP Protocol', 'Ethertype', @@ -524,19 +474,20 @@ def _get_column_headers(self, parsed_args): 'Remote Security Group', ) if self.is_neutron: - column_headers = column_headers + ('Remote Address Group',) + column_headers += ('Remote Address Group',) if parsed_args.group is None: - column_headers = column_headers + ('Security Group',) + column_headers += ('Security Group',) return column_headers def take_action_network(self, client, parsed_args): if parsed_args.long: - self.log.warning(_( + msg = _( "The --long option has been deprecated and is no longer needed" - )) + ) + self.log.warning(msg) column_headers = self._get_column_headers(parsed_args) - columns = ( + columns: tuple[str, ...] = ( 'id', 'protocol', 'ether_type', @@ -554,12 +505,11 @@ def take_action_network(self, client, parsed_args): # does not contain security group rules resources. So use # the security group ID in a query to get the resources. 
security_group_id = client.find_security_group( - parsed_args.group, - ignore_missing=False + parsed_args.group, ignore_missing=False ).id query = {'security_group_id': security_group_id} else: - columns = columns + ('security_group_id',) + columns += ('security_group_id',) if parsed_args.ingress: query['direction'] = 'ingress' @@ -567,20 +517,35 @@ def take_action_network(self, client, parsed_args): query['direction'] = 'egress' if parsed_args.protocol is not None: query['protocol'] = parsed_args.protocol + if parsed_args.project is not None: + identity_client = self.app.client_manager.identity + project_id = identity_common.find_project( + identity_client, + parsed_args.project, + parsed_args.project_domain, + ).id + query['tenant_id'] = project_id + query['project_id'] = project_id rules = [ self._format_network_security_group_rule(r) for r in client.security_group_rules(**query) ] - return (column_headers, - (utils.get_dict_properties( - s, columns, - ) for s in rules)) + return ( + column_headers, + ( + utils.get_dict_properties( + s, + columns, + ) + for s in rules + ), + ) def take_action_compute(self, client, parsed_args): column_headers = self._get_column_headers(parsed_args) - columns = ( + columns: tuple[str, ...] = ( "ID", "IP Protocol", "Ethertype", @@ -591,15 +556,16 @@ def take_action_compute(self, client, parsed_args): rules_to_list = [] if parsed_args.group is not None: - group = client.api.security_group_find( - parsed_args.group, + security_group = compute_v2.find_security_group( + client, parsed_args.group ) - rules_to_list = group['rules'] + rules_to_list = security_group['rules'] else: - columns = columns + ('parent_group_id',) - search = {'all_tenants': parsed_args.all_projects} - for group in client.api.security_group_list(search_opts=search): - rules_to_list.extend(group['rules']) + columns += ('parent_group_id',) + for security_group in compute_v2.list_security_groups( + client, all_projects=parsed_args.all_projects + ): + rules_to_list.extend(security_group['rules']) # NOTE(rtheis): Turn the raw rules into resources. rules = [] @@ -612,10 +578,16 @@ def take_action_compute(self, client, parsed_args): # network_utils.transform_compute_security_group_rule(rule), # )) - return (column_headers, - (utils.get_dict_properties( - s, columns, - ) for s in rules)) + return ( + column_headers, + ( + utils.get_dict_properties( + s, + columns, + ) + for s in rules + ), + ) class ShowSecurityGroupRule(common.NetworkAndComputeShowOne): @@ -625,16 +597,19 @@ def update_parser_common(self, parser): parser.add_argument( 'rule', metavar="", - help=_("Security group rule to display (ID only)") + help=_("Security group rule to display (ID only)"), ) return parser def take_action_network(self, client, parsed_args): - obj = client.find_security_group_rule(parsed_args.rule, - ignore_missing=False) + obj = client.find_security_group_rule( + parsed_args.rule, ignore_missing=False + ) # necessary for old rules that have None in this field if not obj['remote_ip_prefix']: - obj['remote_ip_prefix'] = _format_remote_ip_prefix(obj) + obj['remote_ip_prefix'] = network_utils.format_remote_ip_prefix( + obj + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) return (display_columns, data) @@ -646,7 +621,7 @@ def take_action_compute(self, client, parsed_args): # the requested rule. 
obj = None security_group_rules = [] - for security_group in client.api.security_group_list(): + for security_group in compute_v2.list_security_groups(client): security_group_rules.extend(security_group['rules']) for security_group_rule in security_group_rules: if parsed_args.rule == str(security_group_rule.get('id')): @@ -654,9 +629,11 @@ def take_action_compute(self, client, parsed_args): break if obj is None: - msg = _("Could not find security group rule " - "with ID '%s'") % parsed_args.rule + msg = ( + _("Could not find security group rule with ID '%s'") + % parsed_args.rule + ) raise exceptions.CommandError(msg) # NOTE(rtheis): Format security group rule - return _format_security_group_rule_show(obj) + return network_utils.format_security_group_rule_show(obj) diff --git a/openstackclient/network/v2/subnet.py b/openstackclient/network/v2/subnet.py index 8e3a877ffd..3357f8930e 100644 --- a/openstackclient/network/v2/subnet.py +++ b/openstackclient/network/v2/subnet.py @@ -15,15 +15,16 @@ import copy import logging +import typing as ty from cliff import columns as cliff_columns from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils from osc_lib.utils import tags as _tag +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -36,29 +37,40 @@ def _update_arguments(obj_list, parsed_args_list, option): try: obj_list.remove(item) except ValueError: - msg = (_("Subnet does not contain %(option)s %(value)s") % - {'option': option, 'value': item}) + msg = _("Subnet does not contain %(option)s %(value)s") % { + 'option': option, + 'value': item, + } raise exceptions.CommandError(msg) -class AllocationPoolsColumn(cliff_columns.FormattableColumn): +class AllocationPoolsColumn(cliff_columns.FormattableColumn[ty.Any]): def human_readable(self): - pool_formatted = ['%s-%s' % (pool.get('start', ''), - pool.get('end', '')) - for pool in self._value] + pool_formatted = [ + '{}-{}'.format(pool.get('start', ''), pool.get('end', '')) + for pool in self._value + ] return ','.join(pool_formatted) -class HostRoutesColumn(cliff_columns.FormattableColumn): +class HostRoutesColumn(cliff_columns.FormattableColumn[ty.Any]): def human_readable(self): # Map the host route keys to match --host-route option. 
return utils.format_list_of_dicts( - convert_entries_to_gateway(self._value)) + convert_entries_to_gateway(self._value) + ) + + +class UnsortedListColumn(cliff_columns.FormattableColumn[list[ty.Any]]): + # format_columns.ListColumn sorts the output, but for things like + # DNS server addresses the order matters + def human_readable(self): + return ', '.join(self._value) _formatters = { 'allocation_pools': AllocationPoolsColumn, - 'dns_nameservers': format_columns.ListColumn, + 'dns_nameservers': UnsortedListColumn, 'host_routes': HostRoutesColumn, 'service_types': format_columns.ListColumn, 'tags': format_columns.ListColumn, @@ -72,34 +84,42 @@ def _get_common_parse_arguments(parser, is_create=True): dest='allocation_pools', action=parseractions.MultiKeyValueAction, required_keys=['start', 'end'], - help=_("Allocation pool IP addresses for this subnet " - "e.g.: start=192.168.199.2,end=192.168.199.254 " - "(repeat option to add multiple IP addresses)") + help=_( + "Allocation pool IP addresses for this subnet, " + "for example, start=192.168.199.2,end=192.168.199.254 " + "(repeat option to add multiple IP addresses)" + ), ) if not is_create: parser.add_argument( '--no-allocation-pool', action='store_true', - help=_("Clear associated allocation-pools from the subnet. " - "Specify both --allocation-pool and --no-allocation-pool " - "to overwrite the current allocation pool information.") + help=_( + "Clear associated allocation-pools from the subnet. " + "Specify both --allocation-pool and --no-allocation-pool " + "to overwrite the current allocation pool information." + ), ) parser.add_argument( '--dns-nameserver', metavar='', action='append', dest='dns_nameservers', - help=_("DNS server for this subnet " - "(repeat option to set multiple DNS servers)") + help=_( + "DNS server for this subnet " + "(repeat option to set multiple DNS servers)" + ), ) if not is_create: parser.add_argument( '--no-dns-nameservers', action='store_true', - help=_("Clear existing information of DNS Nameservers. " - "Specify both --dns-nameserver and --no-dns-nameserver " - "to overwrite the current DNS Nameserver information.") + help=_( + "Clear existing information of DNS Nameservers. " + "Specify both --dns-nameserver and --no-dns-nameserver " + "to overwrite the current DNS Nameserver information." + ), ) parser.add_argument( '--host-route', @@ -107,29 +127,35 @@ def _get_common_parse_arguments(parser, is_create=True): dest='host_routes', action=parseractions.MultiKeyValueAction, required_keys=['destination', 'gateway'], - help=_("Additional route for this subnet " - "e.g.: destination=10.10.0.0/16,gateway=192.168.71.254 " - "destination: destination subnet (in CIDR notation) " - "gateway: nexthop IP address " - "(repeat option to add multiple routes)") + help=_( + "Additional route for this subnet, " + "for example, destination=10.10.0.0/16,gateway=192.168.71.254 " + "destination: destination subnet (in CIDR notation) " + "gateway: next-hop IP address " + "(repeat option to add multiple routes)" + ), ) if not is_create: parser.add_argument( '--no-host-route', action='store_true', - help=_("Clear associated host-routes from the subnet. " - "Specify both --host-route and --no-host-route " - "to overwrite the current host route information.") + help=_( + "Clear associated host-routes from the subnet. " + "Specify both --host-route and --no-host-route " + "to overwrite the current host route information." 
+ ), ) parser.add_argument( '--service-type', metavar='', action='append', dest='service_types', - help=_("Service type for this subnet " - "e.g.: network:floatingip_agent_gateway. " - "Must be a valid device owner value for a network port " - "(repeat option to set multiple service types)") + help=_( + "Service type for this subnet, " + "for example, network:floatingip_agent_gateway. " + "Must be a valid device owner value for a network port " + "(repeat option to set multiple service types)" + ), ) @@ -146,9 +172,7 @@ def _get_columns(item): 'tenant_id', ] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) @@ -189,11 +213,13 @@ def _get_attrs(client_manager, parsed_args, is_create=True): parsed_args.project_domain, ).id attrs['project_id'] = project_id - attrs['network_id'] = client.find_network(parsed_args.network, - ignore_missing=False).id + attrs['network_id'] = client.find_network( + parsed_args.network, ignore_missing=False + ).id if parsed_args.subnet_pool is not None: - subnet_pool = client.find_subnet_pool(parsed_args.subnet_pool, - ignore_missing=False) + subnet_pool = client.find_subnet_pool( + parsed_args.subnet_pool, ignore_missing=False + ) attrs['subnetpool_id'] = subnet_pool.id if parsed_args.use_prefix_delegation: attrs['subnetpool_id'] = "prefix_delegation" @@ -212,21 +238,26 @@ def _get_attrs(client_manager, parsed_args, is_create=True): if parsed_args.network_segment is not None: attrs['segment_id'] = client.find_segment( - parsed_args.network_segment, ignore_missing=False).id + parsed_args.network_segment, ignore_missing=False + ).id if 'gateway' in parsed_args and parsed_args.gateway is not None: gateway = parsed_args.gateway.lower() if not is_create and gateway == 'auto': - msg = _("Auto option is not available for Subnet Set. " - "Valid options are or none") + msg = _( + "Auto option is not available for Subnet Set. 
" + "Valid options are or none" + ) raise exceptions.CommandError(msg) elif gateway != 'auto': if gateway == 'none': attrs['gateway_ip'] = None else: attrs['gateway_ip'] = gateway - if ('allocation_pools' in parsed_args and - parsed_args.allocation_pools is not None): + if ( + 'allocation_pools' in parsed_args + and parsed_args.allocation_pools is not None + ): attrs['allocation_pools'] = parsed_args.allocation_pools if parsed_args.dhcp: attrs['enable_dhcp'] = True @@ -236,15 +267,20 @@ def _get_attrs(client_manager, parsed_args, is_create=True): attrs['dns_publish_fixed_ip'] = True if parsed_args.no_dns_publish_fixed_ip: attrs['dns_publish_fixed_ip'] = False - if ('dns_nameservers' in parsed_args and - parsed_args.dns_nameservers is not None): + if ( + 'dns_nameservers' in parsed_args + and parsed_args.dns_nameservers is not None + ): attrs['dns_nameservers'] = parsed_args.dns_nameservers if 'host_routes' in parsed_args and parsed_args.host_routes is not None: # Change 'gateway' entry to 'nexthop' to match the API attrs['host_routes'] = convert_entries_to_nexthop( - parsed_args.host_routes) - if ('service_types' in parsed_args and - parsed_args.service_types is not None): + parsed_args.host_routes + ) + if ( + 'service_types' in parsed_args + and parsed_args.service_types is not None + ): attrs['service_types'] = parsed_args.service_types if parsed_args.description is not None: attrs['description'] = parsed_args.description @@ -257,117 +293,127 @@ class CreateSubnet(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create a subnet") def get_parser(self, prog_name): - parser = super(CreateSubnet, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar='', - help=_("New subnet name") + 'name', metavar='', help=_("New subnet name") ) parser.add_argument( '--project', metavar='', - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) subnet_pool_group = parser.add_mutually_exclusive_group() subnet_pool_group.add_argument( '--subnet-pool', metavar='', - help=_("Subnet pool from which this subnet will obtain a CIDR " - "(Name or ID)") + help=_( + "Subnet pool from which this subnet will obtain a CIDR " + "(Name or ID)" + ), ) subnet_pool_group.add_argument( '--use-prefix-delegation', - help=_("Use 'prefix-delegation' if IP is IPv6 format " - "and IP would be delegated externally") + action='store_true', + help=_( + "Use 'prefix-delegation' if IP is IPv6 format " + "and IP would be delegated externally" + ), ) subnet_pool_group.add_argument( '--use-default-subnet-pool', action='store_true', - help=_("Use default subnet pool for --ip-version") + help=_("Use default subnet pool for --ip-version"), ) parser.add_argument( '--prefix-length', metavar='', - help=_("Prefix length for subnet allocation from subnet pool") + help=_("Prefix length for subnet allocation from subnet pool"), ) parser.add_argument( '--subnet-range', metavar='', - help=_("Subnet range in CIDR notation " - "(required if --subnet-pool is not specified, " - "optional otherwise)") + help=_( + "Subnet range in CIDR notation " + "(required if --subnet-pool is not specified, " + "optional otherwise)" + ), ) dhcp_enable_group = parser.add_mutually_exclusive_group() dhcp_enable_group.add_argument( - '--dhcp', - action='store_true', - help=_("Enable DHCP (default)") + '--dhcp', action='store_true', help=_("Enable DHCP (default)") ) dhcp_enable_group.add_argument( - 
'--no-dhcp', - action='store_true', - help=_("Disable DHCP") + '--no-dhcp', action='store_true', help=_("Disable DHCP") ) dns_publish_fixed_ip_group = parser.add_mutually_exclusive_group() dns_publish_fixed_ip_group.add_argument( '--dns-publish-fixed-ip', action='store_true', - help=_("Enable publishing fixed IPs in DNS") + help=_("Enable publishing fixed IPs in DNS"), ) dns_publish_fixed_ip_group.add_argument( '--no-dns-publish-fixed-ip', action='store_true', - help=_("Disable publishing fixed IPs in DNS (default)") + help=_("Disable publishing fixed IPs in DNS (default)"), ) parser.add_argument( '--gateway', metavar='', default='auto', - help=_("Specify a gateway for the subnet. The three options are: " - ": Specific IP address to use as the gateway, " - "'auto': Gateway address should automatically be chosen " - "from within the subnet itself, 'none': This subnet will " - "not use a gateway, e.g.: --gateway 192.168.9.1, " - "--gateway auto, --gateway none (default is 'auto').") + help=_( + "Specify a gateway for the subnet. The three options are: " + ": Specific IP address to use as the gateway, " + "'auto': Gateway address should automatically be chosen " + "from within the subnet itself, 'none': This subnet will " + "not use a gateway. For example, --gateway 192.168.9.1, " + "--gateway auto or --gateway none (default is 'auto')." + ), ) parser.add_argument( '--ip-version', type=int, default=4, choices=[4, 6], - help=_("IP version (default is 4). Note that when subnet pool is " - "specified, IP version is determined from the subnet pool " - "and this option is ignored.") + help=_( + "IP version (default is 4). Note that when subnet pool is " + "specified, IP version is determined from the subnet pool " + "and this option is ignored." + ), ) parser.add_argument( '--ipv6-ra-mode', choices=['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'], - help=_("IPv6 RA (Router Advertisement) mode, " - "valid modes: [dhcpv6-stateful, dhcpv6-stateless, slaac]") + help=_( + "IPv6 RA (Router Advertisement) mode, " + "valid modes: [dhcpv6-stateful, dhcpv6-stateless, slaac]" + ), ) parser.add_argument( '--ipv6-address-mode', choices=['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'], - help=_("IPv6 address mode, " - "valid modes: [dhcpv6-stateful, dhcpv6-stateless, slaac]") + help=_( + "IPv6 address mode, " + "valid modes: [dhcpv6-stateful, dhcpv6-stateless, slaac]" + ), ) parser.add_argument( '--network-segment', metavar='', - help=_("Network segment to associate with this subnet " - "(name or ID)") + help=_( + "Network segment to associate with this subnet (name or ID)" + ), ) parser.add_argument( '--network', required=True, metavar='', - help=_("Network this subnet belongs to (name or ID)") + help=_("Network this subnet belongs to (name or ID)"), ) parser.add_argument( '--description', metavar='', - help=_("Set subnet description") + help=_("Set subnet description"), ) _get_common_parse_arguments(parser) _tag.add_tag_option_to_parser_for_create(parser, _('subnet')) @@ -377,7 +423,8 @@ def take_action(self, parsed_args): client = self.app.client_manager.network attrs = _get_attrs(self.app.client_manager, parsed_args) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_subnet(**attrs) # tags cannot be set when created, so tags need to be set later. 
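
The take_action() above funnels everything through attribute names the Neutron API understands: the CLI's host-route 'gateway' key becomes 'nexthop', and tags are applied in a second call because the create API does not accept them. A hedged sketch of the equivalent flow against openstacksdk directly (the cloud name, values, and the proxy's set_tags() helper are assumptions, not part of this patch):

```python
# Illustrative only: mirrors what CreateSubnet.take_action() does through the
# client manager, using openstacksdk's network proxy directly.
import openstack

conn = openstack.connect(cloud='devstack')  # assumed clouds.yaml entry
network = conn.network.find_network('private', ignore_missing=False)

subnet = conn.network.create_subnet(
    name='demo-subnet',
    network_id=network.id,
    ip_version=4,
    cidr='192.168.199.0/24',
    gateway_ip='192.168.199.1',
    allocation_pools=[{'start': '192.168.199.2', 'end': '192.168.199.254'}],
    # the API-side key is 'nexthop'; the CLI's --host-route option uses 'gateway'
    host_routes=[
        {'destination': '10.10.0.0/16', 'nexthop': '192.168.199.254'},
    ],
)
# Tags are a sub-resource and cannot be passed to the create call, which is
# why the command calls _tag.update_tags_for_set() afterwards.
conn.network.set_tags(subnet, ['demo'])
```
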
_tag.update_tags_for_set(client, obj, parsed_args) @@ -390,12 +437,12 @@ class DeleteSubnet(command.Command): _description = _("Delete subnet(s)") def get_parser(self, prog_name): - parser = super(DeleteSubnet, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'subnet', metavar="", nargs='+', - help=_("Subnet(s) to delete (name or ID)") + help=_("Subnet(s) to delete (name or ID)"), ) return parser @@ -409,14 +456,20 @@ def take_action(self, parsed_args): client.delete_subnet(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete subnet with " - "name or ID '%(subnet)s': %(e)s"), - {'subnet': subnet, 'e': e}) + LOG.error( + _( + "Failed to delete subnet with " + "name or ID '%(subnet)s': %(e)s" + ), + {'subnet': subnet, 'e': e}, + ) if result > 0: total = len(parsed_args.subnet) - msg = (_("%(result)s of %(total)s subnets failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s subnets failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -426,12 +479,12 @@ class ListSubnet(command.Lister): _description = _("List subnets") def get_parser(self, prog_name): - parser = super(ListSubnet, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) parser.add_argument( '--ip-version', @@ -439,65 +492,76 @@ def get_parser(self, prog_name): choices=[4, 6], metavar='', dest='ip_version', - help=_("List only subnets of given IP version in output. " - "Allowed values for IP version are 4 and 6."), + help=_( + "List only subnets with the specified IP version. " + "Allowed values for IP version are 4 and 6." + ), ) dhcp_enable_group = parser.add_mutually_exclusive_group() dhcp_enable_group.add_argument( '--dhcp', action='store_true', - help=_("List subnets which have DHCP enabled") + help=_("List only subnets which have DHCP enabled"), ) dhcp_enable_group.add_argument( '--no-dhcp', action='store_true', - help=_("List subnets which have DHCP disabled") + help=_("List only subnets which have DHCP disabled"), ) parser.add_argument( '--service-type', metavar='', action='append', dest='service_types', - help=_("List only subnets of a given service type in output " - "e.g.: network:floatingip_agent_gateway. " - "Must be a valid device owner value for a network port " - "(repeat option to list multiple service types)") + help=_( + "List only subnets with the specified service type, " + "for example, network:floatingip_agent_gateway. " + "Must be a valid device owner value for a network port " + "(repeat option to list multiple service types)." 
+ ), ) parser.add_argument( '--project', metavar='', - help=_("List only subnets which belong to a given project " - "in output (name or ID)") + help=_( + "List only subnets with the specified project (name or ID)" + ), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--network', metavar='', - help=_("List only subnets which belong to a given network " - "in output (name or ID)") + help=_( + "List only subnets which belong to the specified network " + "(name or ID)" + ), ) parser.add_argument( '--gateway', metavar='', - help=_("List only subnets of given gateway IP in output") + help=_("List only subnets with the specified gateway IP"), ) parser.add_argument( '--name', metavar='', - help=_("List only subnets of given name in output") + help=_("List only subnets with the specified name"), ) parser.add_argument( '--subnet-range', metavar='', - help=_("List only subnets of given subnet range " - "(in CIDR notation) in output " - "e.g.: --subnet-range 10.10.0.0/16") + help=_( + "List only subnets with the specified subnet range " + "(in CIDR notation). " + "For example, --subnet-range 10.10.0.0/16" + ), ) parser.add_argument( '--subnet-pool', metavar='', - help=_("List only subnets which belong to a given subnet pool " - "in output (Name or ID)") + help=_( + "List only subnets which belong to the specified subnet pool " + "(name or ID)" + ), ) _tag.add_tag_filtering_option_to_parser(parser, _('subnets')) return parser @@ -524,8 +588,9 @@ def take_action(self, parsed_args): ).id filters['project_id'] = project_id if parsed_args.network: - network_id = network_client.find_network(parsed_args.network, - ignore_missing=False).id + network_id = network_client.find_network( + parsed_args.network, ignore_missing=False + ).id filters['network_id'] = network_id if parsed_args.gateway: filters['gateway_ip'] = parsed_args.gateway @@ -535,26 +600,49 @@ def take_action(self, parsed_args): filters['cidr'] = parsed_args.subnet_range if parsed_args.subnet_pool: subnetpool_id = network_client.find_subnet_pool( - parsed_args.subnet_pool, ignore_missing=False).id + parsed_args.subnet_pool, ignore_missing=False + ).id filters['subnetpool_id'] = subnetpool_id _tag.get_tag_filtering_args(parsed_args, filters) data = network_client.subnets(**filters) - headers = ('ID', 'Name', 'Network', 'Subnet') - columns = ('id', 'name', 'network_id', 'cidr') + headers: tuple[str, ...] = ('ID', 'Name', 'Network', 'Subnet') + columns: tuple[str, ...] 
= ('id', 'name', 'network_id', 'cidr') if parsed_args.long: - headers += ('Project', 'DHCP', 'Name Servers', - 'Allocation Pools', 'Host Routes', 'IP Version', - 'Gateway', 'Service Types', 'Tags') - columns += ('project_id', 'is_dhcp_enabled', 'dns_nameservers', - 'allocation_pools', 'host_routes', 'ip_version', - 'gateway_ip', 'service_types', 'tags') - - return (headers, - (utils.get_item_properties( - s, columns, + headers += ( + 'Project', + 'DHCP', + 'Name Servers', + 'Allocation Pools', + 'Host Routes', + 'IP Version', + 'Gateway', + 'Service Types', + 'Tags', + ) + columns += ( + 'project_id', + 'is_dhcp_enabled', + 'dns_nameservers', + 'allocation_pools', + 'host_routes', + 'ip_version', + 'gateway_ip', + 'service_types', + 'tags', + ) + + return ( + headers, + ( + utils.get_item_properties( + s, + columns, formatters=_formatters, - ) for s in data)) + ) + for s in data + ), + ) # TODO(abhiraut): Use the SDK resource mapped attribute names once the @@ -563,60 +651,57 @@ class SetSubnet(common.NeutronCommandWithExtraArgs): _description = _("Set subnet properties") def get_parser(self, prog_name): - parser = super(SetSubnet, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'subnet', metavar="", - help=_("Subnet to modify (name or ID)") + help=_("Subnet to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar='', - help=_("Updated name of the subnet") + '--name', metavar='', help=_("Updated name of the subnet") ) dhcp_enable_group = parser.add_mutually_exclusive_group() dhcp_enable_group.add_argument( - '--dhcp', - action='store_true', - default=None, - help=_("Enable DHCP") + '--dhcp', action='store_true', default=None, help=_("Enable DHCP") ) dhcp_enable_group.add_argument( - '--no-dhcp', - action='store_true', - help=_("Disable DHCP") + '--no-dhcp', action='store_true', help=_("Disable DHCP") ) dns_publish_fixed_ip_group = parser.add_mutually_exclusive_group() dns_publish_fixed_ip_group.add_argument( '--dns-publish-fixed-ip', action='store_true', - help=_("Enable publishing fixed IPs in DNS") + help=_("Enable publishing fixed IPs in DNS"), ) dns_publish_fixed_ip_group.add_argument( '--no-dns-publish-fixed-ip', action='store_true', - help=_("Disable publishing fixed IPs in DNS") + help=_("Disable publishing fixed IPs in DNS"), ) parser.add_argument( '--gateway', metavar='', - help=_("Specify a gateway for the subnet. The options are: " - ": Specific IP address to use as the gateway, " - "'none': This subnet will not use a gateway, " - "e.g.: --gateway 192.168.9.1, --gateway none.") + help=_( + "Specify a gateway for the subnet. The options are: " + ": Specific IP address to use as the gateway, " + "'none': This subnet will not use a gateway. " + "For example, --gateway 192.168.9.1 or --gateway none." + ), ) parser.add_argument( '--network-segment', metavar='', - help=_("Network segment to associate with this subnet (name or " - "ID). It is only allowed to set the segment if the current " - "value is `None`, the network must also have only one " - "segment and only one subnet can exist on the network.") + help=_( + "Network segment to associate with this subnet (name or " + "ID). It is only allowed to set the segment if the current " + "value is `None`. The network must also have only one " + "segment and only one subnet can exist on the network." 
+ ), ) parser.add_argument( '--description', metavar='', - help=_("Set subnet description") + help=_("Set subnet description"), ) _tag.add_tag_option_to_parser_for_set(parser, _('subnet')) _get_common_parse_arguments(parser, is_create=False) @@ -625,8 +710,9 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_subnet(parsed_args.subnet, ignore_missing=False) - attrs = _get_attrs(self.app.client_manager, parsed_args, - is_create=False) + attrs = _get_attrs( + self.app.client_manager, parsed_args, is_create=False + ) if 'dns_nameservers' in attrs: if not parsed_args.no_dns_nameservers: attrs['dns_nameservers'] += obj.dns_nameservers @@ -645,7 +731,8 @@ def take_action(self, parsed_args): if 'service_types' in attrs: attrs['service_types'] += obj.service_types attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: client.update_subnet(obj, **attrs) # tags is a subresource and it needs to be updated separately. @@ -657,17 +744,18 @@ class ShowSubnet(command.ShowOne): _description = _("Display subnet details") def get_parser(self, prog_name): - parser = super(ShowSubnet, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'subnet', metavar="", - help=_("Subnet to display (name or ID)") + help=_("Subnet to display (name or ID)"), ) return parser def take_action(self, parsed_args): - obj = self.app.client_manager.network.find_subnet(parsed_args.subnet, - ignore_missing=False) + obj = self.app.client_manager.network.find_subnet( + parsed_args.subnet, ignore_missing=False + ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters=_formatters) return (display_columns, data) @@ -677,29 +765,33 @@ class UnsetSubnet(common.NeutronUnsetCommandWithExtraArgs): _description = _("Unset subnet properties") def get_parser(self, prog_name): - parser = super(UnsetSubnet, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--allocation-pool', metavar='start=,end=', dest='allocation_pools', action=parseractions.MultiKeyValueAction, required_keys=['start', 'end'], - help=_('Allocation pool IP addresses to be removed from this ' - 'subnet e.g.: start=192.168.199.2,end=192.168.199.254 ' - '(repeat option to unset multiple allocation pools)') + help=_( + 'Allocation pool IP addresses to be removed from this ' + 'subnet, for example, start=192.168.199.2,end=192.168.199.254 ' + '(repeat option to unset multiple allocation pools)' + ), ) parser.add_argument( '--gateway', action='store_true', - help=_("Remove gateway IP from this subnet") + help=_("Remove gateway IP from this subnet"), ) parser.add_argument( '--dns-nameserver', metavar='', action='append', dest='dns_nameservers', - help=_('DNS server to be removed from this subnet ' - '(repeat option to unset multiple DNS servers)') + help=_( + 'DNS server to be removed from this subnet ' + '(repeat option to unset multiple DNS servers)' + ), ) parser.add_argument( '--host-route', @@ -707,27 +799,31 @@ def get_parser(self, prog_name): dest='host_routes', action=parseractions.MultiKeyValueAction, required_keys=['destination', 'gateway'], - help=_('Route to be removed from this subnet ' - 'e.g.: destination=10.10.0.0/16,gateway=192.168.71.254 ' - 'destination: destination subnet (in CIDR notation) ' - 'gateway: nexthop IP address ' - '(repeat option to unset multiple host 
routes)') + help=_( + 'Route to be removed from this subnet, ' + 'for example, destination=10.10.0.0/16,gateway=192.168.71.254 ' + 'destination: destination subnet (in CIDR notation) ' + 'gateway: next-hop IP address ' + '(repeat option to unset multiple host routes)' + ), ) parser.add_argument( '--service-type', metavar='', action='append', dest='service_types', - help=_('Service type to be removed from this subnet ' - 'e.g.: network:floatingip_agent_gateway. ' - 'Must be a valid device owner value for a network port ' - '(repeat option to unset multiple service types)') + help=_( + 'Service type to be removed from this subnet, ' + 'for example, network:floatingip_agent_gateway. ' + 'Must be a valid device owner value for a network port ' + '(repeat option to unset multiple service types)' + ), ) _tag.add_tag_option_to_parser_for_unset(parser, _('subnet')) parser.add_argument( 'subnet', metavar="", - help=_("Subnet to modify (name or ID)") + help=_("Subnet to modify (name or ID)"), ) return parser @@ -735,33 +831,41 @@ def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_subnet(parsed_args.subnet, ignore_missing=False) - attrs = {} + attrs: dict[str, ty.Any] = {} if parsed_args.gateway: attrs['gateway_ip'] = None if parsed_args.dns_nameservers: attrs['dns_nameservers'] = copy.deepcopy(obj.dns_nameservers) - _update_arguments(attrs['dns_nameservers'], - parsed_args.dns_nameservers, - 'dns-nameserver') + _update_arguments( + attrs['dns_nameservers'], + parsed_args.dns_nameservers, + 'dns-nameserver', + ) if parsed_args.host_routes: attrs['host_routes'] = copy.deepcopy(obj.host_routes) _update_arguments( attrs['host_routes'], convert_entries_to_nexthop(parsed_args.host_routes), - 'host-route') + 'host-route', + ) if parsed_args.allocation_pools: attrs['allocation_pools'] = copy.deepcopy(obj.allocation_pools) - _update_arguments(attrs['allocation_pools'], - parsed_args.allocation_pools, - 'allocation-pool') + _update_arguments( + attrs['allocation_pools'], + parsed_args.allocation_pools, + 'allocation-pool', + ) if parsed_args.service_types: attrs['service_types'] = copy.deepcopy(obj.service_types) - _update_arguments(attrs['service_types'], - parsed_args.service_types, - 'service-type') + _update_arguments( + attrs['service_types'], + parsed_args.service_types, + 'service-type', + ) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: client.update_subnet(obj, **attrs) diff --git a/openstackclient/network/v2/subnet_pool.py b/openstackclient/network/v2/subnet_pool.py index 2369960ecb..399ce483f3 100644 --- a/openstackclient/network/v2/subnet_pool.py +++ b/openstackclient/network/v2/subnet_pool.py @@ -17,11 +17,11 @@ from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils from osc_lib.utils import tags as _tag +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common from openstackclient.network import common @@ -39,9 +39,7 @@ def _get_columns(item): } hidden_columns = ['location', 'tenant_id'] return utils.get_osc_show_columns_for_sdk_resource( - item, - column_map, - hidden_columns + item, column_map, hidden_columns ) @@ -68,7 +66,8 @@ def _get_attrs(client_manager, parsed_args): if parsed_args.address_scope is not None: attrs['address_scope_id'] = 
network_client.find_address_scope( - parsed_args.address_scope, ignore_missing=False).id + parsed_args.address_scope, ignore_missing=False + ).id if 'no_address_scope' in parsed_args and parsed_args.no_address_scope: attrs['address_scope_id'] = None @@ -108,29 +107,31 @@ def _add_prefix_options(parser, for_create=False): dest='prefixes', action='append', required=for_create, - help=_("Set subnet pool prefixes (in CIDR notation) " - "(repeat option to set multiple prefixes)") + help=_( + "Set subnet pool prefixes (in CIDR notation) " + "(repeat option to set multiple prefixes)" + ), ) parser.add_argument( '--default-prefix-length', metavar='', type=int, action=parseractions.NonNegativeAction, - help=_("Set subnet pool default prefix length") + help=_("Set subnet pool default prefix length"), ) parser.add_argument( '--min-prefix-length', metavar='', action=parseractions.NonNegativeAction, type=int, - help=_("Set subnet pool minimum prefix length") + help=_("Set subnet pool minimum prefix length"), ) parser.add_argument( '--max-prefix-length', metavar='', type=int, action=parseractions.NonNegativeAction, - help=_("Set subnet pool maximum prefix length") + help=_("Set subnet pool maximum prefix length"), ) @@ -154,25 +155,25 @@ class CreateSubnetPool(command.ShowOne, common.NeutronCommandWithExtraArgs): _description = _("Create subnet pool") def get_parser(self, prog_name): - parser = super(CreateSubnetPool, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - 'name', - metavar='', - help=_("Name of the new subnet pool") + 'name', metavar='', help=_("Name of the new subnet pool") ) _add_prefix_options(parser, for_create=True) parser.add_argument( '--project', metavar='', - help=_("Owner's project (name or ID)") + help=_("Owner's project (name or ID)"), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--address-scope', metavar='', - help=_("Set address scope associated with the subnet pool " - "(name or ID), prefixes must be unique across address " - "scopes") + help=_( + "Set address scope associated with the subnet pool " + "(name or ID), prefixes must be unique across address " + "scopes" + ), ) _add_default_options(parser) shared_group = parser.add_mutually_exclusive_group() @@ -189,15 +190,18 @@ def get_parser(self, prog_name): parser.add_argument( '--description', metavar='', - help=_("Set subnet pool description") + help=_("Set subnet pool description"), ) parser.add_argument( '--default-quota', type=int, metavar='', - help=_("Set default per-project quota for this subnet pool " - "as the number of IP addresses that can be allocated " - "from the subnet pool")), + help=_( + "Set default per-project quota for this subnet pool " + "as the number of IP addresses that can be allocated " + "from the subnet pool" + ), + ) _tag.add_tag_option_to_parser_for_create(parser, _('subnet pool')) return parser @@ -208,7 +212,8 @@ def take_action(self, parsed_args): if "prefixes" not in attrs: attrs['prefixes'] = [] attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) obj = client.create_subnet_pool(**attrs) # tags cannot be set when created, so tags need to be set later. 
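
As with subnets, the subnet pool create call cannot carry tags, so they are applied afterwards via _tag.update_tags_for_set(). The prefix options added by _add_prefix_options() above map directly onto subnet pool attributes; a hedged sketch against openstacksdk (cloud name and values are assumptions):

```python
# Illustrative only: repeated --prefix values become the 'prefixes' list and
# the prefix-length options map onto the corresponding pool attributes.
import openstack

conn = openstack.connect(cloud='devstack')  # assumed clouds.yaml entry

pool = conn.network.create_subnet_pool(
    name='demo-pool',
    prefixes=['10.0.0.0/8', '172.16.0.0/12'],
    default_prefix_length=26,
    min_prefix_length=8,
    max_prefix_length=28,
)
print(pool.id, pool.prefixes)
```
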
_tag.update_tags_for_set(client, obj, parsed_args) @@ -221,12 +226,12 @@ class DeleteSubnetPool(command.Command): _description = _("Delete subnet pool(s)") def get_parser(self, prog_name): - parser = super(DeleteSubnetPool, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'subnet_pool', metavar='', nargs='+', - help=_("Subnet pool(s) to delete (name or ID)") + help=_("Subnet pool(s) to delete (name or ID)"), ) return parser @@ -240,14 +245,19 @@ def take_action(self, parsed_args): client.delete_subnet_pool(obj) except Exception as e: result += 1 - LOG.error(_("Failed to delete subnet pool with " - "name or ID '%(pool)s': %(e)s"), - {'pool': pool, 'e': e}) + LOG.error( + _( + "Failed to delete subnet pool with " + "name or ID '%(pool)s': %(e)s" + ), + {'pool': pool, 'e': e}, + ) if result > 0: total = len(parsed_args.subnet_pool) - msg = (_("%(result)s of %(total)s subnet pools failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s subnet pools failed to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -257,53 +267,62 @@ class ListSubnetPool(command.Lister): _description = _("List subnet pools") def get_parser(self, prog_name): - parser = super(ListSubnetPool, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) shared_group = parser.add_mutually_exclusive_group() shared_group.add_argument( '--share', action='store_true', - help=_("List subnet pools shared between projects"), + help=_("List only subnet pools shared between projects"), ) shared_group.add_argument( '--no-share', action='store_true', - help=_("List subnet pools not shared between projects"), + help=_("List only subnet pools not shared between projects"), ) default_group = parser.add_mutually_exclusive_group() default_group.add_argument( '--default', action='store_true', - help=_("List subnet pools used as the default external " - "subnet pool"), + help=_( + "List only subnet pools used as the default external " + "subnet pool" + ), ) default_group.add_argument( '--no-default', action='store_true', - help=_("List subnet pools not used as the default external " - "subnet pool") + help=_( + "List only subnet pools not used as the default external " + "subnet pool" + ), ) parser.add_argument( '--project', metavar='', - help=_("List subnet pools according to their project (name or ID)") + help=_( + "List only subnet pools with the specified project " + "(name or ID)" + ), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--name', metavar='', - help=_("List only subnet pools of given name in output") + help=_("List only subnet pools with the specified name"), ) parser.add_argument( '--address-scope', metavar='', - help=_("List only subnet pools of given address scope " - "in output (name or ID)") + help=_( + "List only subnet pools with the specified address scope " + "(name or ID)" + ), ) _tag.add_tag_filtering_option_to_parser(parser, _('subnet pools')) return parser @@ -333,25 +352,41 @@ def take_action(self, parsed_args): filters['name'] = parsed_args.name if parsed_args.address_scope: address_scope = network_client.find_address_scope( - parsed_args.address_scope, - ignore_missing=False) + parsed_args.address_scope, ignore_missing=False + ) filters['address_scope_id'] = address_scope.id 
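
Each option handled above contributes one key to the filters dict, which is then passed unchanged as query parameters to the subnet_pools() call just below. A sketch of the equivalent direct SDK query (cloud name and values assumed):

```python
# Illustrative only: server-side filtering roughly equivalent to
# 'openstack subnet pool list --name demo-pool --address-scope scope1'.
import openstack

conn = openstack.connect(cloud='devstack')  # assumed clouds.yaml entry

scope = conn.network.find_address_scope('scope1', ignore_missing=False)
filters = {
    'name': 'demo-pool',
    'address_scope_id': scope.id,
}
for pool in conn.network.subnet_pools(**filters):
    print(pool.id, pool.name, pool.prefixes)
```
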
_tag.get_tag_filtering_args(parsed_args, filters) data = network_client.subnet_pools(**filters) - headers = ('ID', 'Name', 'Prefixes') - columns = ('id', 'name', 'prefixes') + headers: tuple[str, ...] = ('ID', 'Name', 'Prefixes') + columns: tuple[str, ...] = ('id', 'name', 'prefixes') if parsed_args.long: - headers += ('Default Prefix Length', 'Address Scope', - 'Default Subnet Pool', 'Shared', 'Tags') - columns += ('default_prefix_length', 'address_scope_id', - 'is_default', 'is_shared', 'tags') - - return (headers, - (utils.get_item_properties( - s, columns, + headers += ( + 'Default Prefix Length', + 'Address Scope', + 'Default Subnet Pool', + 'Shared', + 'Tags', + ) + columns += ( + 'default_prefix_length', + 'address_scope_id', + 'is_default', + 'is_shared', + 'tags', + ) + + return ( + headers, + ( + utils.get_item_properties( + s, + columns, formatters=_formatters, - ) for s in data)) + ) + for s in data + ), + ) # TODO(rtheis): Use the SDK resource mapped attribute names once the @@ -360,52 +395,56 @@ class SetSubnetPool(common.NeutronCommandWithExtraArgs): _description = _("Set subnet pool properties") def get_parser(self, prog_name): - parser = super(SetSubnetPool, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'subnet_pool', metavar='', - help=_("Subnet pool to modify (name or ID)") + help=_("Subnet pool to modify (name or ID)"), ) parser.add_argument( - '--name', - metavar='', - help=_("Set subnet pool name") + '--name', metavar='', help=_("Set subnet pool name") ) _add_prefix_options(parser) address_scope_group = parser.add_mutually_exclusive_group() address_scope_group.add_argument( '--address-scope', metavar='', - help=_("Set address scope associated with the subnet pool " - "(name or ID), prefixes must be unique across address " - "scopes") + help=_( + "Set address scope associated with the subnet pool " + "(name or ID), prefixes must be unique across address " + "scopes" + ), ) address_scope_group.add_argument( '--no-address-scope', action='store_true', - help=_("Remove address scope associated with the subnet pool") + help=_("Remove address scope associated with the subnet pool"), ) _add_default_options(parser) parser.add_argument( '--description', metavar='', - help=_("Set subnet pool description") + help=_("Set subnet pool description"), ) parser.add_argument( '--default-quota', type=int, metavar='', - help=_("Set default per-project quota for this subnet pool " - "as the number of IP addresses that can be allocated " - "from the subnet pool")), + help=_( + "Set default per-project quota for this subnet pool " + "as the number of IP addresses that can be allocated " + "from the subnet pool" + ), + ) _tag.add_tag_option_to_parser_for_set(parser, _('subnet pool')) return parser def take_action(self, parsed_args): client = self.app.client_manager.network - obj = client.find_subnet_pool(parsed_args.subnet_pool, - ignore_missing=False) + obj = client.find_subnet_pool( + parsed_args.subnet_pool, ignore_missing=False + ) attrs = _get_attrs(self.app.client_manager, parsed_args) @@ -414,7 +453,8 @@ def take_action(self, parsed_args): attrs['prefixes'].extend(obj.prefixes) attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) if attrs: client.update_subnet_pool(obj, **attrs) @@ -426,19 +466,18 @@ class ShowSubnetPool(command.ShowOne): _description = _("Display subnet pool details") def get_parser(self, prog_name): - parser = super(ShowSubnetPool, 
self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'subnet_pool', metavar='', - help=_("Subnet pool to display (name or ID)") + help=_("Subnet pool to display (name or ID)"), ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_subnet_pool( - parsed_args.subnet_pool, - ignore_missing=False + parsed_args.subnet_pool, ignore_missing=False ) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters=_formatters) @@ -449,11 +488,11 @@ class UnsetSubnetPool(command.Command): _description = _("Unset subnet pool properties") def get_parser(self, prog_name): - parser = super(UnsetSubnetPool, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'subnet_pool', metavar="", - help=_("Subnet pool to modify (name or ID)") + help=_("Subnet pool to modify (name or ID)"), ) _tag.add_tag_option_to_parser_for_unset(parser, _('subnet pool')) return parser @@ -461,6 +500,7 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): client = self.app.client_manager.network obj = client.find_subnet_pool( - parsed_args.subnet_pool, ignore_missing=False) + parsed_args.subnet_pool, ignore_missing=False + ) # tags is a subresource and it needs to be updated separately. _tag.update_tags_for_unset(client, obj, parsed_args) diff --git a/openstackclient/tests/functional/volume/v1/__init__.py b/openstackclient/network/v2/taas/__init__.py similarity index 100% rename from openstackclient/tests/functional/volume/v1/__init__.py rename to openstackclient/network/v2/taas/__init__.py diff --git a/openstackclient/network/v2/taas/tap_flow.py b/openstackclient/network/v2/taas/tap_flow.py new file mode 100644 index 0000000000..206fbde7fa --- /dev/null +++ b/openstackclient/network/v2/taas/tap_flow.py @@ -0,0 +1,245 @@ +# All Rights Reserved 2020 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
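
Both the list and show paths above render output with osc_lib's get_item_properties(), which pulls the named attributes off the SDK object and wraps any listed in _formatters in a cliff formattable column; ListColumn's human_readable() is what produces the sorted, comma-separated text in the table. A minimal sketch using a fake object (illustrative only, not an SDK resource):

```python
import types

from osc_lib import utils
from osc_lib.cli import format_columns

# Stand-in for a subnet pool returned by the SDK; only the attributes the
# column list asks for need to exist.
fake_pool = types.SimpleNamespace(
    id='pool-id',
    name='demo-pool',
    prefixes=['192.168.0.0/16', '10.0.0.0/8'],
)
columns = ('id', 'name', 'prefixes')
row = utils.get_item_properties(
    fake_pool, columns, formatters={'prefixes': format_columns.ListColumn}
)
print(row[2].human_readable())  # "10.0.0.0/8, 192.168.0.0/16" (sorted)
```

That sorting is also why dns_nameservers switched to the UnsortedListColumn defined at the top of subnet.py: for DNS servers the order supplied by the user matters.
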
+ +import logging + +from osc_lib.cli import format_columns +from osc_lib.cli import identity as identity_utils +from osc_lib import exceptions +from osc_lib import utils as osc_utils +from osc_lib.utils import columns as column_util + +from openstackclient import command +from openstackclient.i18n import _ +from openstackclient.identity import common +from openstackclient.network.v2.taas import tap_service + +LOG = logging.getLogger(__name__) + +TAP_FLOW = 'tap_flow' +TAP_FLOWS = f'{TAP_FLOW}s' + +_attr_map = [ + ('id', 'ID', column_util.LIST_BOTH), + ('tenant_id', 'Tenant', column_util.LIST_LONG_ONLY), + ('name', 'Name', column_util.LIST_BOTH), + ('status', 'Status', column_util.LIST_BOTH), + ('source_port', 'source_port', column_util.LIST_BOTH), + ('tap_service_id', 'tap_service_id', column_util.LIST_BOTH), + ('direction', 'Direction', column_util.LIST_BOTH), +] + +_formatters = { + 'vlan_filter': format_columns.ListColumn, +} + + +def _add_updatable_args(parser): + parser.add_argument('--name', help=_('Name of the tap flow.')) + parser.add_argument( + '--description', help=_('Description of the tap flow.') + ) + + +class CreateTapFlow(command.ShowOne): + _description = _("Create a new tap flow.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + identity_utils.add_project_owner_option_to_parser(parser) + _add_updatable_args(parser) + parser.add_argument( + '--port', + required=True, + metavar="SOURCE_PORT", + help=_('Source port (name or ID) to monitor.'), + ) + parser.add_argument( + '--tap-service', + required=True, + metavar="TAP_SERVICE", + help=_( + 'Tap service (name or ID) to associate with this tap flow.' + ), + ) + parser.add_argument( + '--direction', + required=True, + metavar="DIRECTION", + choices=['IN', 'OUT', 'BOTH'], + type=lambda s: s.upper(), + help=_( + 'Direction of the Tap flow. 
Valid options are: ' + 'IN, OUT and BOTH' + ), + ) + parser.add_argument( + '--vlan-filter', + required=False, + metavar="VLAN_FILTER", + help=_('VLAN IDs to mirror in the form of range string.'), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + attrs = {} + if parsed_args.name is not None: + attrs['name'] = parsed_args.name + if parsed_args.description is not None: + attrs['description'] = parsed_args.description + if parsed_args.port is not None: + source_port = client.find_port( + parsed_args.port, ignore_missing=False + ).id + attrs['source_port'] = source_port + if parsed_args.tap_service is not None: + tap_service_id = client.find_tap_service( + parsed_args.tap_service, ignore_missing=False + ).id + attrs['tap_service_id'] = tap_service_id + if parsed_args.direction is not None: + attrs['direction'] = parsed_args.direction + if parsed_args.vlan_filter is not None: + attrs['vlan_filter'] = parsed_args.vlan_filter + if 'project' in parsed_args and parsed_args.project is not None: + attrs['project_id'] = common.find_project( + self.app.client_manager.identity, + parsed_args.project, + parsed_args.project_domain, + ).id + obj = client.create_tap_flow(**attrs) + display_columns, columns = tap_service._get_columns(obj) + data = osc_utils.get_dict_properties(obj, columns) + return display_columns, data + + +class ListTapFlow(command.Lister): + _description = _("List tap flows.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + identity_utils.add_project_owner_option_to_parser(parser) + + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + params = {} + if parsed_args.project is not None: + params['project_id'] = common.find_project( + self.app.client_manager.identity, + parsed_args.project, + parsed_args.project_domain, + ).id + objs = client.tap_flows(retrieve_all=True, params=params) + headers, columns = column_util.get_column_definitions( + _attr_map, long_listing=True + ) + return ( + headers, + ( + osc_utils.get_dict_properties( + s, columns, formatters=_formatters + ) + for s in objs + ), + ) + + +class ShowTapFlow(command.ShowOne): + _description = _("Show tap flow details.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + TAP_FLOW, + metavar=f"<{TAP_FLOW}>", + help=_("Tap flow to display (name or ID)."), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + id = client.find_tap_flow( + parsed_args.tap_flow, ignore_missing=False + ).id + obj = client.get_tap_flow(id) + display_columns, columns = tap_service._get_columns(obj) + data = osc_utils.get_dict_properties(obj, columns) + return display_columns, data + + +class DeleteTapFlow(command.Command): + _description = _("Delete a tap flow.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + TAP_FLOW, + metavar=f"<{TAP_FLOW}>", + nargs="+", + help=_("Tap flow to delete (name or ID)."), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + fails = 0 + for id_or_name in parsed_args.tap_flow: + try: + id = client.find_tap_flow(id_or_name, ignore_missing=False).id + client.delete_tap_flow(id) + except Exception as e: + fails += 1 + LOG.error( + "Failed to delete tap flow with name or ID " + "'%(id_or_name)s': %(e)s", + {'id_or_name': id_or_name, 'e': e}, + ) + if fails > 0: + msg = _("Failed to delete 
%(fails)s of %(total)s tap flow.") % { + 'fails': fails, + 'total': len(parsed_args.tap_flow), + } + raise exceptions.CommandError(msg) + + +class UpdateTapFlow(command.ShowOne): + _description = _("Update a tap flow.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + TAP_FLOW, + metavar=f"<{TAP_FLOW}>", + help=_("Tap flow to modify (name or ID)."), + ) + _add_updatable_args(parser) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + original_t_f = client.find_tap_flow( + parsed_args.tap_flow, ignore_missing=False + ).id + attrs = {} + if parsed_args.name is not None: + attrs['name'] = parsed_args.name + if parsed_args.description is not None: + attrs['description'] = parsed_args.description + obj = client.update_tap_flow(original_t_f, **attrs) + columns, display_columns = column_util.get_columns(obj, _attr_map) + data = osc_utils.get_dict_properties(obj, columns) + return display_columns, data diff --git a/openstackclient/network/v2/taas/tap_mirror.py b/openstackclient/network/v2/taas/tap_mirror.py new file mode 100644 index 0000000000..876109dc46 --- /dev/null +++ b/openstackclient/network/v2/taas/tap_mirror.py @@ -0,0 +1,237 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging + +from osc_lib.cli import identity as identity_utils +from osc_lib import exceptions +from osc_lib import utils as osc_utils +from osc_lib.utils import columns as column_util + +from openstackclient import command +from openstackclient.i18n import _ +from openstackclient.identity import common +from openstackclient.network.v2 import port as osc_port +from openstackclient.network.v2.taas import tap_service + +LOG = logging.getLogger(__name__) + +TAP_MIRROR = 'tap_mirror' +TAP_MIRRORS = f'{TAP_MIRROR}s' + +_attr_map = [ + ('id', 'ID', column_util.LIST_BOTH), + ('tenant_id', 'Tenant', column_util.LIST_LONG_ONLY), + ('name', 'Name', column_util.LIST_BOTH), + ('port_id', 'Port', column_util.LIST_BOTH), + ('directions', 'Directions', column_util.LIST_LONG_ONLY), + ('remote_ip', 'Remote IP', column_util.LIST_BOTH), + ('mirror_type', 'Mirror Type', column_util.LIST_LONG_ONLY), +] + + +def _get_columns(item): + column_map: dict[str, str] = {} + hidden_columns = ['location', 'tenant_id'] + return osc_utils.get_osc_show_columns_for_sdk_resource( + item, column_map, hidden_columns + ) + + +class CreateTapMirror(command.ShowOne): + _description = _("Create a new tap mirror.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + identity_utils.add_project_owner_option_to_parser(parser) + tap_service._add_updatable_args(parser) + parser.add_argument( + '--port', + dest='port_id', + required=True, + metavar="PORT", + help=_('Port (name or ID) to which the Tap Mirror is connected.'), + ) + parser.add_argument( + '--directions', + dest='directions', + action=osc_port.JSONKeyValueAction, + required=True, + help=_( + 'Dictionary of direction and tunnel_id. Valid directions are: ' + 'IN and OUT.' 
+ ), + ) + parser.add_argument( + '--remote-ip', + dest='remote_ip', + required=True, + help=_( + 'Remote IP address for the tap mirror (remote end of the ' + 'GRE or ERSPAN v1 tunnel).' + ), + ) + parser.add_argument( + '--mirror-type', + dest='mirror_type', + required=True, + help=_('Mirror type. Valid values are: gre and erspanv1.'), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + attrs = {} + if parsed_args.name is not None: + attrs['name'] = parsed_args.name + if parsed_args.description is not None: + attrs['description'] = parsed_args.description + if parsed_args.port_id is not None: + port_id = client.find_port( + parsed_args.port_id, ignore_missing=False + ).id + attrs['port_id'] = port_id + if parsed_args.directions is not None: + attrs['directions'] = parsed_args.directions + if parsed_args.remote_ip is not None: + attrs['remote_ip'] = parsed_args.remote_ip + if parsed_args.mirror_type is not None: + attrs['mirror_type'] = parsed_args.mirror_type + if 'project' in parsed_args and parsed_args.project is not None: + attrs['project_id'] = common.find_project( + self.app.client_manager.identity, + parsed_args.project, + parsed_args.project_domain, + ).id + obj = client.create_tap_mirror(**attrs) + display_columns, columns = tap_service._get_columns(obj) + data = osc_utils.get_dict_properties(obj, columns) + return display_columns, data + + +class ListTapMirror(command.Lister): + _description = _("List tap mirrors.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + identity_utils.add_project_owner_option_to_parser(parser) + + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + params = {} + if parsed_args.project is not None: + params['project_id'] = common.find_project( + self.app.client_manager.identity, + parsed_args.project, + parsed_args.project_domain, + ).id + objs = client.tap_mirrors(retrieve_all=True, params=params) + headers, columns = column_util.get_column_definitions( + _attr_map, long_listing=True + ) + return ( + headers, + (osc_utils.get_dict_properties(s, columns) for s in objs), + ) + + +class ShowTapMirror(command.ShowOne): + _description = _("Show tap mirror details.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + TAP_MIRROR, + metavar=f"<{TAP_MIRROR}>", + help=_("Tap mirror to display (name or ID)."), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + id = client.find_tap_mirror( + parsed_args.tap_mirror, ignore_missing=False + ).id + obj = client.get_tap_mirror(id) + display_columns, columns = tap_service._get_columns(obj) + data = osc_utils.get_dict_properties(obj, columns) + return display_columns, data + + +class DeleteTapMirror(command.Command): + _description = _("Delete a tap mirror.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + TAP_MIRROR, + metavar=f"<{TAP_MIRROR}>", + nargs="+", + help=_("Tap mirror to delete (name or ID)."), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + fails = 0 + for id_or_name in parsed_args.tap_mirror: + try: + id = client.find_tap_mirror( + id_or_name, ignore_missing=False + ).id + + client.delete_tap_mirror(id) + LOG.warning("Tap Mirror %(id)s deleted", {'id': id}) + except Exception as e: + fails += 1 + LOG.error( + "Failed to delete Tap Mirror with name or ID " + 
"'%(id_or_name)s': %(e)s", + {'id_or_name': id_or_name, 'e': e}, + ) + if fails > 0: + msg = _("Failed to delete %(fails)s of %(total)s Tap Mirror.") % { + 'fails': fails, + 'total': len(parsed_args.tap_mirror), + } + raise exceptions.CommandError(msg) + + +class UpdateTapMirror(command.ShowOne): + _description = _("Update a tap mirror.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + TAP_MIRROR, + metavar=f"<{TAP_MIRROR}>", + help=_("Tap mirror to modify (name or ID)."), + ) + tap_service._add_updatable_args(parser) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + original_t_s = client.find_tap_mirror( + parsed_args.tap_mirror, ignore_missing=False + ).id + attrs = {} + if parsed_args.name is not None: + attrs['name'] = parsed_args.name + if parsed_args.description is not None: + attrs['description'] = parsed_args.description + obj = client.update_tap_mirror(original_t_s, **attrs) + display_columns, columns = tap_service._get_columns(obj) + data = osc_utils.get_dict_properties(obj, columns) + return display_columns, data diff --git a/openstackclient/network/v2/taas/tap_service.py b/openstackclient/network/v2/taas/tap_service.py new file mode 100644 index 0000000000..df27658f5d --- /dev/null +++ b/openstackclient/network/v2/taas/tap_service.py @@ -0,0 +1,211 @@ +# All Rights Reserved 2020 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import logging + +from osc_lib.cli import identity as identity_utils +from osc_lib import exceptions +from osc_lib import utils as osc_utils +from osc_lib.utils import columns as column_util + +from openstackclient import command +from openstackclient.i18n import _ +from openstackclient.identity import common + +LOG = logging.getLogger(__name__) + +TAP_SERVICE = 'tap_service' +TAP_SERVICES = f'{TAP_SERVICE}s' + +_attr_map = [ + ('id', 'ID', column_util.LIST_BOTH), + ('tenant_id', 'Tenant', column_util.LIST_LONG_ONLY), + ('name', 'Name', column_util.LIST_BOTH), + ('port_id', 'Port', column_util.LIST_BOTH), + ('status', 'Status', column_util.LIST_BOTH), +] + + +def _add_updatable_args(parser): + parser.add_argument('--name', help=_('Name of the tap service.')) + parser.add_argument( + '--description', help=_('Description of the tap service.') + ) + + +def _get_columns(item): + column_map: dict[str, str] = {} + hidden_columns = ['location', 'tenant_id'] + return osc_utils.get_osc_show_columns_for_sdk_resource( + item, column_map, hidden_columns + ) + + +class CreateTapService(command.ShowOne): + _description = _("Create a new tap service.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + identity_utils.add_project_owner_option_to_parser(parser) + _add_updatable_args(parser) + parser.add_argument( + '--port', + dest='port_id', + required=True, + metavar="PORT", + help=_('Port (name or ID) to connect to the tap service.'), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + attrs = {} + if parsed_args.name is not None: + attrs['name'] = parsed_args.name + if parsed_args.description is not None: + attrs['description'] = parsed_args.description + if parsed_args.port_id is not None: + port_id = client.find_port( + parsed_args.port_id, ignore_missing=False + ).id + attrs['port_id'] = port_id + if 'project' in parsed_args and parsed_args.project is not None: + attrs['project_id'] = common.find_project( + self.app.client_manager.identity, + parsed_args.project, + parsed_args.project_domain, + ).id + obj = client.create_tap_service(**attrs) + display_columns, columns = _get_columns(obj) + data = osc_utils.get_dict_properties(obj, columns) + return display_columns, data + + +class ListTapService(command.Lister): + _description = _("List tap services.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + identity_utils.add_project_owner_option_to_parser(parser) + + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + params = {} + if parsed_args.project is not None: + params['project_id'] = common.find_project( + self.app.client_manager.identity, + parsed_args.project, + parsed_args.project_domain, + ).id + objs = client.tap_services(retrieve_all=True, params=params) + headers, columns = column_util.get_column_definitions( + _attr_map, long_listing=True + ) + return ( + headers, + (osc_utils.get_dict_properties(s, columns) for s in objs), + ) + + +class ShowTapService(command.ShowOne): + _description = _("Show tap service details.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + TAP_SERVICE, + metavar=f"<{TAP_SERVICE}>", + help=_("Tap service to display (name or ID)."), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + id = client.find_tap_service( + parsed_args.tap_service, ignore_missing=False + ).id + obj = 
client.get_tap_service(id) + display_columns, columns = _get_columns(obj) + data = osc_utils.get_dict_properties(obj, columns) + return display_columns, data + + +class DeleteTapService(command.Command): + _description = _("Delete a tap service.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + TAP_SERVICE, + metavar=f"<{TAP_SERVICE}>", + nargs="+", + help=_("Tap service to delete (name or ID)."), + ) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + fails = 0 + for id_or_name in parsed_args.tap_service: + try: + id = client.find_tap_service( + id_or_name, ignore_missing=False + ).id + + client.delete_tap_service(id) + LOG.warning("Tap service %(id)s deleted", {'id': id}) + except Exception as e: + fails += 1 + LOG.error( + "Failed to delete tap service with name or ID " + "'%(id_or_name)s': %(e)s", + {'id_or_name': id_or_name, 'e': e}, + ) + if fails > 0: + msg = _("Failed to delete %(fails)s of %(total)s tap service.") % { + 'fails': fails, + 'total': len(parsed_args.tap_service), + } + raise exceptions.CommandError(msg) + + +class UpdateTapService(command.ShowOne): + _description = _("Update a tap service.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + TAP_SERVICE, + metavar=f"<{TAP_SERVICE}>", + help=_("Tap service to modify (name or ID)."), + ) + _add_updatable_args(parser) + return parser + + def take_action(self, parsed_args): + client = self.app.client_manager.network + original_t_s = client.find_tap_service( + parsed_args.tap_service, ignore_missing=False + ).id + attrs = {} + if parsed_args.name is not None: + attrs['name'] = parsed_args.name + if parsed_args.description is not None: + attrs['description'] = parsed_args.description + obj = client.update_tap_service(original_t_s, **attrs) + display_columns, columns = _get_columns(obj) + data = osc_utils.get_dict_properties(obj, columns) + return display_columns, data diff --git a/openstackclient/object/client.py b/openstackclient/object/client.py index 865f18f6de..466f132ec2 100644 --- a/openstackclient/object/client.py +++ b/openstackclient/object/client.py @@ -19,12 +19,11 @@ from openstackclient.api import object_store_v1 +# global variables used when building the shell DEFAULT_API_VERSION = '1' API_VERSION_OPTION = 'os_object_api_version' API_NAME = 'object_store' -API_VERSIONS = { - '1': 'openstackclient.object.client.ObjectClientv1', -} +API_VERSIONS = ('1',) def make_client(instance): @@ -50,7 +49,8 @@ def build_option_parser(parser): '--os-object-api-version', metavar='', default=utils.env('OS_OBJECT_API_VERSION'), - help='Object API version, default=' + - DEFAULT_API_VERSION + - ' (Env: OS_OBJECT_API_VERSION)') + help='Object API version, default=' + + DEFAULT_API_VERSION + + ' (Env: OS_OBJECT_API_VERSION)', + ) return parser diff --git a/openstackclient/object/v1/account.py b/openstackclient/object/v1/account.py index d6bc9fd780..199e5222fd 100644 --- a/openstackclient/object/v1/account.py +++ b/openstackclient/object/v1/account.py @@ -15,8 +15,8 @@ from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command +from openstackclient import command from openstackclient.i18n import _ @@ -24,14 +24,16 @@ class SetAccount(command.Command): _description = _("Set account properties") def get_parser(self, prog_name): - parser = super(SetAccount, self).get_parser(prog_name) + parser = super().get_parser(prog_name) 
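
The --os-object-api-version option above defaults from the environment through osc_lib's utils.env(), which returns the first environment variable that is set (or an empty string if none are). A small sketch of that fallback (the exported value is assumed):

```python
import argparse
import os

from osc_lib import utils

os.environ['OS_OBJECT_API_VERSION'] = '1'  # what the user would export

parser = argparse.ArgumentParser()
parser.add_argument(
    '--os-object-api-version',
    default=utils.env('OS_OBJECT_API_VERSION'),
    help='Object API version (Env: OS_OBJECT_API_VERSION)',
)
print(parser.parse_args([]).os_object_api_version)  # '1'
```
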
parser.add_argument( "--property", metavar="", required=True, action=parseractions.KeyValueAction, - help=_("Set a property on this account " - "(repeat option to set multiple properties)") + help=_( + "Set a property on this account " + "(repeat option to set multiple properties)" + ), ) return parser @@ -48,7 +50,8 @@ def take_action(self, parsed_args): data = self.app.client_manager.object_store.account_show() if 'properties' in data: data['properties'] = format_columns.DictColumn( - data.pop('properties')) + data.pop('properties') + ) return zip(*sorted(data.items())) @@ -56,15 +59,17 @@ class UnsetAccount(command.Command): _description = _("Unset account properties") def get_parser(self, prog_name): - parser = super(UnsetAccount, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--property', metavar='', required=True, action='append', default=[], - help=_('Property to remove from account ' - '(repeat option to remove multiple properties)'), + help=_( + 'Property to remove from account ' + '(repeat option to remove multiple properties)' + ), ) return parser diff --git a/openstackclient/object/v1/container.py b/openstackclient/object/v1/container.py index 917e41c02f..b0f92c7619 100644 --- a/openstackclient/object/v1/container.py +++ b/openstackclient/object/v1/container.py @@ -19,12 +19,12 @@ from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ - LOG = logging.getLogger(__name__) @@ -32,16 +32,16 @@ class CreateContainer(command.Lister): _description = _("Create new container") def get_parser(self, prog_name): - parser = super(CreateContainer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--public', action='store_true', default=False, - help="Make the container publicly accessible" + help="Make the container publicly accessible", ) parser.add_argument( '--storage-policy', - help="Specify a particular storage policy to use." 
+ help="Specify a particular storage policy to use.", ) parser.add_argument( 'containers', @@ -52,35 +52,43 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - results = [] for container in parsed_args.containers: if len(container) > 256: - LOG.warning( - _('Container name is %s characters long, the default limit' - ' is 256'), len(container)) + msg = _( + 'Container name is %d characters long, the default limit' + ' is 256' + ) + LOG.warning(msg, len(container)) data = self.app.client_manager.object_store.container_create( container=container, public=parsed_args.public, - storage_policy=parsed_args.storage_policy + storage_policy=parsed_args.storage_policy, ) results.append(data) columns = ("account", "container", "x-trans-id") - return (columns, - (utils.get_dict_properties( - s, columns, + return ( + columns, + ( + utils.get_dict_properties( + s, + columns, formatters={}, - ) for s in results)) + ) + for s in results + ), + ) class DeleteContainer(command.Command): _description = _("Delete container") def get_parser(self, prog_name): - parser = super(DeleteContainer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - '--recursive', '-r', + '--recursive', + '-r', action='store_true', default=False, help=_('Recursively delete objects and container'), @@ -94,11 +102,11 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - for container in parsed_args.containers: if parsed_args.recursive: objs = self.app.client_manager.object_store.object_list( - container=container) + container=container + ) for obj in objs: self.app.client_manager.object_store.object_delete( container=container, @@ -113,28 +121,18 @@ class ListContainer(command.Lister): _description = _("List containers") def get_parser(self, prog_name): - parser = super(ListContainer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--prefix", metavar="", help=_("Filter list using "), ) - parser.add_argument( - "--marker", - metavar="", - help=_("Anchor for paging"), - ) + pagination.add_marker_pagination_option_to_parser(parser) parser.add_argument( "--end-marker", metavar="", help=_("End anchor for paging"), ) - parser.add_argument( - "--limit", - metavar="", - type=int, - help=_("Limit the number of containers returned"), - ) parser.add_argument( '--long', action='store_true', @@ -150,11 +148,9 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - + columns: tuple[str, ...] 
= ('Name',) if parsed_args.long: - columns = ('Name', 'Bytes', 'Count') - else: - columns = ('Name',) + columns += ('Bytes', 'Count') kwargs = {} if parsed_args.prefix: @@ -168,22 +164,26 @@ def take_action(self, parsed_args): if parsed_args.all: kwargs['full_listing'] = True - data = self.app.client_manager.object_store.container_list( - **kwargs - ) + data = self.app.client_manager.object_store.container_list(**kwargs) - return (columns, - (utils.get_dict_properties( - s, columns, + return ( + columns, + ( + utils.get_dict_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SaveContainer(command.Command): _description = _("Save container contents locally") def get_parser(self, prog_name): - parser = super(SaveContainer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'container', metavar='', @@ -201,7 +201,7 @@ class SetContainer(command.Command): _description = _("Set container properties") def get_parser(self, prog_name): - parser = super(SetContainer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'container', metavar='', @@ -212,8 +212,10 @@ def get_parser(self, prog_name): metavar="", required=True, action=parseractions.KeyValueAction, - help=_("Set a property on this container " - "(repeat option to set multiple properties)") + help=_( + "Set a property on this container " + "(repeat option to set multiple properties)" + ), ) return parser @@ -228,7 +230,7 @@ class ShowContainer(command.ShowOne): _description = _("Display container details") def get_parser(self, prog_name): - parser = super(ShowContainer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'container', metavar='', @@ -237,7 +239,6 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - data = self.app.client_manager.object_store.container_show( container=parsed_args.container, ) @@ -251,7 +252,7 @@ class UnsetContainer(command.Command): _description = _("Unset container properties") def get_parser(self, prog_name): - parser = super(UnsetContainer, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'container', metavar='', @@ -263,8 +264,10 @@ def get_parser(self, prog_name): required=True, action='append', default=[], - help=_('Property to remove from container ' - '(repeat option to remove multiple properties)'), + help=_( + 'Property to remove from container ' + '(repeat option to remove multiple properties)' + ), ) return parser diff --git a/openstackclient/object/v1/object.py b/openstackclient/object/v1/object.py index 01e537eef7..e8ee0fc692 100644 --- a/openstackclient/object/v1/object.py +++ b/openstackclient/object/v1/object.py @@ -19,10 +19,11 @@ from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ @@ -33,7 +34,7 @@ class CreateObject(command.Lister): _description = _("Upload object to container") def get_parser(self, prog_name): - parser = super(CreateObject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'container', metavar='', @@ -48,23 +49,31 @@ def get_parser(self, prog_name): parser.add_argument( '--name', metavar='', - help=_('Upload a file and rename it. 
' - 'Can only be used when uploading a single object') + help=_( + 'Upload a file and rename it. ' + 'Can only be used when uploading a single object' + ), ) return parser def take_action(self, parsed_args): if parsed_args.name: if len(parsed_args.objects) > 1: - msg = _('Attempting to upload multiple objects and ' - 'using --name is not permitted') + msg = _( + 'Attempting to upload multiple objects and ' + 'using --name is not permitted' + ) raise exceptions.CommandError(msg) results = [] for obj in parsed_args.objects: if len(obj) > 1024: LOG.warning( - _('Object name is %s characters long, default limit' - ' is 1024'), len(obj)) + _( + 'Object name is %s characters long, default limit' + ' is 1024' + ), + len(obj), + ) data = self.app.client_manager.object_store.object_create( container=parsed_args.container, object=obj, @@ -73,18 +82,24 @@ def take_action(self, parsed_args): results.append(data) columns = ("object", "container", "etag") - return (columns, - (utils.get_dict_properties( - s, columns, + return ( + columns, + ( + utils.get_dict_properties( + s, + columns, formatters={}, - ) for s in results)) + ) + for s in results + ), + ) class DeleteObject(command.Command): _description = _("Delete object from container") def get_parser(self, prog_name): - parser = super(DeleteObject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'container', metavar='', @@ -99,7 +114,6 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - for obj in parsed_args.objects: self.app.client_manager.object_store.object_delete( container=parsed_args.container, @@ -111,7 +125,7 @@ class ListObject(command.Lister): _description = _("List objects") def get_parser(self, prog_name): - parser = super(ListObject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "container", metavar="", @@ -127,22 +141,12 @@ def get_parser(self, prog_name): metavar="", help=_("Roll up items with "), ) - parser.add_argument( - "--marker", - metavar="", - help=_("Anchor for paging"), - ) + pagination.add_marker_pagination_option_to_parser(parser) parser.add_argument( "--end-marker", metavar="", help=_("End anchor for paging"), ) - parser.add_argument( - "--limit", - metavar="", - type=int, - help=_("Limit the number of objects returned"), - ) parser.add_argument( '--long', action='store_true', @@ -158,17 +162,9 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - + columns: tuple[str, ...] 
= ('Name',) if parsed_args.long: - columns = ( - 'Name', - 'Bytes', - 'Hash', - 'Content Type', - 'Last Modified', - ) - else: - columns = ('Name',) + columns += ('Bytes', 'Hash', 'Content Type', 'Last Modified') kwargs = {} if parsed_args.prefix: @@ -185,27 +181,34 @@ def take_action(self, parsed_args): kwargs['full_listing'] = True data = self.app.client_manager.object_store.object_list( - container=parsed_args.container, - **kwargs + container=parsed_args.container, **kwargs ) - return (columns, - (utils.get_dict_properties( - s, columns, + return ( + columns, + ( + utils.get_dict_properties( + s, + columns, formatters={}, - ) for s in data)) + ) + for s in data + ), + ) class SaveObject(command.Command): _description = _("Save object locally") def get_parser(self, prog_name): - parser = super(SaveObject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--file", metavar="", - help=_("Destination filename (defaults to object name); using '-'" - " as the filename will print the file to stdout"), + help=_( + "Destination filename (defaults to object name); using '-'" + " as the filename will print the file to stdout" + ), ) parser.add_argument( 'container', @@ -231,7 +234,7 @@ class SetObject(command.Command): _description = _("Set object properties") def get_parser(self, prog_name): - parser = super(SetObject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'container', metavar='', @@ -247,8 +250,10 @@ def get_parser(self, prog_name): metavar="", required=True, action=parseractions.KeyValueAction, - help=_("Set a property on this object " - "(repeat option to set multiple properties)") + help=_( + "Set a property on this object " + "(repeat option to set multiple properties)" + ), ) return parser @@ -264,7 +269,7 @@ class ShowObject(command.ShowOne): _description = _("Display object details") def get_parser(self, prog_name): - parser = super(ShowObject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'container', metavar='', @@ -278,7 +283,6 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - data = self.app.client_manager.object_store.object_show( container=parsed_args.container, object=parsed_args.object, @@ -293,7 +297,7 @@ class UnsetObject(command.Command): _description = _("Unset object properties") def get_parser(self, prog_name): - parser = super(UnsetObject, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'container', metavar='', @@ -310,8 +314,10 @@ def get_parser(self, prog_name): required=True, action='append', default=[], - help=_('Property to remove from object ' - '(repeat option to remove multiple properties)'), + help=_( + 'Property to remove from object ' + '(repeat option to remove multiple properties)' + ), ) return parser diff --git a/openstackclient/releasenotes/notes/volume-backup-created-at-list-b49ec893ae1f6b0d.yaml b/openstackclient/releasenotes/notes/volume-backup-created-at-list-b49ec893ae1f6b0d.yaml new file mode 100644 index 0000000000..974d88ed9c --- /dev/null +++ b/openstackclient/releasenotes/notes/volume-backup-created-at-list-b49ec893ae1f6b0d.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Listing volume backups now shows the created_at column. 
diff --git a/openstackclient/shell.py b/openstackclient/shell.py index bc88e1f1e2..743ed2bc82 100644 --- a/openstackclient/shell.py +++ b/openstackclient/shell.py @@ -17,6 +17,7 @@ """Command-line interface to the OpenStack APIs""" import sys +import warnings from osc_lib.api import auth from osc_lib.command import commandmanager @@ -25,48 +26,59 @@ import openstackclient from openstackclient.common import clientmanager - DEFAULT_DOMAIN = 'default' +# list of modules that were originally out-of-tree and are now in +# core OSC +IGNORED_MODULES = ( + 'neutron_taas.taas_client.osc', + 'neutronclient.osc.v2.taas', +) class OpenStackShell(shell.OpenStackShell): + client_manager: clientmanager.ClientManager def __init__(self): + command_manager = commandmanager.CommandManager( + 'openstack.cli', ignored_modules=IGNORED_MODULES + ) - super(OpenStackShell, self).__init__( + super().__init__( description=__doc__.strip(), version=openstackclient.__version__, - command_manager=commandmanager.CommandManager('openstack.cli'), - deferred_help=True) + command_manager=command_manager, + deferred_help=True, + ) self.api_version = {} # Assume TLS host certificate verification is enabled self.verify = True - def build_option_parser(self, description, version): - parser = super(OpenStackShell, self).build_option_parser( - description, - version) + # ignore warnings from openstacksdk since our users can't do anything + # about them + warnings.filterwarnings('ignore', module='openstack') + + def build_option_parser(self, description, version, argparse_kwargs=None): + parser = super().build_option_parser( + description, version, argparse_kwargs + ) parser = clientmanager.build_plugin_option_parser(parser) parser = auth.build_auth_plugins_option_parser(parser) return parser def _final_defaults(self): - super(OpenStackShell, self)._final_defaults() + super()._final_defaults() # Set the default plugin to admin_token if endpoint and token are given - if (self.options.endpoint and self.options.token): + if self.options.endpoint and self.options.token: # Use token authentication self._auth_type = 'admin_token' else: self._auth_type = 'password' def _load_plugins(self): - """Load plugins via stevedore - - osc-lib has no opinion on what plugins should be loaded - """ + """Load plugins via stevedore.""" # Loop through extensions to get API versions for mod in clientmanager.PLUGIN_MODULES: default_version = getattr(mod, 'DEFAULT_API_VERSION', None) @@ -86,15 +98,35 @@ def _load_plugins(self): # this throws an exception if invalid skip_old_check = mod_check_api_version(version_opt) + # NOTE(stephenfin): API_VERSIONS has traditionally been a + # dictionary but the values are only used internally and are + # ignored for the modules using SDK. So we now support tuples + # instead. mod_versions = getattr(mod, 'API_VERSIONS', None) - if not skip_old_check and mod_versions: + if mod_versions is not None and not isinstance( + mod_versions, dict | tuple + ): + raise TypeError( + f'Plugin {mod} has incompatible API_VERSIONS. ' + f'Expected: tuple, dict. Got: {type(mod_versions)}. ' + f'Please report this to your package maintainer.' 
+ ) + + if mod_versions and not skip_old_check: if version_opt not in mod_versions: sorted_versions = sorted( - mod.API_VERSIONS.keys(), - key=lambda s: list(map(int, s.split('.')))) + list(mod.API_VERSIONS), + key=lambda s: list(map(int, s.split('.'))), + ) self.log.warning( - "%s version %s is not in supported versions: %s" - % (api, version_opt, ', '.join(sorted_versions))) + "%(name)s API version %(version)s is not in " + "supported versions: %(supported)s", + { + 'name': api, + 'version': version_opt, + 'supported': ', '.join(sorted_versions), + }, + ) # Command groups deal only with major versions version = '.v' + version_opt.replace('.', '_').split('_')[0] @@ -102,7 +134,7 @@ def _load_plugins(self): self.command_manager.add_command_group(cmd_group) self.log.debug( '%(name)s API version %(version)s, cmd group %(group)s', - {'name': api, 'version': version_opt, 'group': cmd_group} + {'name': api, 'version': version_opt, 'group': cmd_group}, ) def _load_commands(self): @@ -111,8 +143,7 @@ def _load_commands(self): osc-lib has no opinion on what commands should be loaded """ # Commands that span multiple APIs - self.command_manager.add_command_group( - 'openstack.common') + self.command_manager.add_command_group('openstack.common') # This is the naive extension implementation referred to in # blueprint 'client-extensions' @@ -124,11 +155,10 @@ def _load_commands(self): # 'show_repo=qaz.github.repo:ShowRepo', # ], # } - self.command_manager.add_command_group( - 'openstack.extension') + self.command_manager.add_command_group('openstack.extension') def initialize_app(self, argv): - super(OpenStackShell, self).initialize_app(argv) + super().initialize_app(argv) # Re-create the client_manager with our subclass self.client_manager = clientmanager.ClientManager( diff --git a/openstackclient/tests/functional/base.py b/openstackclient/tests/functional/base.py index 0c430267be..96c9accf6b 100644 --- a/openstackclient/tests/functional/base.py +++ b/openstackclient/tests/functional/base.py @@ -24,30 +24,35 @@ LOG = logging.getLogger(__name__) -def execute(cmd, fail_ok=False, merge_stderr=False): +def execute(cmd, *, fail_ok=False): """Executes specified command for the given action.""" LOG.debug('Executing: %s', cmd) cmdlist = shlex.split(cmd) stdout = subprocess.PIPE - stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE + stderr = subprocess.PIPE + env = { + k: v for k, v in os.environ.copy().items() if not k.startswith('OS_') + } - proc = subprocess.Popen(cmdlist, stdout=stdout, stderr=stderr) + proc = subprocess.Popen(cmdlist, stdout=stdout, stderr=stderr, env=env) - result_out, result_err = proc.communicate() - result_out = result_out.decode('utf-8') + result_out_b, result_err = proc.communicate() + result_out = result_out_b.decode('utf-8') LOG.debug('stdout: %s', result_out) LOG.debug('stderr: %s', result_err) if not fail_ok and proc.returncode != 0: raise exceptions.CommandFailed( - proc.returncode, cmd, result_out, result_err, + proc.returncode, + cmd, + result_out, + result_err, ) return result_out class TestCase(testtools.TestCase): - @classmethod def openstack( cls, @@ -63,7 +68,7 @@ def openstack( :param cloud: The cloud to execute against. This can be a string, empty string, or None. A string results in '--os-auth-type $cloud', an empty string results in the '--os-auth-type' option being - omitted, and None resuts in '--os-auth-type none' for legacy + omitted, and None results in '--os-auth-type none' for legacy reasons. :param fail_ok: If failure is permitted. 
If False (default), a command failure will result in `~tempest.lib.exceptions.CommandFailed` @@ -92,7 +97,11 @@ def openstack( ) if parse_output: - return json.loads(output) + try: + return json.loads(output) + except json.JSONDecodeError: + print(f'failed to decode: {output}') + raise else: return output @@ -117,9 +126,13 @@ def is_service_enabled(cls, service, version=None): return bool(ret) @classmethod - def is_extension_enabled(cls, alias): + def is_extension_enabled(cls, alias, *, service='network'): """Ask client cloud if extension is enabled""" - return alias in cls.openstack('extension list -f value -c Alias') + extensions = cls.openstack( + f'extension list --{service}', + parse_output=True, + ) + return alias in [x['Alias'] for x in extensions] @classmethod def get_openstack_configuration_value(cls, configuration): @@ -128,8 +141,9 @@ def get_openstack_configuration_value(cls, configuration): @classmethod def get_opts(cls, fields, output_format='value'): - return ' -f {0} {1}'.format(output_format, - ' '.join(['-c ' + it for it in fields])) + return ' -f {} {}'.format( + output_format, ' '.join(['-c ' + it for it in fields]) + ) @classmethod def assertOutput(cls, expected, actual): diff --git a/openstackclient/tests/functional/common/test_availability_zone.py b/openstackclient/tests/functional/common/test_availability_zone.py index f319ffc5ef..cce1b2616d 100644 --- a/openstackclient/tests/functional/common/test_availability_zone.py +++ b/openstackclient/tests/functional/common/test_availability_zone.py @@ -14,7 +14,7 @@ class AvailabilityZoneTests(base.TestCase): - """Functional tests for availability zone. """ + """Functional tests for availability zone.""" def test_availability_zone_list(self): cmd_output = self.openstack( @@ -22,11 +22,5 @@ def test_availability_zone_list(self): parse_output=True, ) zones = [x['Zone Name'] for x in cmd_output] - self.assertIn( - 'internal', - zones - ) - self.assertIn( - 'nova', - zones - ) + self.assertIn('internal', zones) + self.assertIn('nova', zones) diff --git a/openstackclient/tests/functional/common/test_configuration.py b/openstackclient/tests/functional/common/test_configuration.py index 614b3e46ee..2a4660a4bc 100644 --- a/openstackclient/tests/functional/common/test_configuration.py +++ b/openstackclient/tests/functional/common/test_configuration.py @@ -23,17 +23,13 @@ class ConfigurationTests(base.TestCase): """Functional test for configuration.""" def test_configuration_show(self): - # Test show without option raw_output = self.openstack('configuration show') items = self.parse_listing(raw_output) self.assert_table_structure(items, BASIC_CONFIG_HEADERS) cmd_output = self.openstack('configuration show', parse_output=True) - self.assertEqual( - configuration.REDACTED, - cmd_output['auth.password'] - ) + self.assertEqual(configuration.REDACTED, cmd_output['auth.password']) self.assertIn( 'auth.password', cmd_output.keys(), @@ -41,30 +37,25 @@ def test_configuration_show(self): # Test show --mask cmd_output = self.openstack( - 'configuration show --mask', parse_output=True, - ) - self.assertEqual( - configuration.REDACTED, - cmd_output['auth.password'] + 'configuration show --mask', + parse_output=True, ) + self.assertEqual(configuration.REDACTED, cmd_output['auth.password']) # Test show --unmask cmd_output = self.openstack( - 'configuration show --unmask', parse_output=True, + 'configuration show --unmask', + parse_output=True, ) # If we are using os-client-config, this will not be set. 
Rather than # parse clouds.yaml to get the right value, just make sure # we are not getting redacted. passwd = os.environ.get('OS_PASSWORD') if passwd: - self.assertEqual( - passwd, - cmd_output['auth.password'] - ) + self.assertEqual(passwd, cmd_output['auth.password']) else: self.assertNotEqual( - configuration.REDACTED, - cmd_output['auth.password'] + configuration.REDACTED, cmd_output['auth.password'] ) @@ -72,7 +63,6 @@ class ConfigurationTestsNoAuth(base.TestCase): """Functional test for configuration with no auth""" def test_configuration_show(self): - # Test show without option raw_output = self.openstack( 'configuration show', diff --git a/openstackclient/tests/functional/common/test_extension.py b/openstackclient/tests/functional/common/test_extension.py index 8784c55b14..c65f52db51 100644 --- a/openstackclient/tests/functional/common/test_extension.py +++ b/openstackclient/tests/functional/common/test_extension.py @@ -23,7 +23,7 @@ class ExtensionTests(base.TestCase): @classmethod def setUpClass(cls): - super(ExtensionTests, cls).setUpClass() + super().setUpClass() cls.haz_network = cls.is_service_enabled('network') def test_extension_list_compute(self): diff --git a/openstackclient/tests/functional/common/test_help.py b/openstackclient/tests/functional/common/test_help.py index e84c22e00d..c44a906091 100644 --- a/openstackclient/tests/functional/common/test_help.py +++ b/openstackclient/tests/functional/common/test_help.py @@ -21,14 +21,16 @@ class HelpTests(base.TestCase): """Functional tests for openstackclient help output.""" SERVER_COMMANDS = [ - ('server add security group', 'Add security group to server'), + ('server add security group', 'Add security group(s) to server'), ('server add volume', 'Add volume to server'), ('server backup create', 'Create a server backup image'), ('server create', 'Create a new server'), ('server delete', 'Delete server(s)'), ('server dump create', 'Create a dump file in server(s)'), - ('server image create', - 'Create a new server disk image from an existing server'), + ( + 'server image create', + 'Create a new server disk image from an existing server', + ), ('server list', 'List servers'), ('server lock', 'Lock server(s)'), ('server migrate', 'Migrate server to different host'), @@ -51,18 +53,16 @@ class HelpTests(base.TestCase): ('server unpause', 'Unpause server(s)'), ('server unrescue', 'Restore server from rescue mode'), ('server unset', 'Unset server properties'), - ('server unshelve', 'Unshelve server(s)') + ('server unshelve', 'Unshelve server(s)'), ] def test_server_commands_main_help(self): """Check server commands in main help message.""" raw_output = self.openstack('help') for command, description in self.SERVER_COMMANDS: - msg = 'Command: %s not found in help output:\n%s' % ( - command, raw_output) + msg = f'Command: {command} not found in help output:\n{raw_output}' self.assertIn(command, raw_output, msg) - msg = 'Description: %s not found in help output:\n%s' % ( - description, raw_output) + msg = f'Description: {description} not found in help output:\n{raw_output}' self.assertIn(description, raw_output, msg) def test_server_only_help(self): diff --git a/openstackclient/tests/functional/common/test_module.py b/openstackclient/tests/functional/common/test_module.py index 967d3b4982..41486d1ff5 100644 --- a/openstackclient/tests/functional/common/test_module.py +++ b/openstackclient/tests/functional/common/test_module.py @@ -18,13 +18,9 @@ class ModuleTest(base.TestCase): """Functional tests for openstackclient module list 
output.""" - CLIENTS = ['openstackclient', - 'keystoneclient', - 'novaclient', - 'openstack'] + CLIENTS = ['openstackclient', 'keystoneclient', 'openstack'] - LIBS = ['osc_lib', - 'keystoneauth1'] + LIBS = ['osc_lib', 'keystoneauth1'] def test_module_list(self): # Test module list @@ -42,6 +38,7 @@ def test_module_list(self): class CommandTest(base.TestCase): """Functional tests for openstackclient command list.""" + GROUPS = [ 'openstack.volume.v3', 'openstack.network.v2', @@ -59,16 +56,10 @@ def test_command_list_no_option(self): self.assertIn(one_group, group_names) def test_command_list_with_group(self): - input_groups = [ - 'volume', - 'network', - 'image', - 'identity', - 'compute.v2' - ] + input_groups = ['volume', 'network', 'image', 'identity', 'compute.v2'] for each_input in input_groups: cmd_output = self.openstack( - 'command list --group %s' % each_input, + f'command list --group {each_input}', parse_output=True, ) group_names = [each.get('Command Group') for each in cmd_output] diff --git a/openstackclient/tests/functional/common/test_quota.py b/openstackclient/tests/functional/common/test_quota.py index 6e48df1d76..373b178c15 100644 --- a/openstackclient/tests/functional/common/test_quota.py +++ b/openstackclient/tests/functional/common/test_quota.py @@ -12,6 +12,7 @@ import uuid +from tempest.lib.common.utils import data_utils from tempest.lib import exceptions from openstackclient.tests.functional import base @@ -24,48 +25,19 @@ class QuotaTests(base.TestCase): test runs as these may run in parallel and otherwise step on each other. """ - PROJECT_NAME = None + PROJECT_NAME: str @classmethod def setUpClass(cls): - super(QuotaTests, cls).setUpClass() + super().setUpClass() cls.haz_network = cls.is_service_enabled('network') - cls.PROJECT_NAME =\ - cls.get_openstack_configuration_value('auth.project_name') + cls.PROJECT_NAME = data_utils.rand_name('TestProject') + cls.openstack(f'project create {cls.PROJECT_NAME}') - def test_quota_list_details_compute(self): - expected_headers = ["Resource", "In Use", "Reserved", "Limit"] - cmd_output = self.openstack( - 'quota list --detail --compute', - parse_output=True, - ) - self.assertIsNotNone(cmd_output) - resources = [] - for row in cmd_output: - row_headers = [str(r) for r in row.keys()] - self.assertEqual(sorted(expected_headers), sorted(row_headers)) - resources.append(row['Resource']) - # Ensure that returned quota is compute quota - self.assertIn("instances", resources) - # and that there is no network quota here - self.assertNotIn("networks", resources) - - def test_quota_list_details_network(self): - expected_headers = ["Resource", "In Use", "Reserved", "Limit"] - cmd_output = self.openstack( - 'quota list --detail --network', - parse_output=True, - ) - self.assertIsNotNone(cmd_output) - resources = [] - for row in cmd_output: - row_headers = [str(r) for r in row.keys()] - self.assertEqual(sorted(expected_headers), sorted(row_headers)) - resources.append(row['Resource']) - # Ensure that returned quota is network quota - self.assertIn("networks", resources) - # and that there is no compute quota here - self.assertNotIn("instances", resources) + @classmethod + def tearDownClass(cls): + cls.openstack(f'project delete {cls.PROJECT_NAME}') + super().tearDownClass() def test_quota_list_network_option(self): if not self.haz_network: @@ -111,9 +83,9 @@ def test_quota_set_project(self): if self.haz_network: network_option = "--routers 21 " self.openstack( - 'quota set --cores 31 --backups 41 ' + - network_option + - self.PROJECT_NAME 
+ 'quota set --cores 31 --backups 41 ' + + network_option + + self.PROJECT_NAME ) cmd_output = self.openstack( 'quota show ' + self.PROJECT_NAME, @@ -149,42 +121,23 @@ def test_quota_set_project(self): if self.haz_network: self.assertTrue(cmd_output["routers"] >= 0) - def test_quota_set_class(self): + def test_quota_set_default(self): self.openstack( - 'quota set --key-pairs 33 --snapshots 43 ' + - '--class default' - ) - cmd_output = self.openstack( - 'quota show --class default', - parse_output=True, - ) - self.assertIsNotNone(cmd_output) - cmd_output = {x['Resource']: x['Limit'] for x in cmd_output} - self.assertEqual( - 33, - cmd_output["key-pairs"], - ) - self.assertEqual( - 43, - cmd_output["snapshots"], + 'quota set --key-pairs 33 --snapshots 43 --class default' ) - - # Check default quota class cmd_output = self.openstack( - 'quota show --class', + 'quota show --default', parse_output=True, ) self.assertIsNotNone(cmd_output) - # We don't necessarily know the default quotas, we're checking the - # returned attributes cmd_output = {x['Resource']: x['Limit'] for x in cmd_output} - self.assertTrue(cmd_output["key-pairs"] >= 0) - self.assertTrue(cmd_output["snapshots"] >= 0) + self.assertEqual(33, cmd_output["key-pairs"]) + self.assertEqual(43, cmd_output["snapshots"]) def _restore_quota_limit(self, resource, limit, project): - self.openstack('quota set --%s %s %s' % (resource, limit, project)) + self.openstack(f'quota set --{resource} {limit} {project}') - def test_quota_network_set_with_no_force(self): + def test_quota_set_network(self): if not self.haz_network: self.skipTest('No Network service present') if not self.is_extension_enabled('quota-check-limit'): @@ -194,8 +147,12 @@ def test_quota_network_set_with_no_force(self): 'quota list --network', parse_output=True, ) - self.addCleanup(self._restore_quota_limit, 'network', - cmd_output[0]['Networks'], self.PROJECT_NAME) + self.addCleanup( + self._restore_quota_limit, + 'network', + cmd_output[0]['Networks'], + self.PROJECT_NAME, + ) self.openstack('quota set --networks 40 ' + self.PROJECT_NAME) cmd_output = self.openstack( @@ -207,14 +164,17 @@ def test_quota_network_set_with_no_force(self): # That will ensure we have at least two networks in the system. for _ in range(2): - self.openstack('network create --project %s %s' % - (self.PROJECT_NAME, uuid.uuid4().hex)) + self.openstack( + f'network create --project {self.PROJECT_NAME} {uuid.uuid4().hex}' + ) - self.assertRaises(exceptions.CommandFailed, self.openstack, - 'quota set --networks 1 --no-force ' + - self.PROJECT_NAME) + self.assertRaises( + exceptions.CommandFailed, + self.openstack, + 'quota set --networks 1 ' + self.PROJECT_NAME, + ) - def test_quota_network_set_with_force(self): + def test_quota_set_network_with_force(self): self.skipTest('story 2010110') if not self.haz_network: self.skipTest('No Network service present') @@ -232,8 +192,12 @@ def test_quota_network_set_with_force(self): 'quota list --network', parse_output=True, ) - self.addCleanup(self._restore_quota_limit, 'network', - cmd_output[0]['Networks'], self.PROJECT_NAME) + self.addCleanup( + self._restore_quota_limit, + 'network', + cmd_output[0]['Networks'], + self.PROJECT_NAME, + ) self.openstack('quota set --networks 40 ' + self.PROJECT_NAME) cmd_output = self.openstack( @@ -245,8 +209,9 @@ def test_quota_network_set_with_force(self): # That will ensure we have at least two networks in the system. 
for _ in range(2): - self.openstack('network create --project %s %s' % - (self.PROJECT_NAME, uuid.uuid4().hex)) + self.openstack( + f'network create --project {self.PROJECT_NAME} {uuid.uuid4().hex}' + ) self.openstack('quota set --networks 1 --force ' + self.PROJECT_NAME) cmd_output = self.openstack( @@ -255,3 +220,39 @@ def test_quota_network_set_with_force(self): ) self.assertIsNotNone(cmd_output) self.assertEqual(1, cmd_output[0]['Networks']) + + def test_quota_show(self): + expected_headers = ["Resource", "Limit"] + cmd_output = self.openstack( + 'quota show', + parse_output=True, + ) + self.assertIsNotNone(cmd_output) + resources = [] + for row in cmd_output: + row_headers = [str(r) for r in row.keys()] + self.assertEqual(sorted(expected_headers), sorted(row_headers)) + resources.append(row['Resource']) + # Ensure that returned quota has network quota... + self.assertIn("networks", resources) + # ...and compute quota + self.assertIn("instances", resources) + + def test_quota_show_usage_option(self): + expected_headers = ["Resource", "Limit", "In Use", "Reserved"] + cmd_output = self.openstack( + 'quota show --usage', + parse_output=True, + ) + self.assertIsNotNone(cmd_output) + resources = [] + for row in cmd_output: + row_headers = [str(r) for r in row.keys()] + self.assertEqual(sorted(expected_headers), sorted(row_headers)) + resources.append(row['Resource']) + for header in expected_headers[1:]: + self.assertIsInstance(row[header], int) + # Ensure that returned quota has network quota... + self.assertIn("networks", resources) + # ...and compute quota + self.assertIn("instances", resources) diff --git a/openstackclient/tests/functional/compute/v2/common.py b/openstackclient/tests/functional/compute/v2/common.py index 7eca4603b4..5892ee3a0b 100644 --- a/openstackclient/tests/functional/compute/v2/common.py +++ b/openstackclient/tests/functional/compute/v2/common.py @@ -22,19 +22,19 @@ class ComputeTestCase(base.TestCase): """Common functional test bits for Compute commands""" - flavor_name = None - image_name = None - network_arg = None + flavor_name: str + image_name: str + network_arg: str def setUp(self): """Select common resources""" - super(ComputeTestCase, self).setUp() + super().setUp() self.flavor_name = self.get_flavor() self.image_name = self.get_image() self.network_arg = self.get_network() @classmethod - def get_flavor(cls): + def get_flavor(cls) -> str: # NOTE(rtheis): Get cirros256 or m1.tiny flavors since functional # tests may create other flavors. flavors = cls.openstack("flavor list", parse_output=True) @@ -43,25 +43,32 @@ def get_flavor(cls): if flavor['Name'] in ['m1.tiny', 'cirros256']: server_flavor = flavor['Name'] break + + assert server_flavor is not None + return server_flavor @classmethod - def get_image(cls): + def get_image(cls) -> str: # NOTE(rtheis): Get first Cirros image since functional tests may # create other images. Image may be named '-uec' or # '-disk'. images = cls.openstack("image list", parse_output=True) server_image = None for image in images: - if (image['Name'].startswith('cirros-') and - (image['Name'].endswith('-uec') or - image['Name'].endswith('-disk'))): + if image['Name'].startswith('cirros-') and ( + image['Name'].endswith('-uec') + or image['Name'].endswith('-disk') + ): server_image = image['Name'] break + + assert server_image is not None + return server_image @classmethod - def get_network(cls): + def get_network(cls) -> str: try: # NOTE(rtheis): Get private network since functional tests may # create other networks. 
@@ -83,12 +90,17 @@ def server_create(self, name=None, cleanup=True): self.network_arg = self.get_network() name = name or uuid.uuid4().hex cmd_output = self.openstack( - 'server create ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - self.network_arg + ' ' + - '--wait ' + - name, + 'server create ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + self.network_arg + + ' ' + + '--wait ' + + name, parse_output=True, ) self.assertIsNotNone(cmd_output["id"]) @@ -106,11 +118,11 @@ def server_delete(self, name): self.assertOutput('', raw_output) def wait_for_status( - self, - name, - expected_status='ACTIVE', - wait=900, - interval=10, + self, + name, + expected_status='ACTIVE', + wait=900, + interval=10, ): """Wait until server reaches expected status""" # TODO(thowe): Add a server wait command to osc @@ -118,24 +130,22 @@ def wait_for_status( total_sleep = 0 while total_sleep < wait: cmd_output = self.openstack( - 'server show ' + - name, + 'server show ' + name, parse_output=True, ) status = cmd_output['status'] if status == expected_status: - print('Server {} now has status {}'.format( - name, status)) + print(f'Server {name} now has status {status}') break - print('Server {}: Waiting for {}, current status: {}'.format( - name, expected_status, status)) + print( + f'Server {name}: Waiting for {expected_status}, current status: {status}' + ) self.assertNotIn(status, failures) time.sleep(interval) total_sleep += interval cmd_output = self.openstack( - 'server show ' + - name, + 'server show ' + name, parse_output=True, ) status = cmd_output['status'] diff --git a/openstackclient/tests/functional/compute/v2/test_aggregate.py b/openstackclient/tests/functional/compute/v2/test_aggregate.py index 80750faf58..d5d7c57967 100644 --- a/openstackclient/tests/functional/compute/v2/test_aggregate.py +++ b/openstackclient/tests/functional/compute/v2/test_aggregate.py @@ -27,24 +27,12 @@ def test_aggregate_crud(self): fail_ok=True, ) cmd_output = self.openstack( - 'aggregate create ' + - '--zone nova ' + - '--property a=b ' + - name1, + 'aggregate create ' + '--zone nova ' + '--property a=b ' + name1, parse_output=True, ) - self.assertEqual( - name1, - cmd_output['name'] - ) - self.assertEqual( - 'nova', - cmd_output['availability_zone'] - ) - self.assertIn( - 'a', - cmd_output['properties'] - ) + self.assertEqual(name1, cmd_output['name']) + self.assertEqual('nova', cmd_output['availability_zone']) + self.assertIn('a', cmd_output['properties']) cmd_output = self.openstack( 'aggregate show ' + name1, parse_output=True, @@ -58,19 +46,11 @@ def test_aggregate_crud(self): fail_ok=True, ) cmd_output = self.openstack( - 'aggregate create ' + - '--zone external ' + - name2, + 'aggregate create ' + '--zone external ' + name2, parse_output=True, ) - self.assertEqual( - name2, - cmd_output['name'] - ) - self.assertEqual( - 'external', - cmd_output['availability_zone'] - ) + self.assertEqual(name2, cmd_output['name']) + self.assertEqual('external', cmd_output['availability_zone']) cmd_output = self.openstack( 'aggregate show ' + name2, parse_output=True, @@ -85,36 +65,25 @@ def test_aggregate_crud(self): fail_ok=True, ) raw_output = self.openstack( - 'aggregate set ' + - '--name ' + name3 + ' ' + - '--zone internal ' + - '--no-property ' + - '--property c=d ' + - name1 + 'aggregate set ' + + '--name ' + + name3 + + ' ' + + '--zone internal ' + + '--no-property ' + + '--property c=d ' + + name1 ) self.assertOutput('', raw_output) 
cmd_output = self.openstack( - 'aggregate show ' + - name3, + 'aggregate show ' + name3, parse_output=True, ) - self.assertEqual( - name3, - cmd_output['name'] - ) - self.assertEqual( - 'internal', - cmd_output['availability_zone'] - ) - self.assertIn( - 'c', - cmd_output['properties'] - ) - self.assertNotIn( - 'a', - cmd_output['properties'] - ) + self.assertEqual(name3, cmd_output['name']) + self.assertEqual('internal', cmd_output['availability_zone']) + self.assertIn('c', cmd_output['properties']) + self.assertNotIn('a', cmd_output['properties']) # Test aggregate list cmd_output = self.openstack( @@ -145,28 +114,18 @@ def test_aggregate_crud(self): # Test unset raw_output = self.openstack( - 'aggregate unset ' + - '--property c ' + - name3 + 'aggregate unset ' + '--property c ' + name3 ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'aggregate show ' + - name3, + 'aggregate show ' + name3, parse_output=True, ) - self.assertNotIn( - "c='d'", - cmd_output['properties'] - ) + self.assertNotIn("c='d'", cmd_output['properties']) # test aggregate delete - del_output = self.openstack( - 'aggregate delete ' + - name3 + ' ' + - name2 - ) + del_output = self.openstack('aggregate delete ' + name3 + ' ' + name2) self.assertOutput('', del_output) def test_aggregate_add_and_remove_host(self): @@ -185,31 +144,18 @@ def test_aggregate_add_and_remove_host(self): name = uuid.uuid4().hex self.addCleanup(self.openstack, 'aggregate delete ' + name) - self.openstack( - 'aggregate create ' + - name - ) + self.openstack('aggregate create ' + name) # Test add host cmd_output = self.openstack( - 'aggregate add host ' + - name + ' ' + - host_name, + 'aggregate add host ' + name + ' ' + host_name, parse_output=True, ) - self.assertIn( - host_name, - cmd_output['hosts'] - ) + self.assertIn(host_name, cmd_output['hosts']) # Test remove host cmd_output = self.openstack( - 'aggregate remove host ' + - name + ' ' + - host_name, + 'aggregate remove host ' + name + ' ' + host_name, parse_output=True, ) - self.assertNotIn( - host_name, - cmd_output['hosts'] - ) + self.assertNotIn(host_name, cmd_output['hosts']) diff --git a/openstackclient/tests/functional/compute/v2/test_flavor.py b/openstackclient/tests/functional/compute/v2/test_flavor.py index 98bf1ca5ed..4a0ff4883c 100644 --- a/openstackclient/tests/functional/compute/v2/test_flavor.py +++ b/openstackclient/tests/functional/compute/v2/test_flavor.py @@ -22,7 +22,7 @@ class FlavorTests(base.TestCase): @classmethod def setUpClass(cls): - super(FlavorTests, cls).setUpClass() + super().setUpClass() # Make a project cmd_output = cls.openstack( "project create --enable " + cls.PROJECT_NAME, @@ -36,27 +36,31 @@ def tearDownClass(cls): raw_output = cls.openstack("project delete " + cls.PROJECT_NAME) cls.assertOutput('', raw_output) finally: - super(FlavorTests, cls).tearDownClass() + super().tearDownClass() def test_flavor_delete(self): """Test create w/project, delete multiple""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - "flavor create " + - "--project " + self.PROJECT_NAME + " " + - "--private " + - name1, + "flavor create " + + "--project " + + self.PROJECT_NAME + + " " + + "--private " + + name1, parse_output=True, ) self.assertIsNotNone(cmd_output["id"]) name2 = uuid.uuid4().hex cmd_output = self.openstack( - "flavor create " + - "--id qaz " + - "--project " + self.PROJECT_NAME + " " + - "--private " + - name2, + "flavor create " + + "--id qaz " + + "--project " + + self.PROJECT_NAME + + " " + + "--private " + + name2, parse_output=True, 
) self.assertIsNotNone(cmd_output["id"]) @@ -74,10 +78,7 @@ def test_flavor_list(self): """Test create defaults, list filters, delete""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - "flavor create " + - "--property a=b " + - "--property c=d " + - name1, + "flavor create " + "--property a=b " + "--property c=d " + name1, parse_output=True, ) self.addCleanup(self.openstack, "flavor delete " + name1) @@ -89,13 +90,13 @@ def test_flavor_list(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - "flavor create " + - "--id qaz " + - "--ram 123 " + - "--private " + - "--property a=b2 " + - "--property b=d2 " + - name2, + "flavor create " + + "--id qaz " + + "--ram 123 " + + "--private " + + "--property a=b2 " + + "--property b=d2 " + + name2, parse_output=True, ) self.addCleanup(self.openstack, "flavor delete " + name2) @@ -135,8 +136,7 @@ def test_flavor_list(self): # Test list --long cmd_output = self.openstack( - "flavor list " + - "--long", + "flavor list " + "--long", parse_output=True, ) # We have list of complex json objects @@ -154,8 +154,7 @@ def test_flavor_list(self): # Test list --public cmd_output = self.openstack( - "flavor list " + - "--public", + "flavor list " + "--public", parse_output=True, ) col_name = [x["Name"] for x in cmd_output] @@ -164,8 +163,7 @@ def test_flavor_list(self): # Test list --private cmd_output = self.openstack( - "flavor list " + - "--private", + "flavor list " + "--private", parse_output=True, ) col_name = [x["Name"] for x in cmd_output] @@ -174,8 +172,7 @@ def test_flavor_list(self): # Test list --all cmd_output = self.openstack( - "flavor list " + - "--all", + "flavor list " + "--all", parse_output=True, ) col_name = [x["Name"] for x in cmd_output] @@ -186,14 +183,14 @@ def test_flavor_properties(self): """Test create defaults, list filters, delete""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - "flavor create " + - "--id qaz " + - "--ram 123 " + - "--disk 20 " + - "--private " + - "--property a=first " + - "--property b=second " + - name1, + "flavor create " + + "--id qaz " + + "--ram 123 " + + "--disk 20 " + + "--private " + + "--property a=first " + + "--property b=second " + + name1, parse_output=True, ) self.addCleanup(self.openstack, "flavor delete " + name1) @@ -223,42 +220,30 @@ def test_flavor_properties(self): ) raw_output = self.openstack( - "flavor set " + - "--property a='third and 10' " + - "--property g=fourth " + - name1 + "flavor set " + + "--property a='third and 10' " + + "--property g=fourth " + + name1 ) self.assertEqual('', raw_output) cmd_output = self.openstack( - "flavor show " + - name1, + "flavor show " + name1, parse_output=True, ) self.assertEqual( "qaz", cmd_output["id"], ) - self.assertEqual( - 'third and 10', - cmd_output['properties']['a']) - self.assertEqual( - 'second', - cmd_output['properties']['b']) - self.assertEqual( - 'fourth', - cmd_output['properties']['g']) + self.assertEqual('third and 10', cmd_output['properties']['a']) + self.assertEqual('second', cmd_output['properties']['b']) + self.assertEqual('fourth', cmd_output['properties']['g']) - raw_output = self.openstack( - "flavor unset " + - "--property b " + - name1 - ) + raw_output = self.openstack("flavor unset " + "--property b " + name1) self.assertEqual('', raw_output) cmd_output = self.openstack( - "flavor show " + - name1, + "flavor show " + name1, parse_output=True, ) diff --git a/openstackclient/tests/functional/compute/v2/test_hypervisor.py b/openstackclient/tests/functional/compute/v2/test_hypervisor.py index 
9bc2328095..0ed4e904bc 100644 --- a/openstackclient/tests/functional/compute/v2/test_hypervisor.py +++ b/openstackclient/tests/functional/compute/v2/test_hypervisor.py @@ -21,32 +21,32 @@ class HypervisorTests(base.TestCase): def test_hypervisor_list(self): """Test create defaults, list filters, delete""" # Test list - cmd_output = json.loads(self.openstack( - "hypervisor list -f json --os-compute-api-version 2.1" - )) + cmd_output = json.loads( + self.openstack( + "hypervisor list -f json --os-compute-api-version 2.1" + ) + ) ids1 = [x["ID"] for x in cmd_output] self.assertIsNotNone(cmd_output) - cmd_output = json.loads(self.openstack( - "hypervisor list -f json" - )) + cmd_output = json.loads(self.openstack("hypervisor list -f json")) ids2 = [x["ID"] for x in cmd_output] self.assertIsNotNone(cmd_output) # Show test - old microversion for i in ids1: - cmd_output = json.loads(self.openstack( - "hypervisor show %s -f json " - " --os-compute-api-version 2.1" - % (i) - )) + cmd_output = json.loads( + self.openstack( + f"hypervisor show {i} -f json " + " --os-compute-api-version 2.1" + ) + ) self.assertIsNotNone(cmd_output) # When we list hypervisors with older MV we get ids as integers. We # need to verify that show finds resources independently # Show test - latest microversion for i in ids2: - cmd_output = json.loads(self.openstack( - "hypervisor show %s -f json" - % (i) - )) + cmd_output = json.loads( + self.openstack(f"hypervisor show {i} -f json") + ) self.assertIsNotNone(cmd_output) diff --git a/openstackclient/tests/functional/compute/v2/test_keypair.py b/openstackclient/tests/functional/compute/v2/test_keypair.py index 828d5dad18..2e01b1bed2 100644 --- a/openstackclient/tests/functional/compute/v2/test_keypair.py +++ b/openstackclient/tests/functional/compute/v2/test_keypair.py @@ -21,12 +21,18 @@ class KeypairBase(base.TestCase): """Methods for functional tests.""" - def keypair_create(self, name=data_utils.rand_uuid()): + def keypair_create(self, name=data_utils.rand_uuid(), user=None): """Create keypair and add cleanup.""" - raw_output = self.openstack('keypair create ' + name) - self.addCleanup(self.keypair_delete, name, True) + cmd = 'keypair create ' + name + if user is not None: + cmd += ' --user ' + user + raw_output = self.openstack(cmd) + self.addCleanup( + self.keypair_delete, name, ignore_exceptions=True, user=user + ) if not raw_output: self.fail('Keypair has not been created!') + return name def keypair_list(self, params=''): """Return dictionary with list of keypairs.""" @@ -34,10 +40,13 @@ def keypair_list(self, params=''): keypairs = self.parse_show_as_object(raw_output) return keypairs - def keypair_delete(self, name, ignore_exceptions=False): + def keypair_delete(self, name, ignore_exceptions=False, user=None): """Try to delete keypair by name.""" try: - self.openstack('keypair delete ' + name) + cmd = 'keypair delete ' + name + if user is not None: + cmd += ' --user ' + user + self.openstack(cmd) except exceptions.CommandFailed: if not ignore_exceptions: raise @@ -57,7 +66,7 @@ class KeypairTests(KeypairBase): def setUp(self): """Create keypair with randomized name for tests.""" - super(KeypairTests, self).setUp() + super().setUp() self.KPName = data_utils.rand_name('TestKeyPair') self.keypair = self.keypair_create(self.KPName) @@ -68,8 +77,11 @@ def test_keypair_create_duplicate(self): 1) Create keypair in setUp 2) Try to create duplicate keypair with the same name """ - self.assertRaises(exceptions.CommandFailed, - self.openstack, 'keypair create ' + self.KPName) 
+ self.assertRaises( + exceptions.CommandFailed, + self.openstack, + 'keypair create ' + self.KPName, + ) def test_keypair_create_noname(self): """Try to create keypair without name. @@ -77,8 +89,9 @@ def test_keypair_create_noname(self): Test steps: 1) Try to create keypair without a name """ - self.assertRaises(exceptions.CommandFailed, - self.openstack, 'keypair create') + self.assertRaises( + exceptions.CommandFailed, self.openstack, 'keypair create' + ) def test_keypair_create_public_key(self): """Test for create keypair with --public-key option. @@ -92,7 +105,7 @@ def test_keypair_create_public_key(self): f.flush() raw_output = self.openstack( - 'keypair create --public-key %s tmpkey' % f.name, + f'keypair create --public-key {f.name} tmpkey', ) self.addCleanup( self.openstack, @@ -109,7 +122,7 @@ def test_keypair_create_private_key(self): """ with tempfile.NamedTemporaryFile(mode='w+') as f: cmd_output = self.openstack( - 'keypair create --private-key %s tmpkey' % f.name, + f'keypair create --private-key {f.name} tmpkey', parse_output=True, ) self.addCleanup(self.openstack, 'keypair delete tmpkey') @@ -117,24 +130,30 @@ def test_keypair_create_private_key(self): self.assertIsNotNone(cmd_output.get('user_id')) self.assertIsNotNone(cmd_output.get('fingerprint')) pk_content = f.read() - self.assertInOutput('-----BEGIN RSA PRIVATE KEY-----', pk_content) + self.assertInOutput( + '-----BEGIN OPENSSH PRIVATE KEY-----', + pk_content, + ) self.assertRegex(pk_content, "[0-9A-Za-z+/]+[=]{0,3}\n") - self.assertInOutput('-----END RSA PRIVATE KEY-----', pk_content) + self.assertInOutput( + '-----END OPENSSH PRIVATE KEY-----', + pk_content, + ) def test_keypair_create(self): """Test keypair create command. Test steps: 1) Create keypair in setUp - 2) Check RSA private key in output + 2) Check Ed25519 private key in output 3) Check for new keypair in keypairs list """ NewName = data_utils.rand_name('TestKeyPairCreated') raw_output = self.openstack('keypair create ' + NewName) self.addCleanup(self.openstack, 'keypair delete ' + NewName) - self.assertInOutput('-----BEGIN RSA PRIVATE KEY-----', raw_output) + self.assertInOutput('-----BEGIN OPENSSH PRIVATE KEY-----', raw_output) self.assertRegex(raw_output, "[0-9A-Za-z+/]+[=]{0,3}\n") - self.assertInOutput('-----END RSA PRIVATE KEY-----', raw_output) + self.assertInOutput('-----END OPENSSH PRIVATE KEY-----', raw_output) self.assertIn(NewName, self.keypair_list()) def test_keypair_delete_not_existing(self): @@ -144,8 +163,11 @@ def test_keypair_delete_not_existing(self): 1) Create keypair in setUp 2) Try to delete not existing keypair """ - self.assertRaises(exceptions.CommandFailed, - self.openstack, 'keypair delete not_existing') + self.assertRaises( + exceptions.CommandFailed, + self.openstack, + 'keypair delete not_existing', + ) def test_keypair_delete(self): """Test keypair delete command. @@ -187,3 +209,30 @@ def test_keypair_show(self): items = self.parse_listing(raw_output) self.assert_table_structure(items, HEADERS) self.assertInOutput(self.KPName, raw_output) + + def test_keypair_list_by_project(self): + """Test keypair list by project. 
+ + Test steps: + 1) Create keypair for admin project in setUp + 2) Create a new project + 3) Create a new user + 4) Associate the new user with the new project + 5) Create keypair for the new user + 6) List keypairs by the new project + 7) Check that only the keypair from step 5 is returned + """ + project_name = data_utils.rand_name('TestProject') + self.openstack(f'project create {project_name}') + self.addCleanup(self.openstack, f'project delete {project_name}') + user_name = data_utils.rand_name('TestUser') + self.openstack(f'user create {user_name}') + self.addCleanup(self.openstack, f'user delete {user_name}') + self.openstack( + f'role add --user {user_name} --project {project_name} member' + ) + keypair_name = self.keypair_create(user=user_name) + raw_output = self.openstack(f'keypair list --project {project_name}') + items = self.parse_listing(raw_output) + self.assertEqual(1, len(items)) + self.assertEqual(keypair_name, items[0]['Name']) diff --git a/openstackclient/tests/functional/compute/v2/test_server.py b/openstackclient/tests/functional/compute/v2/test_server.py index 05945a0249..6afa2c7c0e 100644 --- a/openstackclient/tests/functional/compute/v2/test_server.py +++ b/openstackclient/tests/functional/compute/v2/test_server.py @@ -27,11 +27,11 @@ class ServerTests(common.ComputeTestCase): @classmethod def setUpClass(cls): - super(ServerTests, cls).setUpClass() + super().setUpClass() cls.haz_network = cls.is_service_enabled('network') def test_server_list(self): - """Test server list, set""" + """Test server list""" cmd_output = self.server_create() name1 = cmd_output['name'] cmd_output = self.server_create() @@ -52,16 +52,14 @@ def test_server_list(self): self.assertEqual("", raw_output) self.wait_for_status(name2, "PAUSED") cmd_output = self.openstack( - 'server list ' + - '--status ACTIVE', + 'server list ' + '--status ACTIVE', parse_output=True, ) col_name = [x["Name"] for x in cmd_output] self.assertIn(name1, col_name) self.assertNotIn(name2, col_name) cmd_output = self.openstack( - 'server list ' + - '--status PAUSED', + 'server list ' + '--status PAUSED', parse_output=True, ) col_name = [x["Name"] for x in cmd_output] @@ -112,8 +110,9 @@ def test_server_list_with_marker_and_deleted(self): parse_output=True, ) except exceptions.CommandFailed as e: - self.assertIn('marker [%s] not found' % (name2), - e.stderr.decode('utf-8')) + self.assertIn( + f'marker [{name2}] not found', e.stderr.decode('utf-8') + ) def test_server_list_with_changes_before(self): """Test server list. 
@@ -132,8 +131,7 @@ def test_server_list_with_changes_before(self): server_name3 = cmd_output['name'] cmd_output = self.openstack( - '--os-compute-api-version 2.66 ' + - 'server list ' + '--os-compute-api-version 2.66 ' + 'server list ' '--changes-before ' + updated_at2, parse_output=True, ) @@ -158,8 +156,7 @@ def test_server_list_with_changes_since(self): server_name3 = cmd_output['name'] cmd_output = self.openstack( - 'server list ' - '--changes-since ' + updated_at2, + 'server list --changes-since ' + updated_at2, parse_output=True, ) @@ -184,10 +181,12 @@ def test_server_list_with_changes_before_and_changes_since(self): updated_at3 = cmd_output['updated'] cmd_output = self.openstack( - '--os-compute-api-version 2.66 ' + - 'server list ' + - '--changes-since ' + updated_at2 + - ' --changes-before ' + updated_at3, + '--os-compute-api-version 2.66 ' + + 'server list ' + + '--changes-since ' + + updated_at2 + + ' --changes-before ' + + updated_at3, parse_output=True, ) @@ -204,21 +203,19 @@ def test_server_set(self): # Have a look at some other fields flavor = self.openstack( - 'flavor show ' + - self.flavor_name, + 'flavor show ' + self.flavor_name, parse_output=True, ) self.assertEqual( self.flavor_name, flavor['name'], ) - self.assertEqual( - '%s (%s)' % (flavor['name'], flavor['id']), - cmd_output["flavor"], - ) + # assume the v2.47+ output format + self.assertIsInstance(cmd_output['flavor'], dict) + self.assertIn('name', cmd_output['flavor']) + self.assertEqual(flavor['name'], cmd_output['flavor']['name']) image = self.openstack( - 'image show ' + - self.image_name, + 'image show ' + self.image_name, parse_output=True, ) self.assertEqual( @@ -226,21 +223,18 @@ def test_server_set(self): image['name'], ) self.assertEqual( - '%s (%s)' % (image['name'], image['id']), + '{} ({})'.format(image['name'], image['id']), cmd_output["image"], ) # Test properties set raw_output = self.openstack( - 'server set ' + - '--property a=b --property c=d ' + - name + 'server set ' + '--property a=b --property c=d ' + name ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'server show ' + - name, + 'server show ' + name, parse_output=True, ) # Really, shouldn't this be a list? 
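The first hunk above drops the old `'<name> (<id>)'` string comparison for the server's flavor and instead asserts on an embedded dict, per the `# assume the v2.47+ output format` comment. A short sketch of a helper that would tolerate both representations; the helper itself is hypothetical, and the `'name'` key simply follows the assertions in that hunk:

def flavor_name_from_server(server):
    # Parsed `server show` output: before compute API v2.47 the flavor field
    # is rendered as "<name> (<id>)"; from v2.47 on it is an embedded dict.
    flavor = server['flavor']
    if isinstance(flavor, dict):
        return flavor['name']
    # e.g. "m1.tiny (1)" -> "m1.tiny"
    return flavor.rsplit(' (', 1)[0]
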
@@ -249,14 +243,9 @@ def test_server_set(self): cmd_output['properties'], ) - raw_output = self.openstack( - 'server unset ' + - '--property a ' + - name - ) + raw_output = self.openstack('server unset ' + '--property a ' + name) cmd_output = self.openstack( - 'server show ' + - name, + 'server show ' + name, parse_output=True, ) self.assertEqual( @@ -267,14 +256,11 @@ def test_server_set(self): # Test set --name new_name = uuid.uuid4().hex raw_output = self.openstack( - 'server set ' + - '--name ' + new_name + ' ' + - name + 'server set ' + '--name ' + new_name + ' ' + name ) self.assertOutput("", raw_output) cmd_output = self.openstack( - 'server show ' + - new_name, + 'server show ' + new_name, parse_output=True, ) self.assertEqual( @@ -283,9 +269,7 @@ def test_server_set(self): ) # Put it back so we clean up properly raw_output = self.openstack( - 'server set ' + - '--name ' + name + ' ' + - new_name + 'server set ' + '--name ' + name + ' ' + new_name ) self.assertOutput("", raw_output) @@ -295,25 +279,25 @@ def test_server_show(self): name = cmd_output['name'] # Simple show - cmd_output = json.loads(self.openstack( - f'server show -f json {name}' - )) + cmd_output = json.loads(self.openstack(f'server show -f json {name}')) self.assertEqual( name, cmd_output["name"], ) # Show diagnostics - cmd_output = json.loads(self.openstack( - f'server show -f json {name} --diagnostics' - )) + cmd_output = json.loads( + self.openstack(f'server show -f json {name} --diagnostics') + ) self.assertIn('driver', cmd_output) # Show topology - cmd_output = json.loads(self.openstack( - f'server show -f json {name} --topology ' - f'--os-compute-api-version 2.78' - )) + cmd_output = json.loads( + self.openstack( + f'server show -f json {name} --topology ' + f'--os-compute-api-version 2.78' + ) + ) self.assertIn('topology', cmd_output) def test_server_actions(self): @@ -358,8 +342,9 @@ def test_server_actions(self): self.wait_for_status(name, "ACTIVE") # rescue with image - raw_output = self.openstack('server rescue --image ' + - self.image_name + ' ' + name) + raw_output = self.openstack( + 'server rescue --image ' + self.image_name + ' ' + name + ) self.assertEqual("", raw_output) self.wait_for_status(name, "RESCUE") @@ -403,8 +388,7 @@ def _chain_addresses(addresses): # attach ip cmd_output = self.openstack( - 'floating ip create ' + - 'public', + 'floating ip create ' + 'public', parse_output=True, ) @@ -419,14 +403,11 @@ def _chain_addresses(addresses): self.assertNotEqual('', cmd_output['id']) self.assertNotEqual('', floating_ip) self.addCleanup( - self.openstack, - 'floating ip delete ' + str(cmd_output['id']) + self.openstack, 'floating ip delete ' + str(cmd_output['id']) ) raw_output = self.openstack( - 'server add floating ip ' + - name + ' ' + - floating_ip + 'server add floating ip ' + name + ' ' + floating_ip ) self.assertEqual("", raw_output) @@ -436,8 +417,7 @@ def _chain_addresses(addresses): wait_time = 0 while wait_time < 60: cmd_output = self.openstack( - 'server show ' + - name, + 'server show ' + name, parse_output=True, ) if floating_ip not in _chain_addresses(cmd_output['addresses']): @@ -455,9 +435,7 @@ def _chain_addresses(addresses): # detach ip raw_output = self.openstack( - 'server remove floating ip ' + - name + ' ' + - floating_ip + 'server remove floating ip ' + name + ' ' + floating_ip ) self.assertEqual("", raw_output) @@ -467,8 +445,7 @@ def _chain_addresses(addresses): wait_time = 0 while wait_time < 60: cmd_output = self.openstack( - 'server show ' + - name, + 'server show ' 
+ name, parse_output=True, ) if floating_ip in _chain_addresses(cmd_output['addresses']): @@ -480,8 +457,7 @@ def _chain_addresses(addresses): break cmd_output = self.openstack( - 'server show ' + - name, + 'server show ' + name, parse_output=True, ) self.assertNotIn( @@ -506,8 +482,7 @@ def test_server_boot_from_volume(self): # get image size cmd_output = self.openstack( - 'image show ' + - self.image_name, + 'image show ' + self.image_name, parse_output=True, ) try: @@ -520,10 +495,14 @@ def test_server_boot_from_volume(self): # create volume from image volume_name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--image ' + self.image_name + ' ' + - '--size ' + str(image_size) + ' ' + - volume_name, + 'volume create ' + + '--image ' + + self.image_name + + ' ' + + '--size ' + + str(image_size) + + ' ' + + volume_name, parse_output=True, ) self.assertIsNotNone(cmd_output["id"]) @@ -537,9 +516,11 @@ def test_server_boot_from_volume(self): # create empty volume empty_volume_name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--size ' + str(image_size) + ' ' + - empty_volume_name, + 'volume create ' + + '--size ' + + str(image_size) + + ' ' + + empty_volume_name, parse_output=True, ) self.assertIsNotNone(cmd_output["id"]) @@ -553,13 +534,20 @@ def test_server_boot_from_volume(self): # create server server_name = uuid.uuid4().hex server = self.openstack( - 'server create ' + - '--flavor ' + self.flavor_name + ' ' + - '--volume ' + volume_name + ' ' + - '--block-device-mapping vdb=' + empty_volume_name + ' ' + - self.network_arg + ' ' + - '--wait ' + - server_name, + 'server create ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--volume ' + + volume_name + + ' ' + + '--block-device-mapping vdb=' + + empty_volume_name + + ' ' + + self.network_arg + + ' ' + + '--wait ' + + server_name, parse_output=True, ) self.assertIsNotNone(server["id"]) @@ -579,15 +567,11 @@ def test_server_boot_from_volume(self): 'server list', parse_output=True, ) - self.assertEqual( - v2_server.IMAGE_STRING_FOR_BFV, - servers[0]['Image'] - ) + self.assertEqual(v2_server.IMAGE_STRING_FOR_BFV, servers[0]['Image']) # check volumes cmd_output = self.openstack( - 'volume show ' + - volume_name, + 'volume show ' + volume_name, parse_output=True, ) attachments = cmd_output['attachments'] @@ -609,8 +593,7 @@ def test_server_boot_from_volume(self): # present on the command line. Now we should see the # attachment. 
cmd_output = self.openstack( - 'volume show ' + - empty_volume_name, + 'volume show ' + empty_volume_name, parse_output=True, ) attachments = cmd_output['attachments'] @@ -635,9 +618,7 @@ def _test_server_boot_with_bdm_volume(self, use_legacy): # create source empty volume volume_name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - volume_name, + 'volume create ' + '--size 1 ' + volume_name, parse_output=True, ) volume_id = cmd_output["id"] @@ -658,13 +639,19 @@ def _test_server_boot_with_bdm_volume(self, use_legacy): # create server server_name = uuid.uuid4().hex server = self.openstack( - 'server create ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - bdm_arg + ' ' + - self.network_arg + ' ' + - '--wait ' + - server_name, + 'server create ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + bdm_arg + + ' ' + + self.network_arg + + ' ' + + '--wait ' + + server_name, parse_output=True, ) self.assertIsNotNone(server["id"]) @@ -677,8 +664,7 @@ def _test_server_boot_with_bdm_volume(self, use_legacy): # check server volumes_attached, format is # {"volumes_attached": "id='2518bc76-bf0b-476e-ad6b-571973745bb5'",} cmd_output = self.openstack( - 'server show ' + - server_name, + 'server show ' + server_name, parse_output=True, ) volumes_attached = cmd_output['volumes_attached'] @@ -686,8 +672,7 @@ def _test_server_boot_with_bdm_volume(self, use_legacy): # check volumes cmd_output = self.openstack( - 'volume show ' + - volume_name, + 'volume show ' + volume_name, parse_output=True, ) attachments = cmd_output['attachments'] @@ -723,9 +708,7 @@ def _test_server_boot_with_bdm_snapshot(self, use_legacy): # create source empty volume empty_volume_name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - empty_volume_name, + 'volume create ' + '--size 1 ' + empty_volume_name, parse_output=True, ) self.assertIsNotNone(cmd_output["id"]) @@ -736,9 +719,11 @@ def _test_server_boot_with_bdm_snapshot(self, use_legacy): # create snapshot of source empty volume empty_snapshot_name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume snapshot create ' + - '--volume ' + empty_volume_name + ' ' + - empty_snapshot_name, + 'volume snapshot create ' + + '--volume ' + + empty_volume_name + + ' ' + + empty_snapshot_name, parse_output=True, ) empty_snapshot_id = cmd_output["id"] @@ -746,10 +731,12 @@ def _test_server_boot_with_bdm_snapshot(self, use_legacy): # Deleting volume snapshot take time, so we need to wait until the # snapshot goes. Entries registered by self.addCleanup will be called # in the reverse order, so we need to register wait_for_delete first. 
- self.addCleanup(volume_wait_for_delete, - 'volume snapshot', empty_snapshot_name) - self.addCleanup(self.openstack, - 'volume snapshot delete ' + empty_snapshot_name) + self.addCleanup( + volume_wait_for_delete, 'volume snapshot', empty_snapshot_name + ) + self.addCleanup( + self.openstack, 'volume snapshot delete ' + empty_snapshot_name + ) self.assertEqual( empty_snapshot_name, cmd_output['name'], @@ -772,13 +759,19 @@ def _test_server_boot_with_bdm_snapshot(self, use_legacy): # create server with bdm snapshot server_name = uuid.uuid4().hex server = self.openstack( - 'server create ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - bdm_arg + ' ' + - self.network_arg + ' ' + - '--wait ' + - server_name, + 'server create ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + bdm_arg + + ' ' + + self.network_arg + + ' ' + + '--wait ' + + server_name, parse_output=True, ) self.assertIsNotNone(server["id"]) @@ -791,8 +784,7 @@ def _test_server_boot_with_bdm_snapshot(self, use_legacy): # check server volumes_attached, format is # {"volumes_attached": "id='2518bc76-bf0b-476e-ad6b-571973745bb5'",} cmd_output = self.openstack( - 'server show ' + - server_name, + 'server show ' + server_name, parse_output=True, ) volumes_attached = cmd_output['volumes_attached'] @@ -801,8 +793,7 @@ def _test_server_boot_with_bdm_snapshot(self, use_legacy): # check the volume that attached on server cmd_output = self.openstack( - 'volume show ' + - attached_volume_id, + 'volume show ' + attached_volume_id, parse_output=True, ) attachments = cmd_output['attachments'] @@ -826,9 +817,11 @@ def _test_server_boot_with_bdm_snapshot(self, use_legacy): 'volume list', parse_output=True, ) - target_volume = [each_volume - for each_volume in cmd_output - if each_volume['ID'] == attached_volume_id] + target_volume = [ + each_volume + for each_volume in cmd_output + if each_volume['ID'] == attached_volume_id + ] if target_volume: # check the attached volume is 'deleting' status self.assertEqual('deleting', target_volume[0]['Status']) @@ -859,14 +852,12 @@ def _test_server_boot_with_bdm_image(self, use_legacy): # it to the server at /dev/vdb and delete the volume when the # server is deleted. bdm_arg = ( - f'--block-device-mapping ' - f'vdb={self.image_name}:image:1:true ' + f'--block-device-mapping vdb={self.image_name}:image:1:true ' ) else: # get image ID cmd_output = self.openstack( - 'image show ' + - self.image_name, + 'image show ' + self.image_name, parse_output=True, ) image_id = cmd_output['id'] @@ -889,13 +880,19 @@ def _test_server_boot_with_bdm_image(self, use_legacy): # that volume to the server. 
server_name = uuid.uuid4().hex server = self.openstack( - 'server create ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - bdm_arg + ' ' + - self.network_arg + ' ' + - '--wait ' + - server_name, + 'server create ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + bdm_arg + + ' ' + + self.network_arg + + ' ' + + '--wait ' + + server_name, parse_output=True, ) self.assertIsNotNone(server["id"]) @@ -908,8 +905,7 @@ def _test_server_boot_with_bdm_image(self, use_legacy): # check server volumes_attached, format is # {"volumes_attached": "id='2518bc76-bf0b-476e-ad6b-571973745bb5'",} cmd_output = self.openstack( - 'server show ' + - server_name, + 'server show ' + server_name, parse_output=True, ) volumes_attached = cmd_output['volumes_attached'] @@ -918,8 +914,7 @@ def _test_server_boot_with_bdm_image(self, use_legacy): # check the volume that attached on server cmd_output = self.openstack( - 'volume show ' + - attached_volume_id, + 'volume show ' + attached_volume_id, parse_output=True, ) attachments = cmd_output['attachments'] @@ -950,9 +945,11 @@ def _test_server_boot_with_bdm_image(self, use_legacy): 'volume list', parse_output=True, ) - target_volume = [each_volume - for each_volume in cmd_output - if each_volume['ID'] == attached_volume_id] + target_volume = [ + each_volume + for each_volume in cmd_output + if each_volume['ID'] == attached_volume_id + ] if target_volume: # check the attached volume is 'deleting' status self.assertEqual('deleting', target_volume[0]['Status']) @@ -975,13 +972,18 @@ def test_boot_from_volume(self): # and not delete the volume when the server is deleted. server_name = uuid.uuid4().hex server = self.openstack( - 'server create ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - '--boot-from-volume 1 ' + # create a 1GB volume from the image - self.network_arg + ' ' + - '--wait ' + - server_name, + 'server create ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + '--boot-from-volume 1 ' + + self.network_arg # create a 1GB volume from the image + + ' ' + + '--wait ' + + server_name, parse_output=True, ) self.assertIsNotNone(server["id"]) @@ -994,8 +996,7 @@ def test_boot_from_volume(self): # check server volumes_attached, format is # {"volumes_attached": "id='2518bc76-bf0b-476e-ad6b-571973745bb5'",} cmd_output = self.openstack( - 'server show ' + - server_name, + 'server show ' + server_name, parse_output=True, ) volumes_attached = cmd_output['volumes_attached'] @@ -1012,8 +1013,7 @@ def test_boot_from_volume(self): # check the volume that attached on server cmd_output = self.openstack( - 'volume show ' + - volumes_attached[0]["id"], + 'volume show ' + volumes_attached[0]["id"], parse_output=True, ) # The volume size should be what we specified on the command line. 
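The `--block-device-mapping` arguments in the hunks above use the legacy `<dev>=<source>:<type>:<size>:<delete-on-termination>` shorthand; for example `vdb=<image>:image:1:true`, which the surrounding comments describe as creating a 1 GB volume from the image, attaching it at /dev/vdb and deleting it with the server. A rough, purely illustrative sketch of how such a string decomposes (not code from the patch; the defaults are assumptions):

def parse_bdm_shorthand(value):
    # Split "vdb=<source>:<type>:<size>:<delete>" into named fields. Trailing
    # fields are treated as optional here; the real CLI parsing may differ.
    dev, _, mapping = value.partition('=')
    source, source_type, size, delete = (mapping.split(':') + ['', '', ''])[:4]
    return {
        'device': dev,                             # e.g. vdb
        'source': source,                          # image/volume/snapshot name or ID
        'type': source_type or 'volume',           # volume, snapshot or image
        'size': int(size) if size else None,       # size in GB
        'delete_on_termination': delete == 'true',
    }

# parse_bdm_shorthand('vdb=cirros:image:1:true')
# -> {'device': 'vdb', 'source': 'cirros', 'type': 'image', 'size': 1,
#     'delete_on_termination': True}
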
@@ -1043,8 +1043,7 @@ def test_boot_from_volume(self): # delete server, then check the attached volume was not deleted self.openstack('server delete --wait ' + server_name) cmd_output = self.openstack( - 'volume show ' + - attached_volume_id, + 'volume show ' + attached_volume_id, parse_output=True, ) # check the volume is in 'available' status @@ -1055,12 +1054,16 @@ def test_server_create_with_none_network(self): server_name = uuid.uuid4().hex server = self.openstack( # auto/none enable in nova micro version (v2.37+) - '--os-compute-api-version 2.37 ' + - 'server create ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - '--nic none ' + - server_name, + '--os-compute-api-version 2.37 ' + + 'server create ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + '--nic none ' + + server_name, parse_output=True, ) self.assertIsNotNone(server["id"]) @@ -1099,14 +1102,24 @@ def test_server_create_with_security_group(self): server_name = uuid.uuid4().hex server = self.openstack( - 'server create ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + + 'server create ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + # Security group id is integer in nova-network, convert to string - '--security-group ' + str(security_group1['id']) + ' ' + - '--security-group ' + security_group2['name'] + ' ' + - self.network_arg + ' ' + - server_name, + '--security-group ' + + str(security_group1['id']) + + ' ' + + '--security-group ' + + security_group2['name'] + + ' ' + + self.network_arg + + ' ' + + server_name, parse_output=True, ) self.addCleanup(self.openstack, 'server delete --wait ' + server_name) @@ -1136,29 +1149,38 @@ def test_server_create_with_empty_network_option_latest(self): try: self.openstack( # auto/none enable in nova micro version (v2.37+) - '--os-compute-api-version 2.37 ' + - 'server create ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - server_name + '--os-compute-api-version 2.37 ' + + 'server create ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + server_name ) except exceptions.CommandFailed as e: # If we got here, it shouldn't be because a nics value wasn't # provided to the server; it is likely due to something else in # the functional tests like there being multiple available # networks and the test didn't specify a specific network. 
- self.assertNotIn('nics are required after microversion 2.36', - e.stderr) + self.assertNotIn( + 'nics are required after microversion 2.36', e.stderr + ) def test_server_add_remove_network(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'server create ' + - '--network private ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - '--wait ' + - name, + 'server create ' + + '--network private ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + '--wait ' + + name, parse_output=True, ) @@ -1167,8 +1189,7 @@ def test_server_add_remove_network(self): self.addCleanup(self.openstack, 'server delete --wait ' + name) # add network and check 'public' is in server show - self.openstack( - 'server add network ' + name + ' public') + self.openstack('server add network ' + name + ' public') wait_time = 0 while wait_time < 60: @@ -1209,12 +1230,16 @@ def test_server_add_remove_network(self): def test_server_add_remove_port(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'server create ' + - '--network private ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - '--wait ' + - name, + 'server create ' + + '--network private ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + '--wait ' + + name, parse_output=True, ) @@ -1232,8 +1257,7 @@ def test_server_add_remove_port(self): self.assertNotIn(port_name, cmd_output) cmd_output = self.openstack( - 'port create ' + - '--network private ' + port_name, + 'port create ' + '--network private ' + port_name, parse_output=True, ) self.assertIsNotNone(cmd_output['id']) @@ -1270,7 +1294,7 @@ def test_server_add_remove_port(self): ) if ip_address in cmd_output['addresses']['private']: # Hang out for a bit and try again - print('retrying add port check') + print('retrying remove port check') wait_time += 10 time.sleep(10) else: @@ -1281,12 +1305,16 @@ def test_server_add_remove_port(self): def test_server_add_fixed_ip(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'server create ' + - '--network private ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - '--wait ' + - name, + 'server create ' + + '--network private ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + '--wait ' + + name, parse_output=True, ) @@ -1306,8 +1334,7 @@ def test_server_add_fixed_ip(self): self.assertNotIn(port_name, cmd_output) cmd_output = self.openstack( - 'port create ' + - '--network private ' + port_name, + 'port create ' + '--network private ' + port_name, parse_output=True, ) self.assertIsNotNone(cmd_output['id']) @@ -1315,8 +1342,13 @@ def test_server_add_fixed_ip(self): self.openstack('port delete ' + port_name) # add fixed ip to server, assert the ip address appears - self.openstack('server add fixed ip --fixed-ip-address ' + ip_address + - ' ' + name + ' private') + self.openstack( + 'server add fixed ip --fixed-ip-address ' + + ip_address + + ' ' + + name + + ' private' + ) wait_time = 0 while wait_time < 60: @@ -1334,17 +1366,107 @@ def test_server_add_fixed_ip(self): addresses = cmd_output['addresses']['private'] self.assertIn(ip_address, addresses) + def test_server_add_remove_security_group(self): + name = uuid.uuid4().hex + cmd_output = self.openstack( + 'server create ' + + '--network private ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + '--wait ' + + name, + 
parse_output=True, + ) + + self.assertIsNotNone(cmd_output['id']) + self.assertEqual(name, cmd_output['name']) + self.addCleanup(self.openstack, 'server delete --wait ' + name) + + # create security group + security_group_name = uuid.uuid4().hex + + cmd_output = self.openstack( + 'security group list', + parse_output=True, + ) + self.assertNotIn(security_group_name, cmd_output) + + cmd_output = self.openstack( + 'security group create ' + security_group_name, + parse_output=True, + ) + self.assertIsNotNone(cmd_output['id']) + self.addCleanup( + self.openstack, 'security group delete ' + security_group_name + ) + + # add security group to server, assert the name of the security group + # appears + self.openstack( + 'server add security group ' + name + ' ' + security_group_name + ) + + wait_time = 0 + while wait_time < 60: + cmd_output = self.openstack( + 'server show ' + name, + parse_output=True, + ) + if security_group_name not in [ + x['name'] for x in cmd_output['security_groups'] + ]: + # Hang out for a bit and try again + print('retrying add security group check') + wait_time += 10 + time.sleep(10) + else: + break + security_groups = [x['name'] for x in cmd_output['security_groups']] + self.assertIn(security_group_name, security_groups) + + # remove security group, assert the name of the security group doesn't + # appear + self.openstack( + 'server remove security group ' + name + ' ' + security_group_name + ) + + wait_time = 0 + while wait_time < 60: + cmd_output = self.openstack( + 'server show ' + name, + parse_output=True, + ) + if security_group_name not in [ + x['name'] for x in cmd_output['security_groups'] + ]: + # Hang out for a bit and try again + print('retrying remove security group check') + wait_time += 10 + time.sleep(10) + else: + break + security_groups = [x['name'] for x in cmd_output['security_groups']] + self.assertNotIn(security_group_name, security_groups) + def test_server_add_remove_volume(self): volume_wait_for = volume_common.BaseVolumeTests.wait_for_status server_name = uuid.uuid4().hex cmd_output = self.openstack( - 'server create ' + - '--network private ' + - '--flavor ' + self.flavor_name + ' ' + - '--image ' + self.image_name + ' ' + - '--wait ' + - server_name, + 'server create ' + + '--network private ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + '--wait ' + + server_name, parse_output=True, ) @@ -1355,9 +1477,7 @@ def test_server_add_remove_volume(self): volume_name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - volume_name, + 'volume create ' + '--size 1 ' + volume_name, parse_output=True, ) @@ -1368,10 +1488,12 @@ def test_server_add_remove_volume(self): volume_id = cmd_output['id'] cmd_output = self.openstack( - 'server add volume ' + - server_name + ' ' + - volume_name + ' ' + - '--tag bar', + 'server add volume ' + + server_name + + ' ' + + volume_name + + ' ' + + '--tag bar', parse_output=True, ) @@ -1379,8 +1501,7 @@ def test_server_add_remove_volume(self): self.assertEqual(volume_id, cmd_output['Volume ID']) cmd_output = self.openstack( - 'server volume list ' + - server_name, + 'server volume list ' + server_name, parse_output=True, ) @@ -1390,8 +1511,7 @@ def test_server_add_remove_volume(self): volume_wait_for('volume', volume_name, 'in-use') cmd_output = self.openstack( - 'server event list ' + - server_name, + 'server event list ' + server_name, parse_output=True, ) self.assertEqual(2, len(cmd_output)) @@ -1403,8 +1523,7 @@ def 
test_server_add_remove_volume(self): volume_wait_for('volume', volume_name, 'available') cmd_output = self.openstack( - 'server event list ' + - server_name, + 'server event list ' + server_name, parse_output=True, ) self.assertEqual(3, len(cmd_output)) @@ -1413,6 +1532,46 @@ def test_server_add_remove_volume(self): raw_output = self.openstack('server volume list ' + server_name) self.assertEqual('\n', raw_output) + def test_server_stop_start(self): + """Test server stop, start""" + server_name = uuid.uuid4().hex + cmd_output = self.openstack( + 'server create ' + + '--network private ' + + '--flavor ' + + self.flavor_name + + ' ' + + '--image ' + + self.image_name + + ' ' + + '--wait ' + + server_name, + parse_output=True, + ) + + self.assertIsNotNone(cmd_output['id']) + self.assertEqual(server_name, cmd_output['name']) + self.addCleanup(self.openstack, 'server delete --wait ' + server_name) + server_id = cmd_output['id'] + + cmd_output = self.openstack( + 'server stop ' + server_name, + ) + self.assertEqual("", cmd_output) + + # This is our test that the request succeeded. If it doesn't transition + # to SHUTOFF then it didn't work. + self.wait_for_status(server_id, "SHUTOFF") + + cmd_output = self.openstack( + 'server start ' + server_name, + ) + self.assertEqual("", cmd_output) + + # As above, this is our test that the request succeeded. If it doesn't + # transition to ACTIVE then it didn't work. + self.wait_for_status(server_id, "ACTIVE") + def test_server_migration_list(self): # Verify that the command does not raise an exception when we list # migrations, including when we specify a query. diff --git a/openstackclient/tests/functional/compute/v2/test_server_event.py b/openstackclient/tests/functional/compute/v2/test_server_event.py index 48147507d3..0457985a08 100644 --- a/openstackclient/tests/functional/compute/v2/test_server_event.py +++ b/openstackclient/tests/functional/compute/v2/test_server_event.py @@ -20,7 +20,7 @@ class ServerEventTests(common.ComputeTestCase): """Functional tests for server event""" def setUp(self): - super(ServerEventTests, self).setUp() + super().setUp() # NOTE(dtroyer): As long as these tests are read-only we can get away # with using the same server instance for all of them. 
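The `server event` hunks below look up a request ID in the parsed `server event list` output and pass it to `server event show`. A sketch of that lookup; the `'Action'` and `'Request ID'` keys are assumed column names for the parsed list rows, not something the patch itself shows:

def find_request_id(events, action):
    # events: parsed `server event list` rows; return the request ID of the
    # first row whose action matches, or None if there is no such row.
    for event in events:
        if event.get('Action') == action:
            return event.get('Request ID')
    return None

# e.g. request_id = find_request_id(cmd_output, 'create')
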
@@ -50,7 +50,6 @@ def test_server_event_list_and_show(self): 'server event show ' + self.server_name + ' ' + request_id, parse_output=True, ) - self.assertEqual(self.server_id, cmd_output.get('instance_uuid')) self.assertEqual(request_id, cmd_output.get('request_id')) self.assertEqual('create', cmd_output.get('action')) self.assertIsNotNone(cmd_output.get('events')) @@ -78,8 +77,6 @@ def test_server_event_list_and_show(self): 'server event show ' + self.server_name + ' ' + request_id, parse_output=True, ) - - self.assertEqual(self.server_id, cmd_output.get('instance_uuid')) self.assertEqual(request_id, cmd_output.get('request_id')) self.assertEqual('reboot', cmd_output.get('action')) self.assertIsNotNone(cmd_output.get('events')) @@ -96,8 +93,7 @@ def test_server_event_list_and_show_deleted_server(self): # And verify we can get the event list after it's deleted # Test 'server event list' for deleting cmd_output = self.openstack( - '--os-compute-api-version 2.21 ' - 'server event list ' + server_id, + '--os-compute-api-version 2.21 server event list ' + server_id, parse_output=True, ) request_id = None @@ -116,7 +112,6 @@ def test_server_event_list_and_show_deleted_server(self): 'server event show ' + server_id + ' ' + request_id, parse_output=True, ) - self.assertEqual(server_id, cmd_output.get('instance_uuid')) self.assertEqual(request_id, cmd_output.get('request_id')) self.assertEqual('delete', cmd_output.get('action')) self.assertIsNotNone(cmd_output.get('events')) diff --git a/openstackclient/tests/functional/compute/v2/test_server_group.py b/openstackclient/tests/functional/compute/v2/test_server_group.py index a599951c3e..e29f307e80 100644 --- a/openstackclient/tests/functional/compute/v2/test_server_group.py +++ b/openstackclient/tests/functional/compute/v2/test_server_group.py @@ -23,37 +23,22 @@ def test_server_group_delete(self): name1 = uuid.uuid4().hex name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'server group create ' + - '--policy affinity ' + - name1, + 'server group create ' + '--policy affinity ' + name1, parse_output=True, ) - self.assertEqual( - name1, - cmd_output['name'] - ) - self.assertEqual( - 'affinity', - cmd_output['policy'] - ) + self.assertEqual(name1, cmd_output['name']) + self.assertEqual('affinity', cmd_output['policy']) cmd_output = self.openstack( - 'server group create ' + - '--policy anti-affinity ' + - name2, + 'server group create ' + '--policy anti-affinity ' + name2, parse_output=True, ) - self.assertEqual( - name2, - cmd_output['name'] - ) - self.assertEqual( - 'anti-affinity', - cmd_output['policy'] - ) + self.assertEqual(name2, cmd_output['name']) + self.assertEqual('anti-affinity', cmd_output['policy']) del_output = self.openstack( - 'server group delete ' + name1 + ' ' + name2) + 'server group delete ' + name1 + ' ' + name2 + ) self.assertOutput('', del_output) def test_server_group_show_and_list(self): @@ -63,9 +48,7 @@ def test_server_group_show_and_list(self): # test server group show cmd_output = self.openstack( - 'server group create ' + - '--policy affinity ' + - name1, + 'server group create ' + '--policy affinity ' + name1, parse_output=True, ) self.addCleanup(self.openstack, 'server group delete ' + name1) @@ -73,19 +56,11 @@ def test_server_group_show_and_list(self): 'server group show ' + name1, parse_output=True, ) - self.assertEqual( - name1, - cmd_output['name'] - ) - self.assertEqual( - 'affinity', - cmd_output['policy'] - ) + self.assertEqual(name1, cmd_output['name']) + self.assertEqual('affinity', 
cmd_output['policy']) cmd_output = self.openstack( - 'server group create ' + - '--policy anti-affinity ' + - name2, + 'server group create ' + '--policy anti-affinity ' + name2, parse_output=True, ) self.addCleanup(self.openstack, 'server group delete ' + name2) @@ -93,14 +68,8 @@ def test_server_group_show_and_list(self): 'server group show ' + name2, parse_output=True, ) - self.assertEqual( - name2, - cmd_output['name'] - ) - self.assertEqual( - 'anti-affinity', - cmd_output['policy'] - ) + self.assertEqual(name2, cmd_output['name']) + self.assertEqual('anti-affinity', cmd_output['policy']) # test server group list cmd_output = self.openstack( diff --git a/openstackclient/tests/functional/identity/v2/common.py b/openstackclient/tests/functional/identity/v2/common.py index 43c0cbf273..dd2e271933 100644 --- a/openstackclient/tests/functional/identity/v2/common.py +++ b/openstackclient/tests/functional/identity/v2/common.py @@ -23,27 +23,43 @@ class IdentityTests(base.TestCase): - """Functional tests for Identity commands. """ + """Functional tests for Identity commands.""" USER_FIELDS = ['email', 'enabled', 'id', 'name', 'project_id', 'username'] PROJECT_FIELDS = ['enabled', 'id', 'name', 'description'] TOKEN_FIELDS = ['expires', 'id', 'project_id', 'user_id'] ROLE_FIELDS = ['id', 'name', 'domain_id'] SERVICE_FIELDS = ['id', 'enabled', 'name', 'type', 'description'] - ENDPOINT_FIELDS = ['id', 'region', 'service_id', 'service_name', - 'service_type', 'publicurl', - 'adminurl', 'internalurl'] - - EC2_CREDENTIALS_FIELDS = ['access', 'project_id', 'secret', - 'trust_id', 'user_id'] - EC2_CREDENTIALS_LIST_HEADERS = ['Access', 'Secret', - 'Project ID', 'User ID'] + ENDPOINT_FIELDS = [ + 'id', + 'region', + 'service_id', + 'service_name', + 'service_type', + 'publicurl', + 'adminurl', + 'internalurl', + ] + + EC2_CREDENTIALS_FIELDS = [ + 'access', + 'project_id', + 'secret', + 'trust_id', + 'user_id', + ] + EC2_CREDENTIALS_LIST_HEADERS = [ + 'Access', + 'Secret', + 'Project ID', + 'User ID', + ] CATALOG_LIST_HEADERS = ['Name', 'Type', 'Endpoints'] ENDPOINT_LIST_HEADERS = ['ID', 'Region', 'Service Name', 'Service Type'] @classmethod def setUpClass(cls): - super(IdentityTests, cls).setUpClass() + super().setUpClass() # create dummy project cls.project_name = data_utils.rand_name('TestProject') cls.project_description = data_utils.rand_name('description') @@ -51,12 +67,9 @@ def setUpClass(cls): cls.openstack( '--os-identity-api-version 2 ' 'project create ' - '--description %(description)s ' + f'--description {cls.project_description} ' '--enable ' - '%(name)s' % { - 'description': cls.project_description, - 'name': cls.project_name, - } + f'{cls.project_name}' ) except tempest_exceptions.CommandFailed: # Good chance this is due to Identity v2 admin not being enabled @@ -70,15 +83,17 @@ def tearDownClass(cls): try: cls.openstack( '--os-identity-api-version 2 ' - 'project delete %s' % cls.project_name) + f'project delete {cls.project_name}' + ) finally: - super(IdentityTests, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(IdentityTests, self).setUp() + super().setUp() # prepare v2 env ver_fixture = fixtures.EnvironmentVariable( - 'OS_IDENTITY_API_VERSION', '2.0') + 'OS_IDENTITY_API_VERSION', '2.0' + ) self.useFixture(ver_fixture) auth_url = os.environ.get('OS_AUTH_URL') if auth_url: @@ -92,14 +107,14 @@ def _create_dummy_project(self, add_clean_up=True): project_description = data_utils.rand_name('description') raw_output = self.openstack( 'project create ' - '--description 
%(description)s ' - '--enable %(name)s' % {'description': project_description, - 'name': project_name}) + f'--description {project_description} ' + f'--enable {project_name}' + ) project = self.parse_show_as_object(raw_output) if add_clean_up: self.addCleanup( - self.openstack, - 'project delete %s' % project['id']) + self.openstack, 'project delete {}'.format(project['id']) + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.PROJECT_FIELDS) return project_name @@ -110,30 +125,31 @@ def _create_dummy_user(self, add_clean_up=True): email = data_utils.rand_name() + '@example.com' raw_output = self.openstack( 'user create ' - '--project %(project)s ' - '--password %(password)s ' - '--email %(email)s ' + f'--project {self.project_name} ' + f'--password {password} ' + f'--email {email} ' '--enable ' - '%(name)s' % {'project': self.project_name, - 'email': email, - 'password': password, - 'name': username}) + f'{username}' + ) if add_clean_up: self.addCleanup( self.openstack, - 'user delete %s' % self.parse_show_as_object(raw_output)['id']) + 'user delete {}'.format( + self.parse_show_as_object(raw_output)['id'] + ), + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.USER_FIELDS) return username def _create_dummy_role(self, add_clean_up=True): role_name = data_utils.rand_name('TestRole') - raw_output = self.openstack('role create %s' % role_name) + raw_output = self.openstack(f'role create {role_name}') role = self.parse_show_as_object(raw_output) if add_clean_up: self.addCleanup( - self.openstack, - 'role delete %s' % role['id']) + self.openstack, 'role delete {}'.format(role['id']) + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.ROLE_FIELDS) self.assertEqual(role_name, role['name']) @@ -145,8 +161,8 @@ def _create_dummy_ec2_credentials(self, add_clean_up=True): access_key = ec2_credentials['access'] if add_clean_up: self.addCleanup( - self.openstack, - 'ec2 credentials delete %s' % access_key) + self.openstack, f'ec2 credentials delete {access_key}' + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.EC2_CREDENTIALS_FIELDS) return access_key @@ -155,8 +171,9 @@ def _create_dummy_token(self, add_clean_up=True): raw_output = self.openstack('token issue') token = self.parse_show_as_object(raw_output) if add_clean_up: - self.addCleanup(self.openstack, - 'token revoke %s' % token['id']) + self.addCleanup( + self.openstack, 'token revoke {}'.format(token['id']) + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.TOKEN_FIELDS) return token['id'] @@ -167,15 +184,15 @@ def _create_dummy_service(self, add_clean_up=True): type_name = data_utils.rand_name('TestType') raw_output = self.openstack( 'service create ' - '--name %(name)s ' - '--description %(description)s ' - '%(type)s' % {'name': service_name, - 'description': description, - 'type': type_name}) + f'--name {service_name} ' + f'--description {description} ' + f'{type_name}' + ) if add_clean_up: service = self.parse_show_as_object(raw_output) - self.addCleanup(self.openstack, - 'service delete %s' % service['id']) + self.addCleanup( + self.openstack, 'service delete {}'.format(service['id']) + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.SERVICE_FIELDS) return service_name @@ -188,20 +205,17 @@ def _create_dummy_endpoint(self, add_clean_up=True): internal_url = data_utils.rand_url() raw_output = self.openstack( 'endpoint create ' - '--publicurl %(publicurl)s ' - '--adminurl %(adminurl)s ' - 
'--internalurl %(internalurl)s ' - '--region %(region)s ' - '%(service)s' % {'publicurl': public_url, - 'adminurl': admin_url, - 'internalurl': internal_url, - 'region': region_id, - 'service': service_name}) + f'--publicurl {public_url} ' + f'--adminurl {admin_url} ' + f'--internalurl {internal_url} ' + f'--region {region_id} ' + f'{service_name}' + ) endpoint = self.parse_show_as_object(raw_output) if add_clean_up: self.addCleanup( - self.openstack, - 'endpoint delete %s' % endpoint['id']) + self.openstack, 'endpoint delete {}'.format(endpoint['id']) + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.ENDPOINT_FIELDS) return endpoint['id'] diff --git a/openstackclient/tests/functional/identity/v2/test_catalog.py b/openstackclient/tests/functional/identity/v2/test_catalog.py index f403fbfc7b..67c6c36945 100644 --- a/openstackclient/tests/functional/identity/v2/test_catalog.py +++ b/openstackclient/tests/functional/identity/v2/test_catalog.py @@ -14,7 +14,6 @@ class CatalogTests(common.IdentityTests): - def test_catalog_list(self): raw_output = self.openstack('catalog list') items = self.parse_listing(raw_output) @@ -36,7 +35,7 @@ def test_catalog_show(self): | type | identity | +-----------+-------------------------------------------+ """ - raw_output = self.openstack('catalog show %s' % 'identity') + raw_output = self.openstack('catalog show {}'.format('identity')) items = self.parse_show(raw_output) # items may have multiple endpoint urls with empty key self.assert_show_fields(items, ['endpoints', 'name', 'type', '']) diff --git a/openstackclient/tests/functional/identity/v2/test_ec2_credentials.py b/openstackclient/tests/functional/identity/v2/test_ec2_credentials.py index 43dff91f2f..b72dc40145 100644 --- a/openstackclient/tests/functional/identity/v2/test_ec2_credentials.py +++ b/openstackclient/tests/functional/identity/v2/test_ec2_credentials.py @@ -14,14 +14,13 @@ class EC2CredentialsTests(common.IdentityTests): - def test_ec2_credentials_create(self): self._create_dummy_ec2_credentials() def test_ec2_credentials_delete(self): access_key = self._create_dummy_ec2_credentials(add_clean_up=False) raw_output = self.openstack( - 'ec2 credentials delete %s' % access_key, + f'ec2 credentials delete {access_key}', ) self.assertEqual(0, len(raw_output)) @@ -42,7 +41,7 @@ def test_ec2_credentials_list(self): def test_ec2_credentials_show(self): access_key = self._create_dummy_ec2_credentials() show_output = self.openstack( - 'ec2 credentials show %s' % access_key, + f'ec2 credentials show {access_key}', ) items = self.parse_show(show_output) self.assert_show_fields(items, self.EC2_CREDENTIALS_FIELDS) diff --git a/openstackclient/tests/functional/identity/v2/test_endpoint.py b/openstackclient/tests/functional/identity/v2/test_endpoint.py index 9df5ca8aa3..8a0077b7f2 100644 --- a/openstackclient/tests/functional/identity/v2/test_endpoint.py +++ b/openstackclient/tests/functional/identity/v2/test_endpoint.py @@ -14,21 +14,20 @@ class EndpointTests(common.IdentityTests): - def test_endpoint_create(self): self._create_dummy_endpoint() def test_endpoint_delete(self): endpoint_id = self._create_dummy_endpoint(add_clean_up=False) - raw_output = self.openstack( - 'endpoint delete %s' % endpoint_id) + raw_output = self.openstack(f'endpoint delete {endpoint_id}') self.assertEqual(0, len(raw_output)) def test_endpoint_multi_delete(self): endpoint_id_1 = self._create_dummy_endpoint(add_clean_up=False) endpoint_id_2 = self._create_dummy_endpoint(add_clean_up=False) raw_output = 
self.openstack( - 'endpoint delete ' + endpoint_id_1 + ' ' + endpoint_id_2) + 'endpoint delete ' + endpoint_id_1 + ' ' + endpoint_id_2 + ) self.assertEqual(0, len(raw_output)) def test_endpoint_list(self): @@ -40,6 +39,6 @@ def test_endpoint_list(self): def test_endpoint_show(self): endpoint_id = self._create_dummy_endpoint() - raw_output = self.openstack('endpoint show %s' % endpoint_id) + raw_output = self.openstack(f'endpoint show {endpoint_id}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.ENDPOINT_FIELDS) diff --git a/openstackclient/tests/functional/identity/v2/test_project.py b/openstackclient/tests/functional/identity/v2/test_project.py index 38777c36c7..2dfc95edb8 100644 --- a/openstackclient/tests/functional/identity/v2/test_project.py +++ b/openstackclient/tests/functional/identity/v2/test_project.py @@ -16,22 +16,18 @@ class ProjectTests(common.IdentityTests): - def test_project_create(self): project_name = data_utils.rand_name('TestProject') description = data_utils.rand_name('description') raw_output = self.openstack( 'project create ' - '--description %(description)s ' + f'--description {description} ' '--enable ' '--property k1=v1 ' '--property k2=v2 ' - '%(name)s' % {'description': description, - 'name': project_name}) - self.addCleanup( - self.openstack, - 'project delete %s' % project_name + f'{project_name}' ) + self.addCleanup(self.openstack, f'project delete {project_name}') items = self.parse_show(raw_output) show_fields = list(self.PROJECT_FIELDS) show_fields.extend(['k1', 'k2']) @@ -42,8 +38,7 @@ def test_project_create(self): def test_project_delete(self): project_name = self._create_dummy_project(add_clean_up=False) - raw_output = self.openstack( - 'project delete %s' % project_name) + raw_output = self.openstack(f'project delete {project_name}') self.assertEqual(0, len(raw_output)) def test_project_list(self): @@ -56,16 +51,14 @@ def test_project_set(self): new_project_name = data_utils.rand_name('NewTestProject') raw_output = self.openstack( 'project set ' - '--name %(new_name)s ' + f'--name {new_project_name} ' '--disable ' '--property k0=v0 ' - '%(name)s' % {'new_name': new_project_name, - 'name': project_name}) + f'{project_name}' + ) self.assertEqual(0, len(raw_output)) # check project details - raw_output = self.openstack( - 'project show %s' % new_project_name - ) + raw_output = self.openstack(f'project show {new_project_name}') items = self.parse_show(raw_output) fields = list(self.PROJECT_FIELDS) fields.extend(['properties']) @@ -77,9 +70,7 @@ def test_project_set(self): def test_project_show(self): project_name = self._create_dummy_project() - raw_output = self.openstack( - 'project show %s' % project_name - ) + raw_output = self.openstack(f'project show {project_name}') items = self.parse_show(raw_output) fields = list(self.PROJECT_FIELDS) fields.extend(['properties']) diff --git a/openstackclient/tests/functional/identity/v2/test_role.py b/openstackclient/tests/functional/identity/v2/test_role.py index 124603d8b6..ec6134012f 100644 --- a/openstackclient/tests/functional/identity/v2/test_role.py +++ b/openstackclient/tests/functional/identity/v2/test_role.py @@ -14,13 +14,12 @@ class RoleTests(common.IdentityTests): - def test_role_create(self): self._create_dummy_role() def test_role_delete(self): role_name = self._create_dummy_role(add_clean_up=False) - raw_output = self.openstack('role delete %s' % role_name) + raw_output = self.openstack(f'role delete {role_name}') self.assertEqual(0, len(raw_output)) def 
test_role_list(self): @@ -31,7 +30,7 @@ def test_role_list(self): def test_role_show(self): role_name = self._create_dummy_role() - raw_output = self.openstack('role show %s' % role_name) + raw_output = self.openstack(f'role show {role_name}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.ROLE_FIELDS) @@ -40,19 +39,17 @@ def test_role_add(self): username = self._create_dummy_user() raw_output = self.openstack( 'role add ' - '--project %(project)s ' - '--user %(user)s ' - '%(role)s' % {'project': self.project_name, - 'user': username, - 'role': role_name}) + f'--project {self.project_name} ' + f'--user {username} ' + f'{role_name}' + ) self.addCleanup( self.openstack, 'role remove ' - '--project %(project)s ' - '--user %(user)s ' - '%(role)s' % {'project': self.project_name, - 'user': username, - 'role': role_name}) + f'--project {self.project_name} ' + f'--user {username} ' + f'{role_name}', + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.ROLE_FIELDS) @@ -61,18 +58,16 @@ def test_role_remove(self): username = self._create_dummy_user() add_raw_output = self.openstack( 'role add ' - '--project %(project)s ' - '--user %(user)s ' - '%(role)s' % {'project': self.project_name, - 'user': username, - 'role': role_name}) + f'--project {self.project_name} ' + f'--user {username} ' + f'{role_name}' + ) del_raw_output = self.openstack( 'role remove ' - '--project %(project)s ' - '--user %(user)s ' - '%(role)s' % {'project': self.project_name, - 'user': username, - 'role': role_name}) + f'--project {self.project_name} ' + f'--user {username} ' + f'{role_name}' + ) items = self.parse_show(add_raw_output) self.assert_show_fields(items, self.ROLE_FIELDS) self.assertEqual(0, len(del_raw_output)) diff --git a/openstackclient/tests/functional/identity/v2/test_service.py b/openstackclient/tests/functional/identity/v2/test_service.py index d0e0380404..5f1611cafd 100644 --- a/openstackclient/tests/functional/identity/v2/test_service.py +++ b/openstackclient/tests/functional/identity/v2/test_service.py @@ -14,20 +14,20 @@ class ServiceTests(common.IdentityTests): - def test_service_create(self): self._create_dummy_service() def test_service_delete(self): service_name = self._create_dummy_service(add_clean_up=False) - raw_output = self.openstack('service delete %s' % service_name) + raw_output = self.openstack(f'service delete {service_name}') self.assertEqual(0, len(raw_output)) def test_service_multi_delete(self): service_name_1 = self._create_dummy_service(add_clean_up=False) service_name_2 = self._create_dummy_service(add_clean_up=False) raw_output = self.openstack( - 'service delete ' + service_name_1 + ' ' + service_name_2) + 'service delete ' + service_name_1 + ' ' + service_name_2 + ) self.assertEqual(0, len(raw_output)) def test_service_list(self): @@ -38,7 +38,6 @@ def test_service_list(self): def test_service_show(self): service_name = self._create_dummy_service() - raw_output = self.openstack( - 'service show %s' % service_name) + raw_output = self.openstack(f'service show {service_name}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.SERVICE_FIELDS) diff --git a/openstackclient/tests/functional/identity/v2/test_token.py b/openstackclient/tests/functional/identity/v2/test_token.py index f856974484..51be24319a 100644 --- a/openstackclient/tests/functional/identity/v2/test_token.py +++ b/openstackclient/tests/functional/identity/v2/test_token.py @@ -14,11 +14,10 @@ class TokenTests(common.IdentityTests): - def 
test_token_issue(self): self._create_dummy_token() def test_token_revoke(self): token_id = self._create_dummy_token(add_clean_up=False) - raw_output = self.openstack('token revoke %s' % token_id) + raw_output = self.openstack(f'token revoke {token_id}') self.assertEqual(0, len(raw_output)) diff --git a/openstackclient/tests/functional/identity/v2/test_user.py b/openstackclient/tests/functional/identity/v2/test_user.py index ac609b94c7..c92fc87927 100644 --- a/openstackclient/tests/functional/identity/v2/test_user.py +++ b/openstackclient/tests/functional/identity/v2/test_user.py @@ -17,13 +17,12 @@ class UserTests(common.IdentityTests): - def test_user_create(self): self._create_dummy_user() def test_user_delete(self): username = self._create_dummy_user(add_clean_up=False) - raw_output = self.openstack('user delete %s' % username) + raw_output = self.openstack(f'user delete {username}') self.assertEqual(0, len(raw_output)) def test_user_list(self): @@ -33,28 +32,28 @@ def test_user_list(self): def test_user_set(self): username = self._create_dummy_user() - raw_output = self.openstack('user show %s' % username) + raw_output = self.openstack(f'user show {username}') user = self.parse_show_as_object(raw_output) new_username = data_utils.rand_name('NewTestUser') new_email = data_utils.rand_name() + '@example.com' - raw_output = self.openstack('user set ' - '--email %(email)s ' - '--name %(new_name)s ' - '%(id)s' % {'email': new_email, - 'new_name': new_username, - 'id': user['id']}) + raw_output = self.openstack( + 'user set --email {email} --name {new_name} {id}'.format( + email=new_email, new_name=new_username, id=user['id'] + ) + ) self.assertEqual(0, len(raw_output)) - raw_output = self.openstack('user show %s' % new_username) + raw_output = self.openstack(f'user show {new_username}') new_user = self.parse_show_as_object(raw_output) self.assertEqual(user['id'], new_user['id']) self.assertEqual(new_email, new_user['email']) def test_user_show(self): username = self._create_dummy_user() - raw_output = self.openstack('user show %s' % username) + raw_output = self.openstack(f'user show {username}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.USER_FIELDS) def test_bad_user_command(self): - self.assertRaises(exceptions.CommandFailed, - self.openstack, 'user unlist') + self.assertRaises( + exceptions.CommandFailed, self.openstack, 'user unlist' + ) diff --git a/openstackclient/tests/functional/identity/v3/common.py b/openstackclient/tests/functional/identity/v3/common.py index a5edd9a549..9f21374ff5 100644 --- a/openstackclient/tests/functional/identity/v3/common.py +++ b/openstackclient/tests/functional/identity/v3/common.py @@ -23,61 +23,143 @@ class IdentityTests(base.TestCase): - """Functional tests for Identity commands. 
""" + """Functional tests for Identity commands.""" DOMAIN_FIELDS = ['description', 'enabled', 'id', 'name'] GROUP_FIELDS = ['description', 'domain_id', 'id', 'name'] TOKEN_FIELDS = ['expires', 'id', 'project_id', 'user_id'] - USER_FIELDS = ['email', 'enabled', 'id', 'name', 'name', - 'domain_id', 'default_project_id', 'description', - 'password_expires_at'] - PROJECT_FIELDS = ['description', 'id', 'domain_id', 'is_domain', - 'enabled', 'name', 'parent_id'] + USER_FIELDS = [ + 'email', + 'enabled', + 'id', + 'name', + 'domain_id', + 'default_project_id', + 'description', + 'password_expires_at', + ] + PROJECT_FIELDS = [ + 'description', + 'id', + 'domain_id', + 'is_domain', + 'enabled', + 'name', + 'parent_id', + ] ROLE_FIELDS = ['id', 'name', 'domain_id', 'description'] SERVICE_FIELDS = ['id', 'enabled', 'name', 'type', 'description'] - REGION_FIELDS = ['description', 'enabled', 'parent_region', 'region'] - ENDPOINT_FIELDS = ['id', 'region', 'region_id', 'service_id', - 'service_name', 'service_type', 'enabled', - 'interface', 'url'] + REGION_FIELDS = ['description', 'parent_region', 'region'] + ENDPOINT_FIELDS = [ + 'id', + 'region', + 'region_id', + 'service_id', + 'service_name', + 'service_type', + 'enabled', + 'interface', + 'url', + ] REGION_LIST_HEADERS = ['Region', 'Parent Region', 'Description'] - ENDPOINT_LIST_HEADERS = ['ID', 'Region', 'Service Name', 'Service Type', - 'Enabled', 'Interface', 'URL'] + ENDPOINT_LIST_HEADERS = [ + 'ID', + 'Region', + 'Service Name', + 'Service Type', + 'Enabled', + 'Interface', + 'URL', + ] ENDPOINT_LIST_PROJECT_HEADERS = ['ID', 'Name'] - IDENTITY_PROVIDER_FIELDS = ['description', 'enabled', 'id', 'remote_ids', - 'domain_id'] + IDENTITY_PROVIDER_FIELDS = [ + 'description', + 'enabled', + 'id', + 'remote_ids', + 'domain_id', + ] IDENTITY_PROVIDER_LIST_HEADERS = ['ID', 'Enabled', 'Description'] - SERVICE_PROVIDER_FIELDS = ['auth_url', 'description', 'enabled', - 'id', 'relay_state_prefix', 'sp_url'] - SERVICE_PROVIDER_LIST_HEADERS = ['ID', 'Enabled', 'Description', - 'Auth URL'] - IMPLIED_ROLE_LIST_HEADERS = ['Prior Role ID', 'Prior Role Name', - 'Implied Role ID', 'Implied Role Name'] - REGISTERED_LIMIT_FIELDS = ['id', 'service_id', 'resource_name', - 'default_limit', 'description', 'region_id'] - REGISTERED_LIMIT_LIST_HEADERS = ['ID', 'Service ID', 'Resource Name', - 'Default Limit', 'Description', - 'Region ID'] - LIMIT_FIELDS = ['id', 'project_id', 'service_id', 'resource_name', - 'resource_limit', 'description', 'region_id'] - LIMIT_LIST_HEADERS = ['ID', 'Project ID', 'Service ID', 'Resource Name', - 'Resource Limit', 'Description', 'Region ID'] + SERVICE_PROVIDER_FIELDS = [ + 'auth_url', + 'description', + 'enabled', + 'id', + 'relay_state_prefix', + 'sp_url', + ] + SERVICE_PROVIDER_LIST_HEADERS = [ + 'ID', + 'Enabled', + 'Description', + 'Auth URL', + ] + IMPLIED_ROLE_LIST_HEADERS = [ + 'Prior Role ID', + 'Prior Role Name', + 'Implied Role ID', + 'Implied Role Name', + ] + ROLE_ASSIGNMENT_LIST_HEADERS = [ + 'Role', + 'User', + 'Group', + 'Project', + 'Domain', + 'System', + 'Inherited', + ] + REGISTERED_LIMIT_FIELDS = [ + 'id', + 'service_id', + 'resource_name', + 'default_limit', + 'description', + 'region_id', + ] + REGISTERED_LIMIT_LIST_HEADERS = [ + 'ID', + 'Service ID', + 'Resource Name', + 'Default Limit', + 'Description', + 'Region ID', + ] + LIMIT_FIELDS = [ + 'id', + 'project_id', + 'service_id', + 'resource_name', + 'resource_limit', + 'description', + 'region_id', + ] + LIMIT_LIST_HEADERS = [ + 'ID', + 'Project ID', + 'Service 
ID', + 'Resource Name', + 'Resource Limit', + 'Description', + 'Region ID', + ] @classmethod def setUpClass(cls): - super(IdentityTests, cls).setUpClass() + super().setUpClass() # create dummy domain cls.domain_name = data_utils.rand_name('TestDomain') cls.domain_description = data_utils.rand_name('description') cls.openstack( '--os-identity-api-version 3 ' 'domain create ' - '--description %(description)s ' + f'--description {cls.domain_description} ' '--enable ' - '%(name)s' % {'description': cls.domain_description, - 'name': cls.domain_name}) + f'{cls.domain_name}' + ) # create dummy project cls.project_name = data_utils.rand_name('TestProject') @@ -85,32 +167,37 @@ def setUpClass(cls): cls.openstack( '--os-identity-api-version 3 ' 'project create ' - '--domain %(domain)s ' - '--description %(description)s ' + f'--domain {cls.domain_name} ' + f'--description {cls.project_description} ' '--enable ' - '%(name)s' % {'domain': cls.domain_name, - 'description': cls.project_description, - 'name': cls.project_name}) + f'{cls.project_name}' + ) @classmethod def tearDownClass(cls): try: # delete dummy project - cls.openstack('--os-identity-api-version 3 ' - 'project delete %s' % cls.project_name) + cls.openstack( + '--os-identity-api-version 3 ' + f'project delete {cls.project_name}' + ) # disable and delete dummy domain - cls.openstack('--os-identity-api-version 3 ' - 'domain set --disable %s' % cls.domain_name) - cls.openstack('--os-identity-api-version 3 ' - 'domain delete %s' % cls.domain_name) + cls.openstack( + '--os-identity-api-version 3 ' + f'domain set --disable {cls.domain_name}' + ) + cls.openstack( + f'--os-identity-api-version 3 domain delete {cls.domain_name}' + ) finally: - super(IdentityTests, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(IdentityTests, self).setUp() + super().setUp() # prepare v3 env ver_fixture = fixtures.EnvironmentVariable( - 'OS_IDENTITY_API_VERSION', '3') + 'OS_IDENTITY_API_VERSION', '3' + ) self.useFixture(ver_fixture) auth_url = os.environ.get('OS_AUTH_URL') if auth_url: @@ -126,36 +213,34 @@ def _create_dummy_user(self, add_clean_up=True): description = data_utils.rand_name('description') raw_output = self.openstack( 'user create ' - '--domain %(domain)s ' - '--project %(project)s ' - '--project-domain %(project_domain)s ' - '--password %(password)s ' - '--email %(email)s ' - '--description %(description)s ' + f'--domain {self.domain_name} ' + f'--project {self.project_name} ' + f'--project-domain {self.domain_name} ' + f'--password {password} ' + f'--email {email} ' + f'--description {description} ' '--enable ' - '%(name)s' % {'domain': self.domain_name, - 'project': self.project_name, - 'project_domain': self.domain_name, - 'email': email, - 'password': password, - 'description': description, - 'name': username}) + f'{username}' + ) if add_clean_up: self.addCleanup( self.openstack, - 'user delete %s' % self.parse_show_as_object(raw_output)['id']) + 'user delete {}'.format( + self.parse_show_as_object(raw_output)['id'] + ), + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.USER_FIELDS) return username def _create_dummy_role(self, add_clean_up=True): role_name = data_utils.rand_name('TestRole') - raw_output = self.openstack('role create %s' % role_name) + raw_output = self.openstack(f'role create {role_name}') role = self.parse_show_as_object(raw_output) if add_clean_up: self.addCleanup( - self.openstack, - 'role delete %s' % role['id']) + self.openstack, 'role delete {}'.format(role['id']) + ) items = 
self.parse_show(raw_output) self.assert_show_fields(items, self.ROLE_FIELDS) self.assertEqual(role_name, role['name']) @@ -166,9 +251,9 @@ def _create_dummy_implied_role(self, add_clean_up=True): implied_role_name = self._create_dummy_role(add_clean_up) self.openstack( 'implied role create ' - '--implied-role %(implied_role)s ' - '%(role)s' % {'implied_role': implied_role_name, - 'role': role_name}) + f'--implied-role {implied_role_name} ' + f'{role_name}' + ) return implied_role_name, role_name @@ -177,18 +262,15 @@ def _create_dummy_group(self, add_clean_up=True): description = data_utils.rand_name('description') raw_output = self.openstack( 'group create ' - '--domain %(domain)s ' - '--description %(description)s ' - '%(name)s' % {'domain': self.domain_name, - 'description': description, - 'name': group_name}) + f'--domain {self.domain_name} ' + f'--description {description} ' + f'{group_name}' + ) if add_clean_up: self.addCleanup( self.openstack, - 'group delete ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': group_name}) + f'group delete --domain {self.domain_name} {group_name}', + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.GROUP_FIELDS) return group_name @@ -198,17 +280,13 @@ def _create_dummy_domain(self, add_clean_up=True): domain_description = data_utils.rand_name('description') self.openstack( 'domain create ' - '--description %(description)s ' - '--enable %(name)s' % {'description': domain_description, - 'name': domain_name}) + f'--description {domain_description} ' + f'--enable {domain_name}' + ) if add_clean_up: + self.addCleanup(self.openstack, f'domain delete {domain_name}') self.addCleanup( - self.openstack, - 'domain delete %s' % domain_name - ) - self.addCleanup( - self.openstack, - 'domain set --disable %s' % domain_name + self.openstack, f'domain set --disable {domain_name}' ) return domain_name @@ -217,18 +295,15 @@ def _create_dummy_project(self, add_clean_up=True): project_description = data_utils.rand_name('description') self.openstack( 'project create ' - '--domain %(domain)s ' - '--description %(description)s ' - '--enable %(name)s' % {'domain': self.domain_name, - 'description': project_description, - 'name': project_name}) + f'--domain {self.domain_name} ' + f'--description {project_description} ' + f'--enable {project_name}' + ) if add_clean_up: self.addCleanup( self.openstack, - 'project delete ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': project_name}) + f'project delete --domain {self.domain_name} {project_name}', + ) return project_name def _create_dummy_region(self, parent_region=None, add_clean_up=True): @@ -236,17 +311,15 @@ def _create_dummy_region(self, parent_region=None, add_clean_up=True): description = data_utils.rand_name('description') parent_region_arg = '' if parent_region is not None: - parent_region_arg = '--parent-region %s' % parent_region + parent_region_arg = f'--parent-region {parent_region}' raw_output = self.openstack( 'region create ' - '%(parent_region_arg)s ' - '--description %(description)s ' - '%(id)s' % {'parent_region_arg': parent_region_arg, - 'description': description, - 'id': region_id}) + f'{parent_region_arg} ' + f'--description {description} ' + f'{region_id}' + ) if add_clean_up: - self.addCleanup(self.openstack, - 'region delete %s' % region_id) + self.addCleanup(self.openstack, f'region delete {region_id}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.REGION_FIELDS) return region_id @@ -257,16 
+330,16 @@ def _create_dummy_service(self, add_clean_up=True): type_name = data_utils.rand_name('TestType') raw_output = self.openstack( 'service create ' - '--name %(name)s ' - '--description %(description)s ' + f'--name {service_name} ' + f'--description {description} ' '--enable ' - '%(type)s' % {'name': service_name, - 'description': description, - 'type': type_name}) + f'{type_name}' + ) if add_clean_up: service = self.parse_show_as_object(raw_output) - self.addCleanup(self.openstack, - 'service delete %s' % service['id']) + self.addCleanup( + self.openstack, 'service delete {}'.format(service['id']) + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.SERVICE_FIELDS) return service_name @@ -277,19 +350,17 @@ def _create_dummy_endpoint(self, interface='public', add_clean_up=True): endpoint_url = data_utils.rand_url() raw_output = self.openstack( 'endpoint create ' - '--region %(region)s ' + f'--region {region_id} ' '--enable ' - '%(service)s ' - '%(interface)s ' - '%(url)s' % {'region': region_id, - 'service': service_name, - 'interface': interface, - 'url': endpoint_url}) + f'{service_name} ' + f'{interface} ' + f'{endpoint_url}' + ) endpoint = self.parse_show_as_object(raw_output) if add_clean_up: self.addCleanup( - self.openstack, - 'endpoint delete %s' % endpoint['id']) + self.openstack, 'endpoint delete {}'.format(endpoint['id']) + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.ENDPOINT_FIELDS) return endpoint['id'] @@ -299,14 +370,15 @@ def _create_dummy_idp(self, add_clean_up=True): description = data_utils.rand_name('description') raw_output = self.openstack( 'identity provider create ' - ' %(name)s ' - '--description %(description)s ' - '--enable ' % {'name': identity_provider, - 'description': description}) + f' {identity_provider} ' + f'--description {description} ' + '--enable ' + ) if add_clean_up: self.addCleanup( self.openstack, - 'identity provider delete %s' % identity_provider) + f'identity provider delete {identity_provider}', + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.IDENTITY_PROVIDER_FIELDS) return identity_provider @@ -316,16 +388,16 @@ def _create_dummy_sp(self, add_clean_up=True): description = data_utils.rand_name('description') raw_output = self.openstack( 'service provider create ' - ' %(name)s ' - '--description %(description)s ' + f' {service_provider} ' + f'--description {description} ' '--auth-url https://sp.example.com:35357 ' '--service-provider-url https://sp.example.com:5000 ' - '--enable ' % {'name': service_provider, - 'description': description}) + '--enable ' + ) if add_clean_up: self.addCleanup( - self.openstack, - 'service provider delete %s' % service_provider) + self.openstack, f'service provider delete {service_provider}' + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.SERVICE_PROVIDER_FIELDS) return service_provider @@ -336,14 +408,14 @@ def _create_dummy_registered_limit(self, add_clean_up=True): params = { 'service_name': service_name, 'default_limit': 10, - 'resource_name': resource_name + 'resource_name': resource_name, } raw_output = self.openstack( 'registered limit create' - ' --service %(service_name)s' - ' --default-limit %(default_limit)s' - ' %(resource_name)s' % params, - cloud=SYSTEM_CLOUD + ' --service {service_name}' + ' --default-limit {default_limit}' + ' {resource_name}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) registered_limit_id = self._extract_value_from_items('id', items) 
@@ -351,8 +423,8 @@ def _create_dummy_registered_limit(self, add_clean_up=True): if add_clean_up: self.addCleanup( self.openstack, - 'registered limit delete %s' % registered_limit_id, - cloud=SYSTEM_CLOUD + f'registered limit delete {registered_limit_id}', + cloud=SYSTEM_CLOUD, ) self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS) @@ -368,8 +440,8 @@ def _create_dummy_limit(self, add_clean_up=True): registered_limit_id = self._create_dummy_registered_limit() raw_output = self.openstack( - 'registered limit show %s' % registered_limit_id, - cloud=SYSTEM_CLOUD + f'registered limit show {registered_limit_id}', + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) resource_name = self._extract_value_from_items('resource_name', items) @@ -377,7 +449,7 @@ def _create_dummy_limit(self, add_clean_up=True): resource_limit = 15 project_name = self._create_dummy_project() - raw_output = self.openstack('project show %s' % project_name) + raw_output = self.openstack(f'project show {project_name}') items = self.parse_show(raw_output) project_id = self._extract_value_from_items('id', items) @@ -385,24 +457,25 @@ def _create_dummy_limit(self, add_clean_up=True): 'project_id': project_id, 'service_id': service_id, 'resource_name': resource_name, - 'resource_limit': resource_limit + 'resource_limit': resource_limit, } raw_output = self.openstack( 'limit create' - ' --project %(project_id)s' - ' --service %(service_id)s' - ' --resource-limit %(resource_limit)s' - ' %(resource_name)s' % params, - cloud=SYSTEM_CLOUD + ' --project {project_id}' + ' --service {service_id}' + ' --resource-limit {resource_limit}' + ' {resource_name}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) limit_id = self._extract_value_from_items('id', items) if add_clean_up: self.addCleanup( - self.openstack, 'limit delete %s' % limit_id, - cloud=SYSTEM_CLOUD + self.openstack, + f'limit delete {limit_id}', + cloud=SYSTEM_CLOUD, ) self.assert_show_fields(items, self.LIMIT_FIELDS) diff --git a/openstackclient/tests/functional/identity/v3/test_access_rule.py b/openstackclient/tests/functional/identity/v3/test_access_rule.py new file mode 100644 index 0000000000..4f7225801f --- /dev/null +++ b/openstackclient/tests/functional/identity/v3/test_access_rule.py @@ -0,0 +1,86 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import ast +import json + +from tempest.lib.common.utils import data_utils + +from openstackclient.tests.functional.identity.v3 import common + + +class AccessRuleTests(common.IdentityTests): + ACCESS_RULE_FIELDS = [ + 'ID', + 'Service', + 'Method', + 'Path', + ] + ACCESS_RULE_LIST_HEADERS = [ + 'ID', + 'Service', + 'Method', + 'Path', + ] + + def setUp(self): + super().setUp() + + application_credential_name = data_utils.rand_name('name') + access_rules = json.dumps( + [ + { + 'method': 'GET', + 'path': '/v2.1/servers', + 'service': 'compute', + }, + { + 'method': 'GET', + 'path': '/v2.0/networks', + 'service': 'networking', + }, + ] + ) + raw_output = self.openstack( + f"application credential create {application_credential_name} " + f"--access-rules '{access_rules}'" + ) + # we immediately delete the application credential since it will leave + # the access rules around + self.openstack( + f'application credential delete {application_credential_name}' + ) + + items = self.parse_show_as_object(raw_output) + self.access_rule_ids = [ + x['id'] for x in ast.literal_eval(items['Access Rules']) + ] + self.addCleanup( + self.openstack, + 'access rule delete ' + + ' '.join([x for x in self.access_rule_ids]), + ) + + def test_access_rule(self): + # list + + raw_output = self.openstack('access rule list') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ACCESS_RULE_LIST_HEADERS) + + # show + + raw_output = self.openstack( + f'access rule show {self.access_rule_ids[0]}' + ) + items = self.parse_show(raw_output) + self.assert_show_fields(items, self.ACCESS_RULE_FIELDS) diff --git a/openstackclient/tests/functional/identity/v3/test_application_credential.py b/openstackclient/tests/functional/identity/v3/test_application_credential.py index daf6460785..20315c4e76 100644 --- a/openstackclient/tests/functional/identity/v3/test_application_credential.py +++ b/openstackclient/tests/functional/identity/v3/test_application_credential.py @@ -20,124 +20,133 @@ class ApplicationCredentialTests(common.IdentityTests): - - APPLICATION_CREDENTIAL_FIELDS = ['id', 'name', 'project_id', - 'description', 'roles', 'expires_at', - 'unrestricted'] - APPLICATION_CREDENTIAL_LIST_HEADERS = ['ID', 'Name', 'Project ID', - 'Description', 'Expires At'] + APPLICATION_CREDENTIAL_FIELDS = [ + 'ID', + 'Name', + 'Project ID', + 'Description', + 'Roles', + 'Expires At', + 'Unrestricted', + ] + APPLICATION_CREDENTIAL_LIST_HEADERS = [ + 'ID', + 'Name', + 'Project ID', + 'Description', + 'Expires At', + ] def test_application_credential_create(self): name = data_utils.rand_name('name') - raw_output = self.openstack('application credential create %(name)s' - % {'name': name}) + raw_output = self.openstack(f'application credential create {name}') self.addCleanup( self.openstack, - 'application credential delete %(name)s' % {'name': name}) + f'application credential delete {name}', + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.APPLICATION_CREDENTIAL_FIELDS) def _create_role_assignments(self): try: - user = self.openstack('configuration show -f value' - ' -c auth.username') + user = self.openstack( + 'configuration show -f value -c auth.username' + ) except Exception: - user = self.openstack('configuration show -f value' - ' -c auth.user_id') + user = self.openstack( + 'configuration show -f value -c auth.user_id' + ) try: - user_domain = self.openstack('configuration show -f value' - ' -c auth.user_domain_name') + user_domain = self.openstack( + 'configuration show -f 
value -c auth.user_domain_name' + ) except Exception: - user_domain = self.openstack('configuration show -f value' - ' -c auth.user_domain_id') + user_domain = self.openstack( + 'configuration show -f value -c auth.user_domain_id' + ) try: - project = self.openstack('configuration show -f value' - ' -c auth.project_name') + project = self.openstack( + 'configuration show -f value -c auth.project_name' + ) except Exception: - project = self.openstack('configuration show -f value' - ' -c auth.project_id') + project = self.openstack( + 'configuration show -f value -c auth.project_id' + ) try: - project_domain = self.openstack('configuration show -f value' - ' -c auth.project_domain_name') + project_domain = self.openstack( + 'configuration show -f value -c auth.project_domain_name' + ) except Exception: - project_domain = self.openstack('configuration show -f value' - ' -c auth.project_domain_id') + project_domain = self.openstack( + 'configuration show -f value -c auth.project_domain_id' + ) role1 = self._create_dummy_role() role2 = self._create_dummy_role() for role in role1, role2: - self.openstack('role add' - ' --user %(user)s' - ' --user-domain %(user_domain)s' - ' --project %(project)s' - ' --project-domain %(project_domain)s' - ' %(role)s' - % {'user': user, - 'user_domain': user_domain, - 'project': project, - 'project_domain': project_domain, - 'role': role}) - self.addCleanup(self.openstack, - 'role remove' - ' --user %(user)s' - ' --user-domain %(user_domain)s' - ' --project %(project)s' - ' --project-domain %(project_domain)s' - ' %(role)s' - % {'user': user, - 'user_domain': user_domain, - 'project': project, - 'project_domain': project_domain, - 'role': role}) + self.openstack( + 'role add' + f' --user {user}' + f' --user-domain {user_domain}' + f' --project {project}' + f' --project-domain {project_domain}' + f' {role}' + ) + self.addCleanup( + self.openstack, + 'role remove' + f' --user {user}' + f' --user-domain {user_domain}' + f' --project {project}' + f' --project-domain {project_domain}' + f' {role}', + ) return role1, role2 def test_application_credential_create_with_options(self): name = data_utils.rand_name('name') secret = data_utils.rand_name('secret') description = data_utils.rand_name('description') - tomorrow = (datetime.datetime.utcnow() + - datetime.timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S%z') + tomorrow = ( + datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) + + datetime.timedelta(days=1) + ).strftime('%Y-%m-%dT%H:%M:%S%z') role1, role2 = self._create_role_assignments() - raw_output = self.openstack('application credential create %(name)s' - ' --secret %(secret)s' - ' --description %(description)s' - ' --expiration %(tomorrow)s' - ' --role %(role1)s' - ' --role %(role2)s' - ' --unrestricted' - % {'name': name, - 'secret': secret, - 'description': description, - 'tomorrow': tomorrow, - 'role1': role1, - 'role2': role2}) + raw_output = self.openstack( + f'application credential create {name}' + f' --secret {secret}' + f' --description {description}' + f' --expiration {tomorrow}' + f' --role {role1}' + f' --role {role2}' + ' --unrestricted' + ) self.addCleanup( self.openstack, - 'application credential delete %(name)s' % {'name': name}) + f'application credential delete {name}', + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.APPLICATION_CREDENTIAL_FIELDS) def test_application_credential_delete(self): name = data_utils.rand_name('name') - self.openstack('application credential create %(name)s' - % {'name': name}) 
- raw_output = self.openstack('application credential delete ' - '%(name)s' % {'name': name}) + self.openstack(f'application credential create {name}') + raw_output = self.openstack(f'application credential delete {name}') self.assertEqual(0, len(raw_output)) def test_application_credential_list(self): raw_output = self.openstack('application credential list') items = self.parse_listing(raw_output) self.assert_table_structure( - items, self.APPLICATION_CREDENTIAL_LIST_HEADERS) + items, self.APPLICATION_CREDENTIAL_LIST_HEADERS + ) def test_application_credential_show(self): name = data_utils.rand_name('name') - raw_output = self.openstack('application credential create %(name)s' - % {'name': name}) + raw_output = self.openstack(f'application credential create {name}') self.addCleanup( self.openstack, - 'application credential delete %(name)s' % {'name': name}) - raw_output = self.openstack('application credential show ' - '%(name)s' % {'name': name}) + f'application credential delete {name}', + ) + raw_output = self.openstack(f'application credential show {name}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.APPLICATION_CREDENTIAL_FIELDS) diff --git a/openstackclient/tests/functional/identity/v3/test_catalog.py b/openstackclient/tests/functional/identity/v3/test_catalog.py index c8361406bb..429f94ac36 100644 --- a/openstackclient/tests/functional/identity/v3/test_catalog.py +++ b/openstackclient/tests/functional/identity/v3/test_catalog.py @@ -10,36 +10,54 @@ # License for the specific language governing permissions and limitations # under the License. + from openstackclient.tests.functional.identity.v3 import common class CatalogTests(common.IdentityTests): + """Functional tests for catalog commands""" + + def test_catalog(self): + """Test catalog list and show functionality""" + # Create a test service for isolated testing + _dummy_service_name = self._create_dummy_service(add_clean_up=True) - def test_catalog_list(self): + # list catalogs raw_output = self.openstack('catalog list') items = self.parse_listing(raw_output) self.assert_table_structure(items, ['Name', 'Type', 'Endpoints']) - def test_catalog_show(self): - """test catalog show command - - The output example: - +-----------+----------------------------------------+ - | Field | Value | - +-----------+----------------------------------------+ - | endpoints | test1 | - | | public: http://localhost:5000/v2.0 | - | | test1 | - | | internal: http://localhost:5000/v2.0 | - | | test1 | - | | admin: http://localhost:35357/v2.0 | - | | | - | id | e1e68b5ba21a43a39ff1cf58e736c3aa | - | name | keystone | - | type | identity | - +-----------+----------------------------------------+ - """ - raw_output = self.openstack('catalog show %s' % 'identity') + # Verify created service appears in catalog + service_names = [ + item.get('Name') for item in items if item.get('Name') + ] + self.assertIn( + _dummy_service_name, + service_names, + "Created dummy service should be present in catalog", + ) + + # show service (by name) + raw_output = self.openstack(f'catalog show {_dummy_service_name}') items = self.parse_show(raw_output) - # items may have multiple endpoint urls with empty key - self.assert_show_fields(items, ['endpoints', 'name', 'type', '', 'id']) + self.assert_show_fields(items, ['endpoints', 'name', 'type', 'id']) + + # Extract the type from the dummy service + _dummy_service_type = next( + (item['type'] for item in items if 'type' in item), None + ) + + # show service (by type) + raw_output = 
self.openstack(f'catalog show {_dummy_service_type}') + items = self.parse_show(raw_output) + self.assert_show_fields(items, ['endpoints', 'name', 'type', 'id']) + + # show service (non-existent) + result = self.openstack( + 'catalog show nonexistent-service-xyz', fail_ok=True + ) + self.assertEqual( + '', + result.strip(), + "Non-existent service should return empty result", + ) diff --git a/openstackclient/tests/functional/identity/v3/test_domain.py b/openstackclient/tests/functional/identity/v3/test_domain.py index d8946d1ef6..867db91df9 100644 --- a/openstackclient/tests/functional/identity/v3/test_domain.py +++ b/openstackclient/tests/functional/identity/v3/test_domain.py @@ -17,15 +17,12 @@ class DomainTests(common.IdentityTests): - def test_domain_create(self): domain_name = data_utils.rand_name('TestDomain') - raw_output = self.openstack('domain create %s' % domain_name) + raw_output = self.openstack(f'domain create {domain_name}') # disable domain first before deleting it - self.addCleanup(self.openstack, - 'domain delete %s' % domain_name) - self.addCleanup(self.openstack, - 'domain set --disable %s' % domain_name) + self.addCleanup(self.openstack, f'domain delete {domain_name}') + self.addCleanup(self.openstack, f'domain set --disable {domain_name}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.DOMAIN_FIELDS) @@ -38,32 +35,33 @@ def test_domain_list(self): def test_domain_delete(self): domain_name = self._create_dummy_domain(add_clean_up=False) # cannot delete enabled domain, disable it first - raw_output = self.openstack('domain set --disable %s' % domain_name) + raw_output = self.openstack(f'domain set --disable {domain_name}') self.assertEqual(0, len(raw_output)) - raw_output = self.openstack('domain delete %s' % domain_name) + raw_output = self.openstack(f'domain delete {domain_name}') self.assertEqual(0, len(raw_output)) def test_domain_multi_delete(self): domain_1 = self._create_dummy_domain(add_clean_up=False) domain_2 = self._create_dummy_domain(add_clean_up=False) # cannot delete enabled domain, disable it first - raw_output = self.openstack('domain set --disable %s' % domain_1) + raw_output = self.openstack(f'domain set --disable {domain_1}') self.assertEqual(0, len(raw_output)) - raw_output = self.openstack('domain set --disable %s' % domain_2) + raw_output = self.openstack(f'domain set --disable {domain_2}') self.assertEqual(0, len(raw_output)) - raw_output = self.openstack( - 'domain delete %s %s' % (domain_1, domain_2)) + raw_output = self.openstack(f'domain delete {domain_1} {domain_2}') self.assertEqual(0, len(raw_output)) def test_domain_delete_failure(self): domain_name = self._create_dummy_domain() # cannot delete enabled domain - self.assertRaises(exceptions.CommandFailed, - self.openstack, - 'domain delete %s' % domain_name) + self.assertRaises( + exceptions.CommandFailed, + self.openstack, + f'domain delete {domain_name}', + ) def test_domain_show(self): domain_name = self._create_dummy_domain() - raw_output = self.openstack('domain show %s' % domain_name) + raw_output = self.openstack(f'domain show {domain_name}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.DOMAIN_FIELDS) diff --git a/openstackclient/tests/functional/identity/v3/test_endpoint.py b/openstackclient/tests/functional/identity/v3/test_endpoint.py index 41f0b4c80e..0441fcb6ec 100644 --- a/openstackclient/tests/functional/identity/v3/test_endpoint.py +++ b/openstackclient/tests/functional/identity/v3/test_endpoint.py @@ -16,7 +16,6 @@ class 
EndpointTests(common.IdentityTests): - def test_endpoint_create(self): self._create_dummy_endpoint(interface='public') self._create_dummy_endpoint(interface='admin') @@ -24,15 +23,15 @@ def test_endpoint_create(self): def test_endpoint_delete(self): endpoint_id = self._create_dummy_endpoint(add_clean_up=False) - raw_output = self.openstack( - 'endpoint delete %s' % endpoint_id) + raw_output = self.openstack(f'endpoint delete {endpoint_id}') self.assertEqual(0, len(raw_output)) def test_endpoint_multi_delete(self): endpoint_1 = self._create_dummy_endpoint(add_clean_up=False) endpoint_2 = self._create_dummy_endpoint(add_clean_up=False) raw_output = self.openstack( - 'endpoint delete %s %s' % (endpoint_1, endpoint_2)) + f'endpoint delete {endpoint_1} {endpoint_2}' + ) self.assertEqual(0, len(raw_output)) def test_endpoint_list(self): @@ -46,21 +45,15 @@ def test_endpoint_list_filter(self): endpoint_id = self._create_dummy_endpoint(add_clean_up=False) project_id = self._create_dummy_project(add_clean_up=False) raw_output = self.openstack( - 'endpoint add project ' - '%(endpoint_id)s ' - '%(project_id)s' % { - 'project_id': project_id, - 'endpoint_id': endpoint_id}) + f'endpoint add project {endpoint_id} {project_id}' + ) self.assertEqual(0, len(raw_output)) - raw_output = self.openstack( - 'endpoint list --endpoint %s' % endpoint_id) + raw_output = self.openstack(f'endpoint list --endpoint {endpoint_id}') self.assertIn(project_id, raw_output) items = self.parse_listing(raw_output) - self.assert_table_structure(items, - self.ENDPOINT_LIST_PROJECT_HEADERS) + self.assert_table_structure(items, self.ENDPOINT_LIST_PROJECT_HEADERS) - raw_output = self.openstack( - 'endpoint list --project %s' % project_id) + raw_output = self.openstack(f'endpoint list --project {project_id}') self.assertIn(endpoint_id, raw_output) items = self.parse_listing(raw_output) self.assert_table_structure(items, self.ENDPOINT_LIST_HEADERS) @@ -70,14 +63,17 @@ def test_endpoint_set(self): new_endpoint_url = data_utils.rand_url() raw_output = self.openstack( 'endpoint set ' - '--interface %(interface)s ' - '--url %(url)s ' + '--interface {interface} ' + '--url {url} ' '--disable ' - '%(endpoint_id)s' % {'interface': 'admin', - 'url': new_endpoint_url, - 'endpoint_id': endpoint_id}) + '{endpoint_id}'.format( + interface='admin', + url=new_endpoint_url, + endpoint_id=endpoint_id, + ) + ) self.assertEqual(0, len(raw_output)) - raw_output = self.openstack('endpoint show %s' % endpoint_id) + raw_output = self.openstack(f'endpoint show {endpoint_id}') endpoint = self.parse_show_as_object(raw_output) self.assertEqual('admin', endpoint['interface']) self.assertEqual(new_endpoint_url, endpoint['url']) @@ -85,7 +81,7 @@ def test_endpoint_set(self): def test_endpoint_show(self): endpoint_id = self._create_dummy_endpoint() - raw_output = self.openstack('endpoint show %s' % endpoint_id) + raw_output = self.openstack(f'endpoint show {endpoint_id}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.ENDPOINT_FIELDS) @@ -93,17 +89,11 @@ def test_endpoint_add_remove_project(self): endpoint_id = self._create_dummy_endpoint(add_clean_up=False) project_id = self._create_dummy_project(add_clean_up=False) raw_output = self.openstack( - 'endpoint add project ' - '%(endpoint_id)s ' - '%(project_id)s' % { - 'project_id': project_id, - 'endpoint_id': endpoint_id}) + f'endpoint add project {endpoint_id} {project_id}' + ) self.assertEqual(0, len(raw_output)) raw_output = self.openstack( - 'endpoint remove project ' - '%(endpoint_id)s 
' - '%(project_id)s' % { - 'project_id': project_id, - 'endpoint_id': endpoint_id}) + f'endpoint remove project {endpoint_id} {project_id}' + ) self.assertEqual(0, len(raw_output)) diff --git a/openstackclient/tests/functional/identity/v3/test_group.py b/openstackclient/tests/functional/identity/v3/test_group.py index 917d5df048..a2e41d813a 100644 --- a/openstackclient/tests/functional/identity/v3/test_group.py +++ b/openstackclient/tests/functional/identity/v3/test_group.py @@ -16,7 +16,6 @@ class GroupTests(common.IdentityTests): - def test_group_create(self): self._create_dummy_group() @@ -29,8 +28,7 @@ def test_group_list(self): def test_group_list_with_domain(self): group_name = self._create_dummy_group() - raw_output = self.openstack( - 'group list --domain %s' % self.domain_name) + raw_output = self.openstack(f'group list --domain {self.domain_name}') items = self.parse_listing(raw_output) self.assert_table_structure(items, common.BASIC_LIST_HEADERS) self.assertIn(group_name, raw_output) @@ -38,19 +36,15 @@ def test_group_list_with_domain(self): def test_group_delete(self): group_name = self._create_dummy_group(add_clean_up=False) raw_output = self.openstack( - 'group delete ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': group_name}) + f'group delete --domain {self.domain_name} {group_name}' + ) self.assertEqual(0, len(raw_output)) def test_group_show(self): group_name = self._create_dummy_group() raw_output = self.openstack( - 'group show ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': group_name}) + f'group show --domain {self.domain_name} {group_name}' + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.GROUP_FIELDS) @@ -59,27 +53,23 @@ def test_group_set(self): new_group_name = data_utils.rand_name('NewTestGroup') raw_output = self.openstack( 'group set ' - '--domain %(domain)s ' - '--name %(new_group)s ' - '%(group)s' % {'domain': self.domain_name, - 'new_group': new_group_name, - 'group': group_name}) + f'--domain {self.domain_name} ' + f'--name {new_group_name} ' + f'{group_name}' + ) self.assertEqual(0, len(raw_output)) raw_output = self.openstack( - 'group show ' - '--domain %(domain)s ' - '%(group)s' % {'domain': self.domain_name, - 'group': new_group_name}) + f'group show --domain {self.domain_name} {new_group_name}' + ) group = self.parse_show_as_object(raw_output) self.assertEqual(new_group_name, group['name']) # reset group name to make sure it will be cleaned up raw_output = self.openstack( 'group set ' - '--domain %(domain)s ' - '--name %(new_group)s ' - '%(group)s' % {'domain': self.domain_name, - 'new_group': group_name, - 'group': new_group_name}) + f'--domain {self.domain_name} ' + f'--name {group_name} ' + f'{new_group_name}' + ) self.assertEqual(0, len(raw_output)) def test_group_add_user(self): @@ -87,21 +77,17 @@ def test_group_add_user(self): username = self._create_dummy_user() raw_output = self.openstack( 'group add user ' - '--group-domain %(group_domain)s ' - '--user-domain %(user_domain)s ' - '%(group)s %(user)s' % {'group_domain': self.domain_name, - 'user_domain': self.domain_name, - 'group': group_name, - 'user': username}) + f'--group-domain {self.domain_name} ' + f'--user-domain {self.domain_name} ' + f'{group_name} {username}' + ) self.addCleanup( self.openstack, 'group remove user ' - '--group-domain %(group_domain)s ' - '--user-domain %(user_domain)s ' - '%(group)s %(user)s' % {'group_domain': self.domain_name, - 'user_domain': self.domain_name, - 'group': 
group_name, - 'user': username}) + f'--group-domain {self.domain_name} ' + f'--user-domain {self.domain_name} ' + f'{group_name} {username}', + ) self.assertOutput('', raw_output) def test_group_contains_user(self): @@ -109,53 +95,43 @@ def test_group_contains_user(self): username = self._create_dummy_user() raw_output = self.openstack( 'group add user ' - '--group-domain %(group_domain)s ' - '--user-domain %(user_domain)s ' - '%(group)s %(user)s' % {'group_domain': self.domain_name, - 'user_domain': self.domain_name, - 'group': group_name, - 'user': username}) + f'--group-domain {self.domain_name} ' + f'--user-domain {self.domain_name} ' + f'{group_name} {username}' + ) self.addCleanup( self.openstack, 'group remove user ' - '--group-domain %(group_domain)s ' - '--user-domain %(user_domain)s ' - '%(group)s %(user)s' % {'group_domain': self.domain_name, - 'user_domain': self.domain_name, - 'group': group_name, - 'user': username}) + f'--group-domain {self.domain_name} ' + f'--user-domain {self.domain_name} ' + f'{group_name} {username}', + ) self.assertOutput('', raw_output) raw_output = self.openstack( 'group contains user ' - '--group-domain %(group_domain)s ' - '--user-domain %(user_domain)s ' - '%(group)s %(user)s' % {'group_domain': self.domain_name, - 'user_domain': self.domain_name, - 'group': group_name, - 'user': username}) + f'--group-domain {self.domain_name} ' + f'--user-domain {self.domain_name} ' + f'{group_name} {username}' + ) self.assertEqual( - '%(user)s in group %(group)s\n' % {'user': username, - 'group': group_name}, - raw_output) + f'{username} in group {group_name}\n', + raw_output, + ) def test_group_remove_user(self): group_name = self._create_dummy_group() username = self._create_dummy_user() add_raw_output = self.openstack( 'group add user ' - '--group-domain %(group_domain)s ' - '--user-domain %(user_domain)s ' - '%(group)s %(user)s' % {'group_domain': self.domain_name, - 'user_domain': self.domain_name, - 'group': group_name, - 'user': username}) + f'--group-domain {self.domain_name} ' + f'--user-domain {self.domain_name} ' + f'{group_name} {username}' + ) remove_raw_output = self.openstack( 'group remove user ' - '--group-domain %(group_domain)s ' - '--user-domain %(user_domain)s ' - '%(group)s %(user)s' % {'group_domain': self.domain_name, - 'user_domain': self.domain_name, - 'group': group_name, - 'user': username}) + f'--group-domain {self.domain_name} ' + f'--user-domain {self.domain_name} ' + f'{group_name} {username}' + ) self.assertOutput('', add_raw_output) self.assertOutput('', remove_raw_output) diff --git a/openstackclient/tests/functional/identity/v3/test_idp.py b/openstackclient/tests/functional/identity/v3/test_idp.py index 5db3610a5b..c9ef01d0f1 100644 --- a/openstackclient/tests/functional/identity/v3/test_idp.py +++ b/openstackclient/tests/functional/identity/v3/test_idp.py @@ -23,21 +23,24 @@ def test_idp_create(self): def test_idp_delete(self): identity_provider = self._create_dummy_idp(add_clean_up=False) - raw_output = self.openstack('identity provider delete %s' - % identity_provider) + raw_output = self.openstack( + f'identity provider delete {identity_provider}' + ) self.assertEqual(0, len(raw_output)) def test_idp_multi_delete(self): idp_1 = self._create_dummy_idp(add_clean_up=False) idp_2 = self._create_dummy_idp(add_clean_up=False) raw_output = self.openstack( - 'identity provider delete %s %s' % (idp_1, idp_2)) + f'identity provider delete {idp_1} {idp_2}' + ) self.assertEqual(0, len(raw_output)) def test_idp_show(self): 
identity_provider = self._create_dummy_idp(add_clean_up=True) - raw_output = self.openstack('identity provider show %s' - % identity_provider) + raw_output = self.openstack( + f'identity provider show {identity_provider}' + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.IDENTITY_PROVIDER_FIELDS) @@ -50,13 +53,14 @@ def test_idp_list(self): def test_idp_set(self): identity_provider = self._create_dummy_idp(add_clean_up=True) new_remoteid = data_utils.rand_name('newRemoteId') - raw_output = self.openstack('identity provider set ' - '%(identity-provider)s ' - '--remote-id %(remote-id)s ' - % {'identity-provider': identity_provider, - 'remote-id': new_remoteid}) + raw_output = self.openstack( + f'identity provider set ' + f'{identity_provider} ' + f'--remote-id {new_remoteid}' + ) self.assertEqual(0, len(raw_output)) - raw_output = self.openstack('identity provider show %s' - % identity_provider) + raw_output = self.openstack( + f'identity provider show {identity_provider}' + ) updated_value = self.parse_show_as_object(raw_output) self.assertIn(new_remoteid, updated_value['remote_ids']) diff --git a/openstackclient/tests/functional/identity/v3/test_limit.py b/openstackclient/tests/functional/identity/v3/test_limit.py index b03f0f282a..8c0bbcd6a9 100644 --- a/openstackclient/tests/functional/identity/v3/test_limit.py +++ b/openstackclient/tests/functional/identity/v3/test_limit.py @@ -20,23 +20,22 @@ class LimitTestCase(common.IdentityTests): - def test_limit_create_with_service_name(self): registered_limit_id = self._create_dummy_registered_limit() raw_output = self.openstack( - 'registered limit show %s' % registered_limit_id, - cloud=SYSTEM_CLOUD + f'registered limit show {registered_limit_id}', + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) service_id = self._extract_value_from_items('service_id', items) resource_name = self._extract_value_from_items('resource_name', items) - raw_output = self.openstack('service show %s' % service_id) + raw_output = self.openstack(f'service show {service_id}') items = self.parse_show(raw_output) service_name = self._extract_value_from_items('name', items) project_name = self._create_dummy_project() - raw_output = self.openstack('project show %s' % project_name) + raw_output = self.openstack(f'project show {project_name}') items = self.parse_show(raw_output) project_id = self._extract_value_from_items('id', items) @@ -44,22 +43,20 @@ def test_limit_create_with_service_name(self): 'project_id': project_id, 'service_name': service_name, 'resource_name': resource_name, - 'resource_limit': 15 + 'resource_limit': 15, } raw_output = self.openstack( 'limit create' - ' --project %(project_id)s' - ' --service %(service_name)s' - ' --resource-limit %(resource_limit)s' - ' %(resource_name)s' % params, - cloud=SYSTEM_CLOUD + ' --project {project_id}' + ' --service {service_name}' + ' --resource-limit {resource_limit}' + ' {resource_name}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) limit_id = self._extract_value_from_items('id', items) self.addCleanup( - self.openstack, - 'limit delete %s' % limit_id, - cloud=SYSTEM_CLOUD + self.openstack, f'limit delete {limit_id}', cloud=SYSTEM_CLOUD ) self.assert_show_fields(items, self.LIMIT_FIELDS) @@ -67,14 +64,14 @@ def test_limit_create_with_service_name(self): def test_limit_create_with_project_name(self): registered_limit_id = self._create_dummy_registered_limit() raw_output = self.openstack( - 'registered limit show %s' % registered_limit_id, - 
cloud=SYSTEM_CLOUD + f'registered limit show {registered_limit_id}', + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) service_id = self._extract_value_from_items('service_id', items) resource_name = self._extract_value_from_items('resource_name', items) - raw_output = self.openstack('service show %s' % service_id) + raw_output = self.openstack(f'service show {service_id}') items = self.parse_show(raw_output) service_name = self._extract_value_from_items('name', items) @@ -84,22 +81,20 @@ def test_limit_create_with_project_name(self): 'project_name': project_name, 'service_name': service_name, 'resource_name': resource_name, - 'resource_limit': 15 + 'resource_limit': 15, } raw_output = self.openstack( 'limit create' - ' --project %(project_name)s' - ' --service %(service_name)s' - ' --resource-limit %(resource_limit)s' - ' %(resource_name)s' % params, - cloud=SYSTEM_CLOUD + ' --project {project_name}' + ' --service {service_name}' + ' --resource-limit {resource_limit}' + ' {resource_name}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) limit_id = self._extract_value_from_items('id', items) self.addCleanup( - self.openstack, - 'limit delete %s' % limit_id, - cloud=SYSTEM_CLOUD + self.openstack, f'limit delete {limit_id}', cloud=SYSTEM_CLOUD ) self.assert_show_fields(items, self.LIMIT_FIELDS) @@ -117,21 +112,21 @@ def test_limit_create_with_options(self): params = { 'region_id': region_id, - 'registered_limit_id': registered_limit_id + 'registered_limit_id': registered_limit_id, } raw_output = self.openstack( 'registered limit set' - ' %(registered_limit_id)s' - ' --region %(region_id)s' % params, - cloud=SYSTEM_CLOUD + ' {registered_limit_id}' + ' --region {region_id}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) service_id = self._extract_value_from_items('service_id', items) resource_name = self._extract_value_from_items('resource_name', items) project_name = self._create_dummy_project() - raw_output = self.openstack('project show %s' % project_name) + raw_output = self.openstack(f'project show {project_name}') items = self.parse_show(raw_output) project_id = self._extract_value_from_items('id', items) description = data_utils.arbitrary_string() @@ -142,24 +137,22 @@ def test_limit_create_with_options(self): 'resource_name': resource_name, 'resource_limit': 15, 'region_id': region_id, - 'description': description + 'description': description, } raw_output = self.openstack( 'limit create' - ' --project %(project_id)s' - ' --service %(service_id)s' - ' --resource-limit %(resource_limit)s' - ' --region %(region_id)s' - ' --description %(description)s' - ' %(resource_name)s' % params, - cloud=SYSTEM_CLOUD + ' --project {project_id}' + ' --service {service_id}' + ' --resource-limit {resource_limit}' + ' --region {region_id}' + ' --description {description}' + ' {resource_name}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) limit_id = self._extract_value_from_items('id', items) self.addCleanup( - self.openstack, - 'limit delete %s' % limit_id, - cloud=SYSTEM_CLOUD + self.openstack, f'limit delete {limit_id}', cloud=SYSTEM_CLOUD ) self.assert_show_fields(items, self.LIMIT_FIELDS) @@ -167,8 +160,7 @@ def test_limit_create_with_options(self): def test_limit_show(self): limit_id = self._create_dummy_limit() raw_output = self.openstack( - 'limit show %s' % limit_id, - cloud=SYSTEM_CLOUD + f'limit show {limit_id}', cloud=SYSTEM_CLOUD ) items = self.parse_show(raw_output) 
self.assert_show_fields(items, self.LIMIT_FIELDS) @@ -178,14 +170,14 @@ def test_limit_set_description(self): params = { 'description': data_utils.arbitrary_string(), - 'limit_id': limit_id + 'limit_id': limit_id, } raw_output = self.openstack( - 'limit set' - ' --description %(description)s' - ' %(limit_id)s' % params, - cloud=SYSTEM_CLOUD + 'limit set --description {description} {limit_id}'.format( + **params + ), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.LIMIT_FIELDS) @@ -193,16 +185,13 @@ def test_limit_set_description(self): def test_limit_set_resource_limit(self): limit_id = self._create_dummy_limit() - params = { - 'resource_limit': 5, - 'limit_id': limit_id - } + params = {'resource_limit': 5, 'limit_id': limit_id} raw_output = self.openstack( - 'limit set' - ' --resource-limit %(resource_limit)s' - ' %(limit_id)s' % params, - cloud=SYSTEM_CLOUD + 'limit set --resource-limit {resource_limit} {limit_id}'.format( + **params + ), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.LIMIT_FIELDS) @@ -216,6 +205,6 @@ def test_limit_list(self): def test_limit_delete(self): limit_id = self._create_dummy_limit(add_clean_up=False) raw_output = self.openstack( - 'limit delete %s' % limit_id, - cloud=SYSTEM_CLOUD) + f'limit delete {limit_id}', cloud=SYSTEM_CLOUD + ) self.assertEqual(0, len(raw_output)) diff --git a/openstackclient/tests/functional/identity/v3/test_project.py b/openstackclient/tests/functional/identity/v3/test_project.py index b3d31aa750..7a66c18518 100644 --- a/openstackclient/tests/functional/identity/v3/test_project.py +++ b/openstackclient/tests/functional/identity/v3/test_project.py @@ -16,26 +16,21 @@ class ProjectTests(common.IdentityTests): - def test_project_create(self): project_name = data_utils.rand_name('TestProject') description = data_utils.rand_name('description') raw_output = self.openstack( 'project create ' - '--domain %(domain)s ' - '--description %(description)s ' + f'--domain {self.domain_name} ' + f'--description {description} ' '--enable ' '--property k1=v1 ' '--property k2=v2 ' - '%(name)s' % {'domain': self.domain_name, - 'description': description, - 'name': project_name}) + f'{project_name}' + ) self.addCleanup( self.openstack, - 'project delete ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': project_name} + f'project delete --domain {self.domain_name} {project_name}', ) items = self.parse_show(raw_output) show_fields = list(self.PROJECT_FIELDS) @@ -48,10 +43,8 @@ def test_project_create(self): def test_project_delete(self): project_name = self._create_dummy_project(add_clean_up=False) raw_output = self.openstack( - 'project delete ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': project_name}) + f'project delete --domain {self.domain_name} {project_name}' + ) self.assertEqual(0, len(raw_output)) def test_project_list(self): @@ -62,7 +55,8 @@ def test_project_list(self): def test_project_list_with_domain(self): project_name = self._create_dummy_project() raw_output = self.openstack( - 'project list --domain %s' % self.domain_name) + f'project list --domain {self.domain_name}' + ) items = self.parse_listing(raw_output) self.assert_table_structure(items, common.BASIC_LIST_HEADERS) self.assertIn(project_name, raw_output) @@ -73,18 +67,15 @@ def test_project_set(self): new_project_name = data_utils.rand_name('NewTestProject') raw_output = self.openstack( 'project set ' - '--name %(new_name)s ' 
+ f'--name {new_project_name} ' '--disable ' '--property k0=v0 ' - '%(name)s' % {'new_name': new_project_name, - 'name': project_name}) + f'{project_name}' + ) self.assertEqual(0, len(raw_output)) # check project details raw_output = self.openstack( - 'project show ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': new_project_name} + f'project show --domain {self.domain_name} {new_project_name}' ) items = self.parse_show(raw_output) fields = list(self.PROJECT_FIELDS) @@ -96,18 +87,13 @@ def test_project_set(self): self.assertEqual('v0', project['k0']) # reset project to make sure it will be cleaned up self.openstack( - 'project set ' - '--name %(new_name)s ' - '--enable ' - '%(name)s' % {'new_name': project_name, - 'name': new_project_name}) + f'project set --name {project_name} --enable {new_project_name}' + ) def test_project_show(self): raw_output = self.openstack( - 'project show ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': self.project_name}) + f'project show --domain {self.domain_name} {self.project_name}' + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.PROJECT_FIELDS) @@ -115,11 +101,10 @@ def test_project_show_with_parents_children(self): output = self.openstack( 'project show ' '--parents --children ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': self.project_name}, + f'--domain {self.domain_name} ' + f'{self.project_name}', parse_output=True, ) - for attr_name in (self.PROJECT_FIELDS + ['parents', 'subtree']): + for attr_name in self.PROJECT_FIELDS + ['parents', 'subtree']: self.assertIn(attr_name, output) self.assertEqual(self.project_name, output.get('name')) diff --git a/openstackclient/tests/functional/identity/v3/test_region.py b/openstackclient/tests/functional/identity/v3/test_region.py index 2a402bd1a8..49471002ad 100644 --- a/openstackclient/tests/functional/identity/v3/test_region.py +++ b/openstackclient/tests/functional/identity/v3/test_region.py @@ -14,7 +14,6 @@ class RegionTests(common.IdentityTests): - def test_region_create(self): self._create_dummy_region() @@ -24,14 +23,13 @@ def test_region_create_with_parent_region(self): def test_region_delete(self): region_id = self._create_dummy_region(add_clean_up=False) - raw_output = self.openstack('region delete %s' % region_id) + raw_output = self.openstack(f'region delete {region_id}') self.assertEqual(0, len(raw_output)) def test_region_multi_delete(self): region_1 = self._create_dummy_region(add_clean_up=False) region_2 = self._create_dummy_region(add_clean_up=False) - raw_output = self.openstack( - 'region delete %s %s' % (region_1, region_2)) + raw_output = self.openstack(f'region delete {region_1} {region_2}') self.assertEqual(0, len(raw_output)) def test_region_list(self): @@ -45,26 +43,24 @@ def test_region_set(self): new_parent_region_id = self._create_dummy_region() region_id = self._create_dummy_region(parent_region_id) # check region details - raw_output = self.openstack('region show %s' % region_id) + raw_output = self.openstack(f'region show {region_id}') region = self.parse_show_as_object(raw_output) self.assertEqual(parent_region_id, region['parent_region']) self.assertEqual(region_id, region['region']) # update parent-region raw_output = self.openstack( - 'region set ' - '--parent-region %(parent_region)s ' - '%(region)s' % {'parent_region': new_parent_region_id, - 'region': region_id}) + f'region set --parent-region {new_parent_region_id} {region_id}' + ) self.assertEqual(0, 
len(raw_output)) # check updated region details - raw_output = self.openstack('region show %s' % region_id) + raw_output = self.openstack(f'region show {region_id}') region = self.parse_show_as_object(raw_output) self.assertEqual(new_parent_region_id, region['parent_region']) self.assertEqual(region_id, region['region']) def test_region_show(self): region_id = self._create_dummy_region() - raw_output = self.openstack('region show %s' % region_id) + raw_output = self.openstack(f'region show {region_id}') region = self.parse_show_as_object(raw_output) self.assertEqual(region_id, region['region']) self.assertEqual('None', region['parent_region']) diff --git a/openstackclient/tests/functional/identity/v3/test_registered_limit.py b/openstackclient/tests/functional/identity/v3/test_registered_limit.py index 80f51ad99f..54ee7f4f8f 100644 --- a/openstackclient/tests/functional/identity/v3/test_registered_limit.py +++ b/openstackclient/tests/functional/identity/v3/test_registered_limit.py @@ -20,39 +20,32 @@ class RegisteredLimitTestCase(common.IdentityTests): - def test_registered_limit_create_with_service_name(self): self._create_dummy_registered_limit() def test_registered_limit_create_with_service_id(self): service_name = self._create_dummy_service() - raw_output = self.openstack( - 'service show' - ' %(service_name)s' % {'service_name': service_name} - ) + raw_output = self.openstack(f'service show {service_name}') service_items = self.parse_show(raw_output) service_id = self._extract_value_from_items('id', service_items) raw_output = self.openstack( 'registered limit create' - ' --service %(service_id)s' - ' --default-limit %(default_limit)s' - ' %(resource_name)s' % { - 'service_id': service_id, - 'default_limit': 10, - 'resource_name': 'cores' - }, - cloud=SYSTEM_CLOUD + ' --service {service_id}' + ' --default-limit {default_limit}' + ' {resource_name}'.format( + service_id=service_id, + default_limit=10, + resource_name='cores', + ), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) registered_limit_id = self._extract_value_from_items('id', items) self.addCleanup( self.openstack, - 'registered limit delete' - ' %(registered_limit_id)s' % { - 'registered_limit_id': registered_limit_id - }, - cloud=SYSTEM_CLOUD + f'registered limit delete {registered_limit_id}', + cloud=SYSTEM_CLOUD, ) self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS) @@ -65,26 +58,24 @@ def test_registered_limit_create_with_options(self): 'resource_name': 'cores', 'default_limit': 10, 'description': 'default limit for cores', - 'region_id': region_id + 'region_id': region_id, } raw_output = self.openstack( 'registered limit create' - ' --description \'%(description)s\'' - ' --region %(region_id)s' - ' --service %(service_name)s' - ' --default-limit %(default_limit)s' - ' %(resource_name)s' % params, - cloud=SYSTEM_CLOUD + ' --description \'{description}\'' + ' --region {region_id}' + ' --service {service_name}' + ' --default-limit {default_limit}' + ' {resource_name}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) registered_limit_id = self._extract_value_from_items('id', items) self.addCleanup( self.openstack, - 'registered limit delete %(registered_limit_id)s' % { - 'registered_limit_id': registered_limit_id - }, - cloud=SYSTEM_CLOUD + f'registered limit delete {registered_limit_id}', + cloud=SYSTEM_CLOUD, ) self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS) @@ -92,9 +83,7 @@ def test_registered_limit_create_with_options(self): def 
test_registered_limit_show(self): registered_limit_id = self._create_dummy_registered_limit() raw_output = self.openstack( - 'registered limit show %(registered_limit_id)s' % { - 'registered_limit_id': registered_limit_id - } + f'registered limit show {registered_limit_id}' ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS) @@ -105,13 +94,13 @@ def test_registered_limit_set_region_id(self): params = { 'registered_limit_id': registered_limit_id, - 'region_id': region_id + 'region_id': region_id, } raw_output = self.openstack( 'registered limit set' - ' %(registered_limit_id)s' - ' --region %(region_id)s' % params, - cloud=SYSTEM_CLOUD + ' {registered_limit_id}' + ' --region {region_id}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS) @@ -120,13 +109,13 @@ def test_registered_limit_set_description(self): registered_limit_id = self._create_dummy_registered_limit() params = { 'registered_limit_id': registered_limit_id, - 'description': 'updated description' + 'description': 'updated description', } raw_output = self.openstack( 'registered limit set' - ' %(registered_limit_id)s' - ' --description \'%(description)s\'' % params, - cloud=SYSTEM_CLOUD + ' {registered_limit_id}' + ' --description \'{description}\''.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS) @@ -136,13 +125,13 @@ def test_registered_limit_set_service(self): service_name = self._create_dummy_service() params = { 'registered_limit_id': registered_limit_id, - 'service': service_name + 'service': service_name, } raw_output = self.openstack( 'registered limit set' - ' %(registered_limit_id)s' - ' --service %(service)s' % params, - cloud=SYSTEM_CLOUD + ' {registered_limit_id}' + ' --service {service}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS) @@ -151,13 +140,13 @@ def test_registered_limit_set_default_limit(self): registered_limit_id = self._create_dummy_registered_limit() params = { 'registered_limit_id': registered_limit_id, - 'default_limit': 20 + 'default_limit': 20, } raw_output = self.openstack( 'registered limit set' - ' %(registered_limit_id)s' - ' --default-limit %(default_limit)s' % params, - cloud=SYSTEM_CLOUD + ' {registered_limit_id}' + ' --default-limit {default_limit}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS) @@ -167,13 +156,13 @@ def test_registered_limit_set_resource_name(self): resource_name = data_utils.rand_name('resource_name') params = { 'registered_limit_id': registered_limit_id, - 'resource_name': resource_name + 'resource_name': resource_name, } raw_output = self.openstack( 'registered limit set' - ' %(registered_limit_id)s' - ' --resource-name %(resource_name)s' % params, - cloud=SYSTEM_CLOUD + ' {registered_limit_id}' + ' --resource-name {resource_name}'.format(**params), + cloud=SYSTEM_CLOUD, ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS) @@ -189,10 +178,7 @@ def test_registered_limit_delete(self): add_clean_up=False ) raw_output = self.openstack( - 'registered limit delete' - ' %(registered_limit_id)s' % { - 'registered_limit_id': registered_limit_id - }, - cloud=SYSTEM_CLOUD + f'registered limit delete {registered_limit_id}', + 
cloud=SYSTEM_CLOUD, ) self.assertEqual(0, len(raw_output)) diff --git a/openstackclient/tests/functional/identity/v3/test_role.py b/openstackclient/tests/functional/identity/v3/test_role.py index 3954c4e301..3237c0bfb4 100644 --- a/openstackclient/tests/functional/identity/v3/test_role.py +++ b/openstackclient/tests/functional/identity/v3/test_role.py @@ -16,7 +16,6 @@ class RoleTests(common.IdentityTests): - def test_role_create(self): self._create_dummy_role() @@ -24,12 +23,10 @@ def test_role_create_with_description(self): role_name = data_utils.rand_name('TestRole') description = data_utils.rand_name('description') raw_output = self.openstack( - 'role create ' - '--description %(description)s ' - '%(name)s' % {'description': description, - 'name': role_name}) + f'role create --description {description} {role_name}' + ) role = self.parse_show_as_object(raw_output) - self.addCleanup(self.openstack, 'role delete %s' % role['id']) + self.addCleanup(self.openstack, 'role delete {}'.format(role['id'])) items = self.parse_show(raw_output) self.assert_show_fields(items, self.ROLE_FIELDS) self.assertEqual(description, role['description']) @@ -37,7 +34,7 @@ def test_role_create_with_description(self): def test_role_delete(self): role_name = self._create_dummy_role(add_clean_up=False) - raw_output = self.openstack('role delete %s' % role_name) + raw_output = self.openstack(f'role delete {role_name}') self.assertEqual(0, len(raw_output)) def test_role_list(self): @@ -48,7 +45,7 @@ def test_role_list(self): def test_role_show(self): role_name = self._create_dummy_role() - raw_output = self.openstack('role show %s' % role_name) + raw_output = self.openstack(f'role show {role_name}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.ROLE_FIELDS) @@ -56,19 +53,21 @@ def test_role_set(self): role_name = self._create_dummy_role() new_role_name = data_utils.rand_name('NewTestRole') raw_output = self.openstack( - 'role set --name %s %s' % (new_role_name, role_name)) + f'role set --name {new_role_name} {role_name}' + ) self.assertEqual(0, len(raw_output)) - raw_output = self.openstack('role show %s' % new_role_name) + raw_output = self.openstack(f'role show {new_role_name}') role = self.parse_show_as_object(raw_output) self.assertEqual(new_role_name, role['name']) def test_role_set_description(self): role_name = self._create_dummy_role() description = data_utils.rand_name("NewDescription") - raw_output = self.openstack('role set --description %s %s' - % (description, role_name)) + raw_output = self.openstack( + f'role set --description {description} {role_name}' + ) self.assertEqual(0, len(raw_output)) - raw_output = self.openstack('role show %s' % role_name) + raw_output = self.openstack(f'role show {role_name}') role = self.parse_show_as_object(raw_output) self.assertEqual(description, role['description']) @@ -77,27 +76,45 @@ def test_role_add(self): username = self._create_dummy_user() raw_output = self.openstack( 'role add ' - '--project %(project)s ' - '--project-domain %(project_domain)s ' - '--user %(user)s ' - '--user-domain %(user_domain)s ' - '%(role)s' % {'project': self.project_name, - 'project_domain': self.domain_name, - 'user': username, - 'user_domain': self.domain_name, - 'role': role_name}) + f'--project {self.project_name} ' + f'--project-domain {self.domain_name} ' + f'--user {username} ' + f'--user-domain {self.domain_name} ' + f'{role_name}' + ) + self.addCleanup( + self.openstack, + 'role remove ' + f'--project {self.project_name} ' + f'--project-domain 
{self.domain_name} ' + f'--user {username} ' + f'--user-domain {self.domain_name} ' + f'{role_name}', + ) + self.assertEqual(0, len(raw_output)) + + def test_role_add_inherited(self): + role_name = self._create_dummy_role() + username = self._create_dummy_user() + raw_output = self.openstack( + 'role add ' + f'--project {self.project_name} ' + f'--project-domain {self.domain_name} ' + f'--user {username} ' + f'--user-domain {self.domain_name} ' + '--inherited ' + f'{role_name}' + ) self.addCleanup( self.openstack, 'role remove ' - '--project %(project)s ' - '--project-domain %(project_domain)s ' - '--user %(user)s ' - '--user-domain %(user_domain)s ' - '%(role)s' % {'project': self.project_name, - 'project_domain': self.domain_name, - 'user': username, - 'user_domain': self.domain_name, - 'role': role_name}) + f'--project {self.project_name} ' + f'--project-domain {self.domain_name} ' + f'--user {username} ' + f'--user-domain {self.domain_name} ' + '--inherited ' + f'{role_name}', + ) self.assertEqual(0, len(raw_output)) def test_role_remove(self): @@ -105,50 +122,52 @@ def test_role_remove(self): username = self._create_dummy_user() add_raw_output = self.openstack( 'role add ' - '--project %(project)s ' - '--project-domain %(project_domain)s ' - '--user %(user)s ' - '--user-domain %(user_domain)s ' - '%(role)s' % {'project': self.project_name, - 'project_domain': self.domain_name, - 'user': username, - 'user_domain': self.domain_name, - 'role': role_name}) + f'--project {self.project_name} ' + f'--project-domain {self.domain_name} ' + f'--user {username} ' + f'--user-domain {self.domain_name} ' + f'{role_name}' + ) remove_raw_output = self.openstack( 'role remove ' - '--project %(project)s ' - '--project-domain %(project_domain)s ' - '--user %(user)s ' - '--user-domain %(user_domain)s ' - '%(role)s' % {'project': self.project_name, - 'project_domain': self.domain_name, - 'user': username, - 'user_domain': self.domain_name, - 'role': role_name}) + f'--project {self.project_name} ' + f'--project-domain {self.domain_name} ' + f'--user {username} ' + f'--user-domain {self.domain_name} ' + f'{role_name}' + ) self.assertEqual(0, len(add_raw_output)) self.assertEqual(0, len(remove_raw_output)) def test_implied_role_list(self): + raw_output = self.openstack('implied role list') + default_roles = self.parse_listing(raw_output) + self.assert_table_structure( + default_roles, self.IMPLIED_ROLE_LIST_HEADERS + ) + self._create_dummy_implied_role() raw_output = self.openstack('implied role list') - items = self.parse_listing(raw_output) - self.assert_table_structure(items, self.IMPLIED_ROLE_LIST_HEADERS) - self.assertEqual(3, len(items)) + current_roles = self.parse_listing(raw_output) + self.assert_table_structure( + current_roles, self.IMPLIED_ROLE_LIST_HEADERS + ) + self.assertEqual(len(default_roles) + 1, len(current_roles)) def test_implied_role_create(self): role_name = self._create_dummy_role() implied_role_name = self._create_dummy_role() self.openstack( 'implied role create ' - '--implied-role %(implied_role)s ' - '%(role)s' % {'implied_role': implied_role_name, - 'role': role_name}) + f'--implied-role {implied_role_name} ' + f'{role_name}' + ) def test_implied_role_delete(self): implied_role_name, role_name = self._create_dummy_implied_role() raw_output = self.openstack( 'implied role delete ' - '--implied-role %(implied_role)s ' - '%(role)s' % {'implied_role': implied_role_name, - 'role': role_name}) + f'--implied-role {implied_role_name} ' + f'{role_name}' + ) self.assertEqual(0, 
len(raw_output)) diff --git a/openstackclient/tests/functional/identity/v3/test_role_assignment.py b/openstackclient/tests/functional/identity/v3/test_role_assignment.py new file mode 100644 index 0000000000..1255841afe --- /dev/null +++ b/openstackclient/tests/functional/identity/v3/test_role_assignment.py @@ -0,0 +1,327 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstackclient.tests.functional.identity.v3 import common + + +class RoleAssignmentTests(common.IdentityTests): + def test_role_assignment_list_no_filters(self): + raw_output = self.openstack('role assignment list') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + def test_role_assignment_list_user_role_system(self): + role_name = self._create_dummy_role() + username = self._create_dummy_user() + system = 'all' + raw_output = self.openstack( + f'role add --user {username} --system {system} {role_name}' + ) + self.addCleanup( + self.openstack, + f'role remove --user {username} --system {system} {role_name}', + ) + self.assertEqual(0, len(raw_output)) + + raw_output = self.openstack(f'role assignment list --user {username} ') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + raw_output = self.openstack( + f'role assignment list --role {role_name} ' + ) + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + raw_output = self.openstack(f'role assignment list --system {system} ') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + def test_role_assignment_list_group(self): + role_name = self._create_dummy_role() + group = self._create_dummy_group() + system = 'all' + raw_output = self.openstack( + f'role add --group {group} --system {system} {role_name}' + ) + self.addCleanup( + self.openstack, + f'role remove --group {group} --system {system} {role_name}', + ) + self.assertEqual(0, len(raw_output)) + raw_output = self.openstack(f'role assignment list --group {group} ') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + def test_role_assignment_list_group_domain(self): + domain_name_A = self._create_dummy_domain() + domain_name_B = self._create_dummy_domain() + role_name = self._create_dummy_role() + group_name = 'group_name' + self.openstack(f'group create --domain {domain_name_A} {group_name}') + self.addCleanup( + self.openstack, + f'group delete --domain {domain_name_A} {group_name}', + ) + self.openstack(f'group create --domain {domain_name_B} {group_name}') + self.addCleanup( + self.openstack, + f'group delete --domain {domain_name_B} {group_name}', + ) + raw_output = self.openstack( + 'role add ' + f'--project {self.project_name} ' + f'--group {group_name} --group-domain {domain_name_A} ' + f'{role_name}' + ) + self.addCleanup( + self.openstack, + 'role remove ' + f'--project {self.project_name} 
' + f'--group {group_name} --group-domain {domain_name_A} ' + f'{role_name}', + ) + self.assertEqual('', raw_output.strip()) + raw_output = self.openstack( + f'role assignment list ' + f'--group {group_name} --group-domain {domain_name_A} ' + ) + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + raw_output = self.openstack( + f'role assignment list ' + f'--group {group_name} --group-domain {domain_name_B} ' + ) + self.assertEqual('', raw_output.strip()) + + def test_role_assignment_list_domain(self): + role_name = self._create_dummy_role() + username = self._create_dummy_user() + raw_output = self.openstack( + 'role add ' + f'--domain {self.domain_name} ' + f'--user {username} ' + f'{role_name}' + ) + self.addCleanup( + self.openstack, + 'role remove ' + f'--domain {self.domain_name} ' + f'--user {username} ' + f'{role_name}', + ) + self.assertEqual(0, len(raw_output)) + raw_output = self.openstack( + f'role assignment list --domain {self.domain_name} ' + ) + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + def test_role_assignment_list_user_domain(self): + domain_name_A = self._create_dummy_domain() + domain_name_B = self._create_dummy_domain() + role_name = self._create_dummy_role() + username = 'username' + self.openstack(f'user create --domain {domain_name_A} {username}') + self.addCleanup( + self.openstack, f'user delete --domain {domain_name_A} {username}' + ) + self.openstack(f'user create --domain {domain_name_B} {username}') + self.addCleanup( + self.openstack, f'user delete --domain {domain_name_B} {username}' + ) + raw_output = self.openstack( + 'role add ' + f'--project {self.project_name} ' + f'--user {username} --user-domain {domain_name_A} ' + f'{role_name}' + ) + self.addCleanup( + self.openstack, + 'role remove ' + f'--project {self.project_name} ' + f'--user {username} --user-domain {domain_name_A} ' + f'{role_name}', + ) + self.assertEqual('', raw_output.strip()) + raw_output = self.openstack( + f'role assignment list ' + f'--user {username} --user-domain {domain_name_A} ' + ) + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + raw_output = self.openstack( + f'role assignment list ' + f'--user {username} --user-domain {domain_name_B} ' + ) + self.assertEqual('', raw_output.strip()) + + def test_role_assignment_list_role_domain(self): + domain_name_A = self._create_dummy_domain() + domain_name_B = self._create_dummy_domain() + role_name = 'role_name' + username = 'username' + self.openstack(f'role create --domain {domain_name_A} {role_name}') + self.addCleanup( + self.openstack, f'role delete --domain {domain_name_A} {role_name}' + ) + self.openstack(f'role create --domain {domain_name_B} {role_name}') + self.addCleanup( + self.openstack, f'role delete --domain {domain_name_B} {role_name}' + ) + self.openstack(f'user create --domain {domain_name_A} {username}') + self.addCleanup( + self.openstack, f'user delete --domain {domain_name_A} {username}' + ) + raw_output = self.openstack( + 'role add ' + f'--user {username} --domain {domain_name_A} ' + f'--role-domain {domain_name_A} ' + f'{role_name}' + ) + self.addCleanup( + self.openstack, + 'role remove ' + f'--user {username} --domain {domain_name_A} ' + f'--role-domain {domain_name_A} ' + f'{role_name}', + ) + self.assertEqual('', raw_output.strip()) + raw_output = self.openstack( + f'role assignment list ' + f'--role {role_name} 
--role-domain {domain_name_A}' + ) + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + raw_output = self.openstack( + f'role assignment list ' + f'--role {role_name} --role-domain {domain_name_B}' + ) + items = self.parse_listing(raw_output) + self.assertEqual('', raw_output.strip()) + + def test_role_assignment_list_project(self): + role_name = self._create_dummy_role() + username = self._create_dummy_user() + raw_output = self.openstack( + 'role add ' + f'--project {self.project_name} ' + f'--user {username} ' + f'{role_name}' + ) + self.addCleanup( + self.openstack, + 'role remove ' + f'--project {self.project_name} ' + f'--user {username} ' + f'{role_name}', + ) + self.assertEqual(0, len(raw_output)) + raw_output = self.openstack( + f'role assignment list --project {self.project_name} ' + ) + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + def test_role_assignment_list_project_domain(self): + domain_name_A = self._create_dummy_domain() + domain_name_B = self._create_dummy_domain() + role_name = self._create_dummy_role() + project_name = 'project_name' + username = 'username' + self.openstack( + f'project create --domain {domain_name_A} {project_name}' + ) + self.addCleanup( + self.openstack, + f'project delete --domain {domain_name_A} {project_name}', + ) + self.openstack( + f'project create --domain {domain_name_B} {project_name}' + ) + self.addCleanup( + self.openstack, + f'project delete --domain {domain_name_B} {project_name}', + ) + self.openstack(f'user create --domain {domain_name_A} {username}') + self.addCleanup( + self.openstack, f'user delete --domain {domain_name_A} {username}' + ) + raw_output = self.openstack( + 'role add ' + f'--project {project_name} --project-domain {domain_name_A} ' + f'--user {username} --user-domain {domain_name_A} ' + f'{role_name}' + ) + self.addCleanup( + self.openstack, + 'role remove ' + f'--project {project_name} --project-domain {domain_name_A} ' + f'--user {username} --user-domain {domain_name_A} ' + f'{role_name}', + ) + self.assertEqual('', raw_output.strip()) + raw_output = self.openstack( + f'role assignment list ' + f'--project {project_name} --project-domain {domain_name_A} ' + ) + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + raw_output = self.openstack( + f'role assignment list ' + f'--project {project_name} --project-domain {domain_name_B} ' + ) + self.assertEqual('', raw_output.strip()) + + def test_role_assignment_list_effective(self): + raw_output = self.openstack('role assignment list --effective') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + def test_role_assignment_list_auth_user(self): + raw_output = self.openstack('role assignment list --auth-user') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + def test_role_assignment_list_auth_project(self): + raw_output = self.openstack('role assignment list --auth-project') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + def test_role_assignment_list_inherited(self): + role_name = self._create_dummy_role() + username = self._create_dummy_user() + raw_output = self.openstack( + 'role add ' + f'--project {self.project_name} ' + f'--user {username} ' + '--inherited ' + f'{role_name}' + ) + 
self.addCleanup( + self.openstack, + 'role remove ' + f'--project {self.project_name} ' + f'--user {username} ' + '--inherited ' + f'{role_name}', + ) + self.assertEqual(0, len(raw_output)) + + raw_output = self.openstack('role assignment list --inherited') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) + + def test_role_assignment_list_names(self): + raw_output = self.openstack('role assignment list --names') + items = self.parse_listing(raw_output) + self.assert_table_structure(items, self.ROLE_ASSIGNMENT_LIST_HEADERS) diff --git a/openstackclient/tests/functional/identity/v3/test_service.py b/openstackclient/tests/functional/identity/v3/test_service.py index 1ecda45af2..7e102bacb7 100644 --- a/openstackclient/tests/functional/identity/v3/test_service.py +++ b/openstackclient/tests/functional/identity/v3/test_service.py @@ -16,20 +16,18 @@ class ServiceTests(common.IdentityTests): - def test_service_create(self): self._create_dummy_service() def test_service_delete(self): service_name = self._create_dummy_service(add_clean_up=False) - raw_output = self.openstack('service delete %s' % service_name) + raw_output = self.openstack(f'service delete {service_name}') self.assertEqual(0, len(raw_output)) def test_service_multi_delete(self): service_1 = self._create_dummy_service(add_clean_up=False) service_2 = self._create_dummy_service(add_clean_up=False) - raw_output = self.openstack( - 'service delete %s %s' % (service_1, service_2)) + raw_output = self.openstack(f'service delete {service_1} {service_2}') self.assertEqual(0, len(raw_output)) def test_service_list(self): @@ -46,17 +44,15 @@ def test_service_set(self): new_service_type = data_utils.rand_name('NewTestType') raw_output = self.openstack( 'service set ' - '--type %(type)s ' - '--name %(name)s ' - '--description %(description)s ' + f'--type {new_service_type} ' + f'--name {new_service_name} ' + f'--description {new_service_description} ' '--disable ' - '%(service)s' % {'type': new_service_type, - 'name': new_service_name, - 'description': new_service_description, - 'service': service_name}) + f'{service_name}' + ) self.assertEqual(0, len(raw_output)) # get service details - raw_output = self.openstack('service show %s' % new_service_name) + raw_output = self.openstack(f'service show {new_service_name}') # assert service details service = self.parse_show_as_object(raw_output) self.assertEqual(new_service_type, service['type']) @@ -65,7 +61,6 @@ def test_service_set(self): def test_service_show(self): service_name = self._create_dummy_service() - raw_output = self.openstack( - 'service show %s' % service_name) + raw_output = self.openstack(f'service show {service_name}') items = self.parse_show(raw_output) self.assert_show_fields(items, self.SERVICE_FIELDS) diff --git a/openstackclient/tests/functional/identity/v3/test_service_provider.py b/openstackclient/tests/functional/identity/v3/test_service_provider.py index 32b7a463d1..330f938c8b 100644 --- a/openstackclient/tests/functional/identity/v3/test_service_provider.py +++ b/openstackclient/tests/functional/identity/v3/test_service_provider.py @@ -23,21 +23,22 @@ def test_sp_create(self): def test_sp_delete(self): service_provider = self._create_dummy_sp(add_clean_up=False) - raw_output = self.openstack('service provider delete %s' - % service_provider) + raw_output = self.openstack( + f'service provider delete {service_provider}' + ) self.assertEqual(0, len(raw_output)) def test_sp_multi_delete(self): sp1 = 
self._create_dummy_sp(add_clean_up=False) sp2 = self._create_dummy_sp(add_clean_up=False) - raw_output = self.openstack( - 'service provider delete %s %s' % (sp1, sp2)) + raw_output = self.openstack(f'service provider delete {sp1} {sp2}') self.assertEqual(0, len(raw_output)) def test_sp_show(self): service_provider = self._create_dummy_sp(add_clean_up=True) - raw_output = self.openstack('service provider show %s' - % service_provider) + raw_output = self.openstack( + f'service provider show {service_provider}' + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.SERVICE_PROVIDER_FIELDS) @@ -50,13 +51,10 @@ def test_sp_list(self): def test_sp_set(self): service_provider = self._create_dummy_sp(add_clean_up=True) new_description = data_utils.rand_name('newDescription') - raw_output = self.openstack('service provider set ' - '%(service-provider)s ' - '--description %(description)s ' - % {'service-provider': service_provider, - 'description': new_description}) - self.assertEqual(0, len(raw_output)) - raw_output = self.openstack('service provider show %s' - % service_provider) + raw_output = self.openstack( + f'service provider set ' + f'{service_provider} ' + f'--description {new_description}' + ) updated_value = self.parse_show_as_object(raw_output) - self.assertIn(new_description, updated_value['description']) + self.assertEqual(new_description, updated_value.get('description')) diff --git a/openstackclient/tests/functional/identity/v3/test_token.py b/openstackclient/tests/functional/identity/v3/test_token.py index 62e90003fc..b6b0d8b155 100644 --- a/openstackclient/tests/functional/identity/v3/test_token.py +++ b/openstackclient/tests/functional/identity/v3/test_token.py @@ -14,7 +14,6 @@ class TokenTests(common.IdentityTests): - def test_token_issue(self): raw_output = self.openstack('token issue') items = self.parse_show(raw_output) diff --git a/openstackclient/tests/functional/identity/v3/test_user.py b/openstackclient/tests/functional/identity/v3/test_user.py index 9e9bde96c0..dd56293e63 100644 --- a/openstackclient/tests/functional/identity/v3/test_user.py +++ b/openstackclient/tests/functional/identity/v3/test_user.py @@ -16,16 +16,14 @@ class UserTests(common.IdentityTests): - def test_user_create(self): self._create_dummy_user() def test_user_delete(self): username = self._create_dummy_user(add_clean_up=False) - raw_output = self.openstack('user delete ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': username}) + raw_output = self.openstack( + f'user delete --domain {self.domain_name} {username}' + ) self.assertEqual(0, len(raw_output)) def test_user_list(self): @@ -35,24 +33,21 @@ def test_user_list(self): def test_user_set(self): username = self._create_dummy_user() - raw_output = self.openstack('user show ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': username}) + raw_output = self.openstack( + f'user show --domain {self.domain_name} {username}' + ) user = self.parse_show_as_object(raw_output) new_username = data_utils.rand_name('NewTestUser') new_email = data_utils.rand_name() + '@example.com' - raw_output = self.openstack('user set ' - '--email %(email)s ' - '--name %(new_name)s ' - '%(id)s' % {'email': new_email, - 'new_name': new_username, - 'id': user['id']}) + raw_output = self.openstack( + 'user set --email {email} --name {new_name} {id}'.format( + email=new_email, new_name=new_username, id=user['id'] + ) + ) self.assertEqual(0, len(raw_output)) - raw_output = self.openstack('user show 
' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': new_username}) + raw_output = self.openstack( + f'user show --domain {self.domain_name} {new_username}' + ) updated_user = self.parse_show_as_object(raw_output) self.assertEqual(user['id'], updated_user['id']) self.assertEqual(new_email, updated_user['email']) @@ -61,31 +56,31 @@ def test_user_set_default_project_id(self): username = self._create_dummy_user() project_name = self._create_dummy_project() # get original user details - raw_output = self.openstack('user show ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': username}) + raw_output = self.openstack( + f'user show --domain {self.domain_name} {username}' + ) user = self.parse_show_as_object(raw_output) # update user - raw_output = self.openstack('user set ' - '--project %(project)s ' - '--project-domain %(project_domain)s ' - '%(id)s' % {'project': project_name, - 'project_domain': - self.domain_name, - 'id': user['id']}) + raw_output = self.openstack( + 'user set ' + '--project {project} ' + '--project-domain {project_domain} ' + '{id}'.format( + project=project_name, + project_domain=self.domain_name, + id=user['id'], + ) + ) self.assertEqual(0, len(raw_output)) # get updated user details - raw_output = self.openstack('user show ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': username}) + raw_output = self.openstack( + f'user show --domain {self.domain_name} {username}' + ) updated_user = self.parse_show_as_object(raw_output) # get project details - raw_output = self.openstack('project show ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': project_name}) + raw_output = self.openstack( + f'project show --domain {self.domain_name} {project_name}' + ) project = self.parse_show_as_object(raw_output) # check updated user details self.assertEqual(user['id'], updated_user['id']) @@ -93,9 +88,8 @@ def test_user_set_default_project_id(self): def test_user_show(self): username = self._create_dummy_user() - raw_output = self.openstack('user show ' - '--domain %(domain)s ' - '%(name)s' % {'domain': self.domain_name, - 'name': username}) + raw_output = self.openstack( + f'user show --domain {self.domain_name} {username}' + ) items = self.parse_show(raw_output) self.assert_show_fields(items, self.USER_FIELDS) diff --git a/openstackclient/tests/functional/image/base.py b/openstackclient/tests/functional/image/base.py index 4b2ab64b73..d948f81551 100644 --- a/openstackclient/tests/functional/image/base.py +++ b/openstackclient/tests/functional/image/base.py @@ -18,7 +18,7 @@ class BaseImageTests(base.TestCase): @classmethod def setUpClass(cls): - super(BaseImageTests, cls).setUpClass() + super().setUpClass() # TODO(dtroyer): maybe do image API discovery here to determine # what is available, it isn't in the service catalog cls.haz_v1_api = False diff --git a/openstackclient/tests/functional/image/v1/test_image.py b/openstackclient/tests/functional/image/v1/test_image.py index 2b4d8f41fc..c4118babba 100644 --- a/openstackclient/tests/functional/image/v1/test_image.py +++ b/openstackclient/tests/functional/image/v1/test_image.py @@ -26,9 +26,7 @@ def setUp(self): if not self.haz_v1_api: self.skipTest('No Image v1 API present') - ver_fixture = fixtures.EnvironmentVariable( - 'OS_IMAGE_API_VERSION', '1' - ) + ver_fixture = fixtures.EnvironmentVariable('OS_IMAGE_API_VERSION', '1') self.useFixture(ver_fixture) self.name = uuid.uuid4().hex @@ -46,25 +44,20 @@ def tearDown(self): 
super().tearDown() def test_image_list(self): - output = self.openstack( - 'image list' - ) - self.assertIn( - self.name, - [img['Name'] for img in output] - ) + output = self.openstack('image list') + self.assertIn(self.name, [img['Name'] for img in output]) def test_image_attributes(self): """Test set, unset, show on attributes, tags and properties""" # Test explicit attributes self.openstack( - 'image set ' + - '--min-disk 4 ' + - '--min-ram 5 ' + - '--disk-format qcow2 ' + - '--public ' + - self.name + 'image set ' + + '--min-disk 4 ' + + '--min-ram 5 ' + + '--disk-format qcow2 ' + + '--public ' + + self.name ) output = self.openstack( 'image show ' + self.name, @@ -88,11 +81,11 @@ def test_image_attributes(self): # Test properties self.openstack( - 'image set ' + - '--property a=b ' + - '--property c=d ' + - '--public ' + - self.name + 'image set ' + + '--property a=b ' + + '--property c=d ' + + '--public ' + + self.name ) output = self.openstack( 'image show ' + self.name, diff --git a/openstackclient/tests/functional/image/v2/test_cache.py b/openstackclient/tests/functional/image/v2/test_cache.py new file mode 100644 index 0000000000..58245e5ca8 --- /dev/null +++ b/openstackclient/tests/functional/image/v2/test_cache.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstackclient.tests.functional.image import base + + +class CacheTests(base.BaseImageTests): + """Functional tests for Cache commands""" + + def test_cached_image(self): + """Test cached image operations including queue and clear""" + # Create test image + name = uuid.uuid4().hex + output = self.openstack( + f'image create {name}', + parse_output=True, + ) + image_id = output["id"] + self.assertOutput(name, output['name']) + + # Register cleanup for created image + self.addCleanup( + self.openstack, 'cached image delete ' + image_id, fail_ok=True + ) + self.addCleanup(self.openstack, 'image delete ' + image_id) + + # Queue image for caching + self.openstack('cached image queue ' + image_id) + + # Verify queuing worked + cache_output = self.openstack('cached image list', parse_output=True) + self.assertIsInstance(cache_output, list) + image_ids = [img['ID'] for img in cache_output] + self.assertIn(image_id, image_ids) + + # Clear cached images + self.openstack('cached image clear') + + # Verify clearing worked + output = self.openstack('cached image list', parse_output=True) + if output: + image_ids = [img['ID'] for img in output] + self.assertNotIn(image_id, image_ids) diff --git a/openstackclient/tests/functional/image/v2/test_image.py b/openstackclient/tests/functional/image/v2/test_image.py index 3535bd7eee..828488c42c 100644 --- a/openstackclient/tests/functional/image/v2/test_image.py +++ b/openstackclient/tests/functional/image/v2/test_image.py @@ -21,19 +21,16 @@ class ImageTests(base.BaseImageTests): """Functional tests for Image commands""" def setUp(self): - super(ImageTests, self).setUp() + super().setUp() - ver_fixture = fixtures.EnvironmentVariable( - 'OS_IMAGE_API_VERSION', '2' - ) + ver_fixture = fixtures.EnvironmentVariable('OS_IMAGE_API_VERSION', '2') self.useFixture(ver_fixture) self.name = uuid.uuid4().hex self.image_tag = 'my_tag' self.image_tag1 = 'random' output = self.openstack( - 'image create --tag {tag} {name}'.format( - tag=self.image_tag, name=self.name), + f'image create --tag {self.image_tag} {self.name}', parse_output=True, ) self.image_id = output["id"] @@ -47,57 +44,45 @@ def tearDown(self): def test_image_list(self): output = self.openstack('image list', parse_output=True) - self.assertIn( - self.name, - [img['Name'] for img in output] - ) + self.assertIn(self.name, [img['Name'] for img in output]) def test_image_list_with_name_filter(self): output = self.openstack( 'image list --name ' + self.name, parse_output=True, ) - self.assertIn( - self.name, - [img['Name'] for img in output] - ) + self.assertIn(self.name, [img['Name'] for img in output]) def test_image_list_with_status_filter(self): output = self.openstack( 'image list --status active', parse_output=True, ) - self.assertIn( - 'active', - [img['Status'] for img in output] - ) + self.assertIn('active', [img['Status'] for img in output]) def test_image_list_with_tag_filter(self): output = self.openstack( - 'image list --tag ' + self.image_tag + ' --tag ' + - self.image_tag1 + ' --long', + 'image list --tag ' + + self.image_tag + + ' --tag ' + + self.image_tag1 + + ' --long', parse_output=True, ) for taglist in [img['Tags'] for img in output]: - self.assertIn( - self.image_tag, - taglist - ) - self.assertIn( - self.image_tag1, - taglist - ) + self.assertIn(self.image_tag, taglist) + self.assertIn(self.image_tag1, taglist) def test_image_attributes(self): """Test set, unset, show on attributes, tags and properties""" # Test explicit attributes self.openstack( - 'image set ' + - 
'--min-disk 4 ' + - '--min-ram 5 ' + - '--public ' + - self.name + 'image set ' + + '--min-disk 4 ' + + '--min-ram 5 ' + + '--public ' + + self.name ) output = self.openstack( 'image show ' + self.name, @@ -118,12 +103,12 @@ def test_image_attributes(self): # Test properties self.openstack( - 'image set ' + - '--property a=b ' + - '--property c=d ' + - '--property hw_rng_model=virtio ' + - '--public ' + - self.name + 'image set ' + + '--property a=b ' + + '--property c=d ' + + '--property hw_rng_model=virtio ' + + '--public ' + + self.name ) output = self.openstack( 'image show ' + self.name, @@ -133,11 +118,11 @@ def test_image_attributes(self): self.assertIn("c", output["properties"]) self.openstack( - 'image unset ' + - '--property a ' + - '--property c ' + - '--property hw_rng_model ' + - self.name + 'image unset ' + + '--property a ' + + '--property c ' + + '--property hw_rng_model ' + + self.name ) output = self.openstack( 'image show ' + self.name, @@ -147,37 +132,20 @@ def test_image_attributes(self): self.assertNotIn("c", output["properties"]) # Test tags - self.assertNotIn( - '01', - output["tags"] - ) - self.openstack( - 'image set ' + - '--tag 01 ' + - self.name - ) + self.assertNotIn('01', output["tags"]) + self.openstack('image set ' + '--tag 01 ' + self.name) output = self.openstack( 'image show ' + self.name, parse_output=True, ) - self.assertIn( - '01', - output["tags"] - ) + self.assertIn('01', output["tags"]) - self.openstack( - 'image unset ' + - '--tag 01 ' + - self.name - ) + self.openstack('image unset ' + '--tag 01 ' + self.name) output = self.openstack( 'image show ' + self.name, parse_output=True, ) - self.assertNotIn( - '01', - output["tags"] - ) + self.assertNotIn('01', output["tags"]) def test_image_set_rename(self): name = uuid.uuid4().hex @@ -190,11 +158,7 @@ def test_image_set_rename(self): name, output["name"], ) - self.openstack( - 'image set ' + - '--name ' + name + 'xx ' + - image_id - ) + self.openstack('image set ' + '--name ' + name + 'xx ' + image_id) output = self.openstack( 'image show ' + name + 'xx', parse_output=True, @@ -216,8 +180,7 @@ def test_image_members(self): my_project_id = output['project_id'] output = self.openstack( - 'image show -f json ' + - self.name, + 'image show -f json ' + self.name, parse_output=True, ) # NOTE(dtroyer): Until OSC supports --shared flags in create and set @@ -225,9 +188,7 @@ def test_image_members(self): # images are shared and sometimes they are not. 
if output["visibility"] == 'shared': self.openstack( - 'image add project ' + - self.name + ' ' + - my_project_id + 'image add project ' + self.name + ' ' + my_project_id ) # self.addCleanup( # self.openstack, @@ -236,29 +197,16 @@ def test_image_members(self): # my_project_id # ) - self.openstack( - 'image set ' + - '--accept ' + - self.name - ) + self.openstack('image set ' + '--accept ' + self.name) output = self.openstack( - 'image list -f json ' + - '--shared', + 'image list -f json ' + '--shared', parse_output=True, ) - self.assertIn( - self.name, - [img['Name'] for img in output] - ) + self.assertIn(self.name, [img['Name'] for img in output]) - self.openstack( - 'image set ' + - '--reject ' + - self.name - ) + self.openstack('image set ' + '--reject ' + self.name) output = self.openstack( - 'image list -f json ' + - '--shared', + 'image list -f json ' + '--shared', parse_output=True, ) # self.assertNotIn( @@ -267,22 +215,42 @@ def test_image_members(self): # ) self.openstack( - 'image remove project ' + - self.name + ' ' + - my_project_id + 'image remove project ' + self.name + ' ' + my_project_id ) - # else: - # # Test not shared - # self.assertRaises( - # image_exceptions.HTTPForbidden, - # self.openstack, - # 'image add project ' + - # self.name + ' ' + - # my_project_id - # ) - # self.openstack( - # 'image set ' + - # '--share ' + - # self.name - # ) + def test_image_hidden(self): + # Test image is shown in list + output = self.openstack( + 'image list', + parse_output=True, + ) + self.assertIn( + self.name, + [img['Name'] for img in output], + ) + + # Hide the image and test image not show in the list + self.openstack('image set ' + '--hidden ' + self.name) + output = self.openstack( + 'image list', + parse_output=True, + ) + self.assertNotIn(self.name, [img['Name'] for img in output]) + + # Test image show in the list with flag + output = self.openstack( + 'image list', + parse_output=True, + ) + self.assertNotIn(self.name, [img['Name'] for img in output]) + + # Unhide the image and test image is again visible in regular list + self.openstack('image set ' + '--unhidden ' + self.name) + output = self.openstack( + 'image list', + parse_output=True, + ) + self.assertIn( + self.name, + [img['Name'] for img in output], + ) diff --git a/openstackclient/tests/functional/volume/v1/common.py b/openstackclient/tests/functional/image/v2/test_info.py similarity index 52% rename from openstackclient/tests/functional/volume/v1/common.py rename to openstackclient/tests/functional/image/v2/test_info.py index 755874785d..c601ea082c 100644 --- a/openstackclient/tests/functional/volume/v1/common.py +++ b/openstackclient/tests/functional/image/v2/test_info.py @@ -1,3 +1,6 @@ +# Copyright 2023 Red Hat. +# All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -10,26 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. 
-import fixtures - -from openstackclient.tests.functional.volume import base as volume_base - +from openstackclient.tests.functional.image import base -class BaseVolumeTests(volume_base.BaseVolumeTests): - """Base class for Volume functional tests""" - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.haz_volume_v1 = cls.is_service_enabled('block-storage', '1.0') +class InfoTests(base.BaseImageTests): + """Functional tests for Info commands""" def setUp(self): super().setUp() - if not self.haz_volume_v1: - self.skipTest("No Volume v1 service present") + def tearDown(self): + super().tearDown() - ver_fixture = fixtures.EnvironmentVariable( - 'OS_VOLUME_API_VERSION', '1' - ) - self.useFixture(ver_fixture) + def test_image_import_info(self): + output = self.openstack('image import info', parse_output=True) + self.assertIsNotNone(output['import-methods']) diff --git a/openstackclient/tests/functional/image/v2/test_metadef_objects.py b/openstackclient/tests/functional/image/v2/test_metadef_objects.py new file mode 100644 index 0000000000..5216c933fd --- /dev/null +++ b/openstackclient/tests/functional/image/v2/test_metadef_objects.py @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstackclient.tests.functional import base + + +class MetadefObjectTests(base.TestCase): + def setUp(self): + super().setUp() + self.obj_name = self.getUniqueString('metadef-obj') + self.ns_name = self.getUniqueString('metadef-ns') + self.openstack(f"image metadef namespace create {self.ns_name}") + self.addCleanup( + lambda: self.openstack( + f"image metadef namespace delete {self.ns_name}" + ) + ) + + def test_metadef_objects(self): + # CREATE + created = self.openstack( + ( + "image metadef object create " + f"--namespace {self.ns_name} " + f"{self.obj_name}" + ), + parse_output=True, + ) + self.addCleanup( + lambda: self.openstack( + f"image metadef object delete {self.ns_name} {self.obj_name}" + ) + ) + self.assertEqual(self.obj_name, created["name"]) + self.assertEqual(self.ns_name, created["namespace_name"]) + + # UPDATE + new_name = f"{self.obj_name}-updated" + self.openstack( + "image metadef object update " + f"{self.ns_name} {self.obj_name} " + f"--name {new_name}" + ) + self.obj_name = new_name + + # READ (get) + shown = self.openstack( + f"image metadef object show {self.ns_name} {self.obj_name}", + parse_output=True, + ) + self.assertEqual(self.obj_name, shown["name"]) + self.assertEqual(self.ns_name, shown["namespace_name"]) + + # READ (list) + rows = self.openstack( + f"image metadef object list {self.ns_name}", + parse_output=True, + ) + names = {row["name"] for row in rows} + self.assertIn(self.obj_name, names) diff --git a/openstackclient/tests/functional/image/v2/test_metadef_resource_type.py b/openstackclient/tests/functional/image/v2/test_metadef_resource_type.py new file mode 100644 index 0000000000..ab8dc13ef0 --- /dev/null +++ b/openstackclient/tests/functional/image/v2/test_metadef_resource_type.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, 
Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstackclient.tests.functional.image import base + + +class ImageMetadefResourceTypeTests(base.BaseImageTests): + """Functional tests for image metadef resource type commands.""" + + def setUp(self): + super().setUp() + + # Create unique namespace name using UUID + self.namespace_name = 'test-mdef-ns-' + uuid.uuid4().hex + self.resource_type_name = 'test-mdef-rt-' + uuid.uuid4().hex + + # Create namespace + self.openstack('image metadef namespace create ' + self.namespace_name) + self.addCleanup( + self.openstack, + 'image metadef namespace delete ' + self.namespace_name, + ) + + def test_metadef_resource_type(self): + """Test image metadef resource type commands""" + + self.openstack( + 'image metadef resource type association create ' + f'{self.namespace_name} {self.resource_type_name}', + ) + self.addCleanup( + self.openstack, + 'image metadef resource type association delete ' + f'{self.namespace_name} {self.resource_type_name}', + ) + + output = self.openstack( + 'image metadef resource type list', + parse_output=True, + ) + + self.assertIn( + self.resource_type_name, [item['Name'] for item in output] + ) diff --git a/openstackclient/tests/functional/network/v2/common.py b/openstackclient/tests/functional/network/v2/common.py index a9c5b8303f..248758a843 100644 --- a/openstackclient/tests/functional/network/v2/common.py +++ b/openstackclient/tests/functional/network/v2/common.py @@ -20,14 +20,20 @@ class NetworkTests(base.TestCase): @classmethod def setUpClass(cls): - super(NetworkTests, cls).setUpClass() + super().setUpClass() cls.haz_network = cls.is_service_enabled('network') + def setUp(self): + super().setUp() + + if not self.haz_network: + self.skipTest("No Network service present") + class NetworkTagTests(NetworkTests): """Functional tests with tag operation""" - base_command = None + base_command: str def test_tag_operation(self): # Get project IDs @@ -40,34 +46,41 @@ def test_tag_operation(self): # Network create with no options name1 = self._create_resource_and_tag_check('', []) # Network create with tags - name2 = self._create_resource_and_tag_check('--tag red --tag blue', - ['red', 'blue']) + name2 = self._create_resource_and_tag_check( + '--tag red --tag blue', ['red', 'blue'] + ) # Network create with no tag explicitly name3 = self._create_resource_and_tag_check('--no-tag', []) - self._set_resource_and_tag_check('set', name1, '--tag red --tag green', - ['red', 'green']) + self._set_resource_and_tag_check( + 'set', name1, '--tag red --tag green', ['red', 'green'] + ) - list_expected = ((name1, ['red', 'green']), - (name2, ['red', 'blue']), - (name3, [])) + list_expected: tuple[tuple[str, list[str]], ...] 
= ( + (name1, ['red', 'green']), + (name2, ['red', 'blue']), + (name3, []), + ) self._list_tag_check(auth_project_id, list_expected) - self._set_resource_and_tag_check('set', name1, '--tag blue', - ['red', 'green', 'blue']) self._set_resource_and_tag_check( - 'set', name1, + 'set', name1, '--tag blue', ['red', 'green', 'blue'] + ) + self._set_resource_and_tag_check( + 'set', + name1, '--no-tag --tag yellow --tag orange --tag purple', - ['yellow', 'orange', 'purple']) - self._set_resource_and_tag_check('unset', name1, '--tag yellow', - ['orange', 'purple']) + ['yellow', 'orange', 'purple'], + ) + self._set_resource_and_tag_check( + 'unset', name1, '--tag yellow', ['orange', 'purple'] + ) self._set_resource_and_tag_check('unset', name1, '--all-tag', []) self._set_resource_and_tag_check('set', name2, '--no-tag', []) def _list_tag_check(self, project_id, expected): cmd_output = self.openstack( - '{} list --long --project {}'.format(self.base_command, - project_id), + f'{self.base_command} list --long --project {project_id}', parse_output=True, ) for name, tags in expected: @@ -76,26 +89,29 @@ def _list_tag_check(self, project_id, expected): def _create_resource_for_tag_test(self, name, args): return self.openstack( - '{} create {} {}'.format(self.base_command, args, name), + f'{self.base_command} create {args} {name}', parse_output=True, ) - def _create_resource_and_tag_check(self, args, expected): + def _create_resource_and_tag_check( + self, + args: str, + expected: list[str], + ) -> str: name = uuid.uuid4().hex cmd_output = self._create_resource_for_tag_test(name, args) - self.addCleanup( - self.openstack, '{} delete {}'.format(self.base_command, name)) + self.addCleanup(self.openstack, f'{self.base_command} delete {name}') self.assertIsNotNone(cmd_output["id"]) self.assertEqual(set(expected), set(cmd_output['tags'])) return name def _set_resource_and_tag_check(self, command, name, args, expected): cmd_output = self.openstack( - '{} {} {} {}'.format(self.base_command, command, args, name) + f'{self.base_command} {command} {args} {name}' ) self.assertFalse(cmd_output) cmd_output = self.openstack( - '{} show {}'.format(self.base_command, name), + f'{self.base_command} show {name}', parse_output=True, ) self.assertEqual(set(expected), set(cmd_output['tags'])) diff --git a/openstackclient/tests/functional/network/v2/test_address_group.py b/openstackclient/tests/functional/network/v2/test_address_group.py index 17ab2362b4..6bc13c75d0 100644 --- a/openstackclient/tests/functional/network/v2/test_address_group.py +++ b/openstackclient/tests/functional/network/v2/test_address_group.py @@ -19,10 +19,8 @@ class AddressGroupTests(common.NetworkTests): """Functional tests for address group""" def setUp(self): - super(AddressGroupTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() + if not self.is_extension_enabled('address-group'): self.skipTest("No address-group extension present") @@ -30,8 +28,7 @@ def test_address_group_create_and_delete(self): """Test create, delete multiple""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'address group create ' + - name1, + 'address group create ' + name1, parse_output=True, ) self.assertEqual( @@ -41,8 +38,7 @@ def test_address_group_create_and_delete(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'address group create ' + - name2, + 'address group create ' + name2, parse_output=True, ) self.assertEqual( @@ -79,10 +75,13 @@ def 
test_address_group_list(self): self.assertNotEqual(admin_project_id, demo_project_id) self.assertEqual(admin_project_id, auth_project_id) + # type narrow + assert admin_project_id is not None + assert demo_project_id is not None + name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'address group create ' + - name1, + 'address group create ' + name1, parse_output=True, ) self.addCleanup(self.openstack, 'address group delete ' + name1) @@ -93,9 +92,11 @@ def test_address_group_list(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'address group create ' + - '--project ' + demo_project_id + - ' ' + name2, + 'address group create ' + + '--project ' + + demo_project_id + + ' ' + + name2, parse_output=True, ) self.addCleanup(self.openstack, 'address group delete ' + name2) @@ -115,8 +116,7 @@ def test_address_group_list(self): # Test list --project cmd_output = self.openstack( - 'address group list ' + - '--project ' + demo_project_id, + 'address group list ' + '--project ' + demo_project_id, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -125,8 +125,7 @@ def test_address_group_list(self): # Test list --name cmd_output = self.openstack( - 'address group list ' + - '--name ' + name1, + 'address group list ' + '--name ' + name1, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -138,10 +137,10 @@ def test_address_group_set_unset_and_show(self): name = uuid.uuid4().hex newname = name + "_" cmd_output = self.openstack( - 'address group create ' + - '--description aaaa ' + - '--address 10.0.0.1 --address 2001::/16 ' + - name, + 'address group create ' + + '--description aaaa ' + + '--address 10.0.0.1 --address 2001::/16 ' + + name, parse_output=True, ) self.addCleanup(self.openstack, 'address group delete ' + newname) @@ -151,18 +150,19 @@ def test_address_group_set_unset_and_show(self): # Test set name, description and address raw_output = self.openstack( - 'address group set ' + - '--name ' + newname + ' ' + - '--description bbbb ' + - '--address 10.0.0.2 --address 192.0.0.0/8 ' + - name, + 'address group set ' + + '--name ' + + newname + + ' ' + + '--description bbbb ' + + '--address 10.0.0.2 --address 192.0.0.0/8 ' + + name, ) self.assertOutput('', raw_output) # Show the updated address group cmd_output = self.openstack( - 'address group show ' + - newname, + 'address group show ' + newname, parse_output=True, ) self.assertEqual(newname, cmd_output['name']) @@ -171,16 +171,15 @@ def test_address_group_set_unset_and_show(self): # Test unset address raw_output = self.openstack( - 'address group unset ' + - '--address 10.0.0.1 --address 2001::/16 ' + - '--address 10.0.0.2 --address 192.0.0.0/8 ' + - newname, + 'address group unset ' + + '--address 10.0.0.1 --address 2001::/16 ' + + '--address 10.0.0.2 --address 192.0.0.0/8 ' + + newname, ) self.assertEqual('', raw_output) cmd_output = self.openstack( - 'address group show ' + - newname, + 'address group show ' + newname, parse_output=True, ) self.assertEqual(0, len(cmd_output['addresses'])) diff --git a/openstackclient/tests/functional/network/v2/test_address_scope.py b/openstackclient/tests/functional/network/v2/test_address_scope.py index 8ebb9522b2..6aabae624a 100644 --- a/openstackclient/tests/functional/network/v2/test_address_scope.py +++ b/openstackclient/tests/functional/network/v2/test_address_scope.py @@ -23,18 +23,11 @@ class AddressScopeTests(common.NetworkTests): # has its own needs and there are collisions when running # tests in parallel. 
- def setUp(self): - super(AddressScopeTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") - def test_address_scope_delete(self): """Test create, delete multiple""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'address scope create ' + - name1, + 'address scope create ' + name1, parse_output=True, ) self.assertEqual( @@ -46,8 +39,7 @@ def test_address_scope_delete(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'address scope create ' + - name2, + 'address scope create ' + name2, parse_output=True, ) self.assertEqual( @@ -64,10 +56,7 @@ def test_address_scope_list(self): """Test create defaults, list filters, delete""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'address scope create ' + - '--ip-version 4 ' + - '--share ' + - name1, + 'address scope create ' + '--ip-version 4 ' + '--share ' + name1, parse_output=True, ) self.addCleanup(self.openstack, 'address scope delete ' + name1) @@ -83,10 +72,10 @@ def test_address_scope_list(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'address scope create ' + - '--ip-version 6 ' + - '--no-share ' + - name2, + 'address scope create ' + + '--ip-version 6 ' + + '--no-share ' + + name2, parse_output=True, ) self.addCleanup(self.openstack, 'address scope delete ' + name2) @@ -132,10 +121,7 @@ def test_address_scope_set(self): name = uuid.uuid4().hex newname = name + "_" cmd_output = self.openstack( - 'address scope create ' + - '--ip-version 4 ' + - '--no-share ' + - name, + 'address scope create ' + '--ip-version 4 ' + '--no-share ' + name, parse_output=True, ) self.addCleanup(self.openstack, 'address scope delete ' + newname) @@ -150,16 +136,12 @@ def test_address_scope_set(self): self.assertFalse(cmd_output['shared']) raw_output = self.openstack( - 'address scope set ' + - '--name ' + newname + - ' --share ' + - name, + 'address scope set ' + '--name ' + newname + ' --share ' + name, ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'address scope show ' + - newname, + 'address scope show ' + newname, parse_output=True, ) self.assertEqual( diff --git a/openstackclient/tests/functional/network/v2/test_default_security_group_rule.py b/openstackclient/tests/functional/network/v2/test_default_security_group_rule.py new file mode 100644 index 0000000000..1481c9a417 --- /dev/null +++ b/openstackclient/tests/functional/network/v2/test_default_security_group_rule.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import random + +from openstackclient.tests.functional.network.v2 import common + + +class SecurityGroupRuleTests(common.NetworkTests): + """Functional tests for security group rule""" + + def setUp(self): + super().setUp() + + if not self.is_extension_enabled("security-groups-default-rules"): + self.skipTest("No security-groups-default-rules extension present") + + self.port = random.randint(1, 65535) + self.protocol = random.choice(["tcp", "udp"]) + self.direction = random.choice(["ingress", "egress"]) + # Create the default security group rule. + cmd_output = self.openstack( + 'default security group rule create ' + f'--protocol {self.protocol} ' + f'--dst-port {self.port}:{self.port} ' + f'--{self.direction} --ethertype IPv4 ', + parse_output=True, + ) + self.addCleanup( + self.openstack, + 'default security group rule delete ' + cmd_output['id'], + ) + self.DEFAULT_SG_RULE_ID = cmd_output['id'] + + def test_security_group_rule_list(self): + cmd_output = self.openstack( + 'default security group rule list ', + parse_output=True, + ) + self.assertIn( + self.DEFAULT_SG_RULE_ID, [rule['ID'] for rule in cmd_output] + ) + + def test_security_group_rule_show(self): + cmd_output = self.openstack( + 'default security group rule show ' + self.DEFAULT_SG_RULE_ID, + parse_output=True, + ) + self.assertEqual(self.DEFAULT_SG_RULE_ID, cmd_output['id']) + self.assertEqual(self.protocol, cmd_output['protocol']) + self.assertEqual(self.port, cmd_output['port_range_min']) + self.assertEqual(self.port, cmd_output['port_range_max']) + self.assertEqual(self.direction, cmd_output['direction']) diff --git a/openstackclient/tests/functional/network/v2/test_floating_ip.py b/openstackclient/tests/functional/network/v2/test_floating_ip.py index 871cab2d54..a1b11a44a3 100644 --- a/openstackclient/tests/functional/network/v2/test_floating_ip.py +++ b/openstackclient/tests/functional/network/v2/test_floating_ip.py @@ -21,7 +21,7 @@ class FloatingIpTests(common.NetworkTests): @classmethod def setUpClass(cls): - common.NetworkTests.setUpClass() + super().setUpClass() if cls.haz_network: # Create common networks that all tests share cls.EXTERNAL_NETWORK_NAME = uuid.uuid4().hex @@ -29,17 +29,14 @@ def setUpClass(cls): # Create a network for the floating ip json_output = cls.openstack( - 'network create ' + - '--external ' + - cls.EXTERNAL_NETWORK_NAME, + 'network create ' + '--external ' + cls.EXTERNAL_NETWORK_NAME, parse_output=True, ) cls.external_network_id = json_output["id"] # Create a private network for the port json_output = cls.openstack( - 'network create ' + - cls.PRIVATE_NETWORK_NAME, + 'network create ' + cls.PRIVATE_NETWORK_NAME, parse_output=True, ) cls.private_network_id = json_output["id"] @@ -49,19 +46,17 @@ def tearDownClass(cls): try: if cls.haz_network: del_output = cls.openstack( - 'network delete ' + - cls.EXTERNAL_NETWORK_NAME + ' ' + - cls.PRIVATE_NETWORK_NAME + 'network delete ' + + cls.EXTERNAL_NETWORK_NAME + + ' ' + + cls.PRIVATE_NETWORK_NAME ) cls.assertOutput('', del_output) finally: - super(FloatingIpTests, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(FloatingIpTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() # Verify setup self.assertIsNotNone(self.external_network_id) @@ -76,23 +71,27 @@ def _create_subnet(self, network_name, subnet_name): # try 4 times for i in range(4): # Make a random subnet - subnet = ".".join(map( - str, - (random.randint(0, 
223) for _ in range(3)) - )) + ".0/26" + subnet = ( + ".".join(map(str, (random.randint(0, 223) for _ in range(3)))) + + ".0/26" + ) try: # Create a subnet for the network json_output = self.openstack( - 'subnet create ' + - '--network ' + network_name + ' ' + - '--subnet-range ' + subnet + ' ' + - subnet_name, + 'subnet create ' + + '--network ' + + network_name + + ' ' + + '--subnet-range ' + + subnet + + ' ' + + subnet_name, parse_output=True, ) self.assertIsNotNone(json_output["id"]) subnet_id = json_output["id"] except Exception: - if (i == 3): + if i == 3: # raise the exception at the last time raise pass @@ -106,15 +105,14 @@ def test_floating_ip_delete(self): # Subnets must exist even if not directly referenced here ext_subnet_id = self._create_subnet( - self.EXTERNAL_NETWORK_NAME, - "ext-test-delete" + self.EXTERNAL_NETWORK_NAME, "ext-test-delete" ) self.addCleanup(self.openstack, 'subnet delete ' + ext_subnet_id) json_output = self.openstack( - 'floating ip create ' + - '--description aaaa ' + - self.EXTERNAL_NETWORK_NAME, + 'floating ip create ' + + '--description aaaa ' + + self.EXTERNAL_NETWORK_NAME, parse_output=True, ) self.assertIsNotNone(json_output["id"]) @@ -125,9 +123,9 @@ def test_floating_ip_delete(self): ) json_output = self.openstack( - 'floating ip create ' + - '--description bbbb ' + - self.EXTERNAL_NETWORK_NAME, + 'floating ip create ' + + '--description bbbb ' + + self.EXTERNAL_NETWORK_NAME, parse_output=True, ) self.assertIsNotNone(json_output["id"]) @@ -148,15 +146,14 @@ def test_floating_ip_list(self): # Subnets must exist even if not directly referenced here ext_subnet_id = self._create_subnet( - self.EXTERNAL_NETWORK_NAME, - "ext-test-delete" + self.EXTERNAL_NETWORK_NAME, "ext-test-delete" ) self.addCleanup(self.openstack, 'subnet delete ' + ext_subnet_id) json_output = self.openstack( - 'floating ip create ' + - '--description aaaa ' + - self.EXTERNAL_NETWORK_NAME, + 'floating ip create ' + + '--description aaaa ' + + self.EXTERNAL_NETWORK_NAME, parse_output=True, ) self.assertIsNotNone(json_output["id"]) @@ -170,9 +167,9 @@ def test_floating_ip_list(self): fip1 = json_output["floating_ip_address"] json_output = self.openstack( - 'floating ip create ' + - '--description bbbb ' + - self.EXTERNAL_NETWORK_NAME, + 'floating ip create ' + + '--description bbbb ' + + self.EXTERNAL_NETWORK_NAME, parse_output=True, ) self.assertIsNotNone(json_output["id"]) @@ -191,8 +188,8 @@ def test_floating_ip_list(self): parse_output=True, ) fip_map = { - item.get('ID'): - item.get('Floating IP Address') for item in json_output + item.get('ID'): item.get('Floating IP Address') + for item in json_output } # self.assertEqual(item_map, json_output) self.assertIn(ip1, fip_map.keys()) @@ -202,13 +199,12 @@ def test_floating_ip_list(self): # Test list --long json_output = self.openstack( - 'floating ip list ' + - '--long', + 'floating ip list ' + '--long', parse_output=True, ) fip_map = { - item.get('ID'): - item.get('Floating IP Address') for item in json_output + item.get('ID'): item.get('Floating IP Address') + for item in json_output } self.assertIn(ip1, fip_map.keys()) self.assertIn(ip2, fip_map.keys()) @@ -223,8 +219,7 @@ def test_floating_ip_list(self): # TODO(dtroyer): add more filter tests json_output = self.openstack( - 'floating ip show ' + - ip1, + 'floating ip show ' + ip1, parse_output=True, ) self.assertIsNotNone(json_output["id"]) @@ -247,13 +242,11 @@ def test_floating_ip_set_and_unset_port(self): # Subnets must exist even if not directly referenced here 
ext_subnet_id = self._create_subnet( - self.EXTERNAL_NETWORK_NAME, - "ext-test-delete" + self.EXTERNAL_NETWORK_NAME, "ext-test-delete" ) self.addCleanup(self.openstack, 'subnet delete ' + ext_subnet_id) priv_subnet_id = self._create_subnet( - self.PRIVATE_NETWORK_NAME, - "priv-test-delete" + self.PRIVATE_NETWORK_NAME, "priv-test-delete" ) self.addCleanup(self.openstack, 'subnet delete ' + priv_subnet_id) @@ -261,9 +254,9 @@ def test_floating_ip_set_and_unset_port(self): self.PORT_NAME = uuid.uuid4().hex json_output = self.openstack( - 'floating ip create ' + - '--description aaaa ' + - self.EXTERNAL_NETWORK_NAME, + 'floating ip create ' + + '--description aaaa ' + + self.EXTERNAL_NETWORK_NAME, parse_output=True, ) self.assertIsNotNone(json_output["id"]) @@ -275,33 +268,34 @@ def test_floating_ip_set_and_unset_port(self): ) json_output = self.openstack( - 'port create ' + - '--network ' + self.PRIVATE_NETWORK_NAME + ' ' + - '--fixed-ip subnet=' + priv_subnet_id + ' ' + - self.PORT_NAME, + 'port create ' + + '--network ' + + self.PRIVATE_NETWORK_NAME + + ' ' + + '--fixed-ip subnet=' + + priv_subnet_id + + ' ' + + self.PORT_NAME, parse_output=True, ) self.assertIsNotNone(json_output["id"]) port_id = json_output["id"] json_output = self.openstack( - 'router create ' + - self.ROUTER, + 'router create ' + self.ROUTER, parse_output=True, ) self.assertIsNotNone(json_output["id"]) self.addCleanup(self.openstack, 'router delete ' + self.ROUTER) - self.openstack( - 'router add port ' + - self.ROUTER + ' ' + - port_id - ) + self.openstack('router add port ' + self.ROUTER + ' ' + port_id) self.openstack( - 'router set ' + - '--external-gateway ' + self.EXTERNAL_NETWORK_NAME + ' ' + - self.ROUTER + 'router set ' + + '--external-gateway ' + + self.EXTERNAL_NETWORK_NAME + + ' ' + + self.ROUTER ) self.addCleanup( self.openstack, @@ -312,19 +306,14 @@ def test_floating_ip_set_and_unset_port(self): 'router remove port ' + self.ROUTER + ' ' + port_id, ) - self.openstack( - 'floating ip set ' + - '--port ' + port_id + ' ' + - ip1 - ) + self.openstack('floating ip set ' + '--port ' + port_id + ' ' + ip1) self.addCleanup( self.openstack, 'floating ip unset --port ' + ip1, ) json_output = self.openstack( - 'floating ip show ' + - ip1, + 'floating ip show ' + ip1, parse_output=True, ) diff --git a/openstackclient/tests/functional/network/v2/test_ip_availability.py b/openstackclient/tests/functional/network/v2/test_ip_availability.py index 6697ed3639..1cdbd487a5 100644 --- a/openstackclient/tests/functional/network/v2/test_ip_availability.py +++ b/openstackclient/tests/functional/network/v2/test_ip_availability.py @@ -20,21 +20,21 @@ class IPAvailabilityTests(common.NetworkTests): @classmethod def setUpClass(cls): - common.NetworkTests.setUpClass() + super().setUpClass() + if cls.haz_network: cls.NAME = uuid.uuid4().hex cls.NETWORK_NAME = uuid.uuid4().hex # Create a network for the subnet - cls.openstack( - 'network create ' + - cls.NETWORK_NAME - ) + cls.openstack('network create ' + cls.NETWORK_NAME) cmd_output = cls.openstack( - 'subnet create ' + - '--network ' + cls.NETWORK_NAME + ' ' + - '--subnet-range 10.10.10.0/24 ' + - cls.NAME, + 'subnet create ' + + '--network ' + + cls.NETWORK_NAME + + ' ' + + '--subnet-range 10.10.10.0/24 ' + + cls.NAME, parse_output=True, ) cls.assertOutput(cls.NAME, cmd_output['name']) @@ -43,30 +43,21 @@ def setUpClass(cls): def tearDownClass(cls): try: if cls.haz_network: - raw_subnet = cls.openstack( - 'subnet delete ' + - cls.NAME - ) + raw_subnet = cls.openstack('subnet delete 
' + cls.NAME) raw_network = cls.openstack( - 'network delete ' + - cls.NETWORK_NAME + 'network delete ' + cls.NETWORK_NAME ) cls.assertOutput('', raw_subnet) cls.assertOutput('', raw_network) finally: - super(IPAvailabilityTests, cls).tearDownClass() - - def setUp(self): - super(IPAvailabilityTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().tearDownClass() def test_ip_availability_list(self): """Test ip availability list""" cmd_output = self.openstack( 'ip availability list', - parse_output=True,) + parse_output=True, + ) names = [x['Network Name'] for x in cmd_output] self.assertIn(self.NETWORK_NAME, names) @@ -74,7 +65,8 @@ def test_ip_availability_show(self): """Test ip availability show""" cmd_output = self.openstack( 'ip availability show ' + self.NETWORK_NAME, - parse_output=True,) + parse_output=True, + ) self.assertEqual( self.NETWORK_NAME, cmd_output['network_name'], diff --git a/openstackclient/tests/functional/network/v2/test_l3_conntrack_helper.py b/openstackclient/tests/functional/network/v2/test_l3_conntrack_helper.py index 2563bcf980..ab0cb53e8a 100644 --- a/openstackclient/tests/functional/network/v2/test_l3_conntrack_helper.py +++ b/openstackclient/tests/functional/network/v2/test_l3_conntrack_helper.py @@ -17,14 +17,12 @@ class L3ConntrackHelperTests(common.NetworkTests): - def setUp(self): - super(L3ConntrackHelperTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() + if not self.is_extension_enabled('l3-conntrack-helper'): self.skipTest("No l3-conntrack-helper extension present") + if not self.is_extension_enabled('expose-l3-conntrack-helper'): self.skipTest("No expose-l3-conntrack-helper extension present") @@ -43,14 +41,14 @@ def _create_helpers(self, router_id, helpers): created_helpers = [] for helper in helpers: output = self.openstack( - 'network l3 conntrack helper create %(router)s ' - '--helper %(helper)s --protocol %(protocol)s ' - '--port %(port)s ' % { - 'router': router_id, - 'helper': helper['helper'], - 'protocol': helper['protocol'], - 'port': helper['port'], - }, + 'network l3 conntrack helper create {router} ' + '--helper {helper} --protocol {protocol} ' + '--port {port} '.format( + router=router_id, + helper=helper['helper'], + protocol=helper['protocol'], + port=helper['port'], + ), parse_output=True, ) self.assertEqual(helper['helper'], output['helper']) @@ -63,53 +61,31 @@ def test_l3_conntrack_helper_create_and_delete(self): """Test create, delete multiple""" helpers = [ - { - 'helper': 'tftp', - 'protocol': 'udp', - 'port': 69 - }, { - 'helper': 'ftp', - 'protocol': 'tcp', - 'port': 21 - } + {'helper': 'tftp', 'protocol': 'udp', 'port': 69}, + {'helper': 'ftp', 'protocol': 'tcp', 'port': 21}, ] router_id = self._create_router() created_helpers = self._create_helpers(router_id, helpers) ct_ids = " ".join([ct['id'] for ct in created_helpers]) raw_output = self.openstack( - '--debug network l3 conntrack helper delete %(router)s ' - '%(ct_ids)s' % { - 'router': router_id, 'ct_ids': ct_ids}) + f'--debug network l3 conntrack helper delete {router_id} {ct_ids}' + ) self.assertOutput('', raw_output) def test_l3_conntrack_helper_list(self): helpers = [ - { - 'helper': 'tftp', - 'protocol': 'udp', - 'port': 69 - }, { - 'helper': 'ftp', - 'protocol': 'tcp', - 'port': 21 - } + {'helper': 'tftp', 'protocol': 'udp', 'port': 69}, + {'helper': 'ftp', 
'protocol': 'tcp', 'port': 21}, ] expected_helpers = [ - { - 'Helper': 'tftp', - 'Protocol': 'udp', - 'Port': 69 - }, { - 'Helper': 'ftp', - 'Protocol': 'tcp', - 'Port': 21 - } + {'Helper': 'tftp', 'Protocol': 'udp', 'Port': 69}, + {'Helper': 'ftp', 'Protocol': 'tcp', 'Port': 21}, ] router_id = self._create_router() self._create_helpers(router_id, helpers) output = self.openstack( - 'network l3 conntrack helper list %s ' % router_id, + f'network l3 conntrack helper list {router_id} ', parse_output=True, ) for ct in output: @@ -118,40 +94,44 @@ def test_l3_conntrack_helper_list(self): self.assertIn(ct, expected_helpers) def test_l3_conntrack_helper_set_and_show(self): - helper = { - 'helper': 'tftp', - 'protocol': 'udp', - 'port': 69} + helper = 'tftp' + proto = 'udp' + port = 69 router_id = self._create_router() - created_helper = self._create_helpers(router_id, [helper])[0] + created_helper = self._create_helpers( + router_id, + [{'helper': helper, 'protocol': proto, 'port': port}], + )[0] output = self.openstack( - 'network l3 conntrack helper show %(router_id)s %(ct_id)s ' - '-f json' % { - 'router_id': router_id, - 'ct_id': created_helper['id'], - }, + 'network l3 conntrack helper show {router_id} {ct_id} ' + '-f json'.format( + router_id=router_id, + ct_id=created_helper['id'], + ), parse_output=True, ) - self.assertEqual(helper['helper'], output['helper']) - self.assertEqual(helper['protocol'], output['protocol']) - self.assertEqual(helper['port'], output['port']) + self.assertEqual(port, output['port']) + self.assertEqual(helper, output['helper']) + self.assertEqual(proto, output['protocol']) raw_output = self.openstack( - 'network l3 conntrack helper set %(router_id)s %(ct_id)s ' - '--port %(port)s ' % { - 'router_id': router_id, - 'ct_id': created_helper['id'], - 'port': helper['port'] + 1}) + 'network l3 conntrack helper set {router_id} {ct_id} ' + '--port {port} '.format( + router_id=router_id, + ct_id=created_helper['id'], + port=port + 1, + ) + ) self.assertOutput('', raw_output) output = self.openstack( - 'network l3 conntrack helper show %(router_id)s %(ct_id)s ' - '-f json' % { - 'router_id': router_id, - 'ct_id': created_helper['id'], - }, + 'network l3 conntrack helper show {router_id} {ct_id} ' + '-f json'.format( + router_id=router_id, + ct_id=created_helper['id'], + ), parse_output=True, ) - self.assertEqual(helper['port'] + 1, output['port']) - self.assertEqual(helper['helper'], output['helper']) - self.assertEqual(helper['protocol'], output['protocol']) + self.assertEqual(port + 1, output['port']) + self.assertEqual(helper, output['helper']) + self.assertEqual(proto, output['protocol']) diff --git a/openstackclient/tests/functional/network/v2/test_local_ip.py b/openstackclient/tests/functional/network/v2/test_local_ip.py index b5672b6ded..d092be084b 100644 --- a/openstackclient/tests/functional/network/v2/test_local_ip.py +++ b/openstackclient/tests/functional/network/v2/test_local_ip.py @@ -21,10 +21,8 @@ class LocalIPTests(common.NetworkTests): """Functional tests for local IP""" def setUp(self): - super(LocalIPTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() + if not self.is_extension_enabled('local-ip'): self.skipTest("No local-ip extension present") @@ -32,8 +30,7 @@ def test_local_ip_create_and_delete(self): """Test create, delete multiple""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'local ip create ' + - name1, + 'local ip create ' + 
name1, parse_output=True, ) self.assertEqual( @@ -43,8 +40,7 @@ def test_local_ip_create_and_delete(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'local ip create ' + - name2, + 'local ip create ' + name2, parse_output=True, ) self.assertEqual( @@ -81,10 +77,13 @@ def test_local_ip_list(self): self.assertNotEqual(admin_project_id, demo_project_id) self.assertEqual(admin_project_id, auth_project_id) + # type narrow + assert admin_project_id is not None + assert demo_project_id is not None + name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'local ip create ' + - name1, + 'local ip create ' + name1, parse_output=True, ) self.addCleanup(self.openstack, 'local ip delete ' + name1) @@ -95,9 +94,7 @@ def test_local_ip_list(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'local ip create ' + - '--project ' + demo_project_id + - ' ' + name2, + 'local ip create ' + '--project ' + demo_project_id + ' ' + name2, parse_output=True, ) self.addCleanup(self.openstack, 'local ip delete ' + name2) @@ -117,8 +114,7 @@ def test_local_ip_list(self): # Test list --project cmd_output = self.openstack( - 'local ip list ' + - '--project ' + demo_project_id, + 'local ip list ' + '--project ' + demo_project_id, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -127,8 +123,7 @@ def test_local_ip_list(self): # Test list --name cmd_output = self.openstack( - 'local ip list ' + - '--name ' + name1, + 'local ip list ' + '--name ' + name1, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -140,9 +135,7 @@ def test_local_ip_set_unset_and_show(self): name = uuid.uuid4().hex newname = name + "_" cmd_output = self.openstack( - 'local ip create ' + - '--description aaaa ' + - name, + 'local ip create ' + '--description aaaa ' + name, parse_output=True, ) self.addCleanup(self.openstack, 'local ip delete ' + newname) @@ -151,17 +144,18 @@ def test_local_ip_set_unset_and_show(self): # Test set name and description raw_output = self.openstack( - 'local ip set ' + - '--name ' + newname + ' ' + - '--description bbbb ' + - name, + 'local ip set ' + + '--name ' + + newname + + ' ' + + '--description bbbb ' + + name, ) self.assertOutput('', raw_output) # Show the updated local ip cmd_output = self.openstack( - 'local ip show ' + - newname, + 'local ip show ' + newname, parse_output=True, ) self.assertEqual(newname, cmd_output['name']) diff --git a/openstackclient/tests/functional/network/v2/test_network.py b/openstackclient/tests/functional/network/v2/test_network.py index 20be2d1ad1..a38d88bdac 100644 --- a/openstackclient/tests/functional/network/v2/test_network.py +++ b/openstackclient/tests/functional/network/v2/test_network.py @@ -20,12 +20,6 @@ class NetworkTests(common.NetworkTagTests): base_command = 'network' - def setUp(self): - super(NetworkTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") - def test_network_create_compute(self): """Test Nova-net create options, delete""" if self.haz_network: @@ -34,9 +28,7 @@ def test_network_create_compute(self): # Network create with minimum options name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'network create ' + - '--subnet 1.2.3.4/28 ' + - name1, + 'network create ' + '--subnet 1.2.3.4/28 ' + name1, parse_output=True, ) self.addCleanup(self.openstack, 'network delete ' + name1) @@ -54,10 +46,7 @@ def test_network_create_compute(self): # Network create with more options name2 = uuid.uuid4().hex cmd_output = 
self.openstack( - 'network create ' + - '--subnet 1.2.4.4/28 ' + - '--share ' + - name2, + 'network create ' + '--subnet 1.2.4.4/28 ' + '--share ' + name2, parse_output=True, ) self.addCleanup(self.openstack, 'network delete ' + name2) @@ -105,8 +94,7 @@ def test_network_create_network(self): # Network create with no options name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'network create ' + - name1, + 'network create ' + name1, parse_output=True, ) self.addCleanup(self.openstack, 'network delete ' + name1) @@ -136,9 +124,7 @@ def test_network_create_network(self): # Network create with options name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'network create ' + - '--project demo ' + - name2, + 'network create ' + '--project demo ' + name2, parse_output=True, ) self.addCleanup(self.openstack, 'network delete ' + name2) @@ -159,9 +145,7 @@ def test_network_delete_compute(self): name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'network create ' + - '--subnet 9.8.7.6/28 ' + - name1, + 'network create ' + '--subnet 9.8.7.6/28 ' + name1, parse_output=True, ) self.assertIsNotNone(cmd_output["id"]) @@ -172,9 +156,7 @@ def test_network_delete_compute(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'network create ' + - '--subnet 8.7.6.5/28 ' + - name2, + 'network create ' + '--subnet 8.7.6.5/28 ' + name2, parse_output=True, ) self.assertIsNotNone(cmd_output["id"]) @@ -190,9 +172,7 @@ def test_network_delete_network(self): name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'network create ' + - '--description aaaa ' + - name1, + 'network create ' + '--description aaaa ' + name1, parse_output=True, ) self.assertIsNotNone(cmd_output["id"]) @@ -203,9 +183,7 @@ def test_network_delete_network(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'network create ' + - '--description bbbb ' + - name2, + 'network create ' + '--description bbbb ' + name2, parse_output=True, ) self.assertIsNotNone(cmd_output["id"]) @@ -214,7 +192,7 @@ def test_network_delete_network(self): cmd_output["description"], ) - del_output = self.openstack('network delete %s %s' % (name1, name2)) + del_output = self.openstack(f'network delete {name1} {name2}') self.assertOutput('', del_output) def test_network_list(self): @@ -225,12 +203,10 @@ def test_network_list(self): else: network_options = '--subnet 3.4.5.6/28 ' cmd_output = self.openstack( - 'network create ' + - network_options + - name1, + 'network create ' + network_options + name1, parse_output=True, ) - self.addCleanup(self.openstack, 'network delete %s' % name1) + self.addCleanup(self.openstack, f'network delete {name1}') self.assertIsNotNone(cmd_output["id"]) if self.haz_network: self.assertEqual( @@ -248,9 +224,7 @@ def test_network_list(self): cmd_output["router:external"], ) self.assertFalse(cmd_output["is_default"]) - self.assertTrue( - cmd_output["port_security_enabled"] - ) + self.assertTrue(cmd_output["port_security_enabled"]) else: self.assertEqual( '3.4.5.0/28', @@ -263,8 +237,7 @@ def test_network_list(self): else: network_options = '--subnet 4.5.6.7/28 ' cmd_output = self.openstack( - 'network create --share %s%s' % - (network_options, name2), + f'network create --share {network_options}{name2}', parse_output=True, ) self.addCleanup(self.openstack, 'network delete ' + name2) @@ -350,50 +323,54 @@ def test_network_list(self): def test_network_dhcp_agent(self): if not self.haz_network: self.skipTest("No Network service present") + + if not self.is_extension_enabled("agent"): + self.skipTest("No 
agent extension present")
+
         if not self.is_extension_enabled("dhcp_agent_scheduler"):
             self.skipTest("No dhcp_agent_scheduler extension present")

-        name1 = uuid.uuid4().hex
-        cmd_output1 = self.openstack(
-            'network create --description aaaa %s' % name1,
+        # Get DHCP Agent ID
+        cmd_output = self.openstack(
+            'network agent list --agent-type dhcp',
             parse_output=True,
         )
+        if not cmd_output:
+            self.skipTest("No agents with type=dhcp available")

-        self.addCleanup(self.openstack, 'network delete %s' % name1)
-
-        # Get network ID
-        network_id = cmd_output1['id']
+        agent_id = cmd_output[0]['ID']

-        # Get DHCP Agent ID
-        cmd_output2 = self.openstack(
-            'network agent list --agent-type dhcp',
+        name1 = uuid.uuid4().hex
+        cmd_output = self.openstack(
+            f'network create --description aaaa {name1}',
             parse_output=True,
         )
-        agent_id = cmd_output2[0]['ID']
+
+        self.addCleanup(self.openstack, f'network delete {name1}')
+
+        # Get network ID
+        network_id = cmd_output['id']

         # Add Agent to Network
         self.openstack(
-            'network agent add network --dhcp %s %s' % (agent_id, network_id)
+            f'network agent add network --dhcp {agent_id} {network_id}'
         )

         # Test network list --agent
-        cmd_output3 = self.openstack(
-            'network list --agent %s' % agent_id,
+        cmd_output = self.openstack(
+            f'network list --agent {agent_id}',
             parse_output=True,
         )

         # Cleanup
         # Remove Agent from Network
         self.openstack(
-            'network agent remove network --dhcp %s %s' %
-            (agent_id, network_id)
+            f'network agent remove network --dhcp {agent_id} {network_id}'
         )

         # Assert
-        col_name = [x["ID"] for x in cmd_output3]
-        self.assertIn(
-            network_id, col_name
-        )
+        col_name = [x["ID"] for x in cmd_output]
+        self.assertIn(network_id, col_name)

     def test_network_set(self):
         """Tests create options, set, show, delete"""
@@ -408,11 +385,10 @@ def test_network_set(self):
             '--no-share '
             '--internal '
             '--no-default '
-            '--enable-port-security %s' %
-            name,
+            f'--enable-port-security {name}',
             parse_output=True,
         )
-        self.addCleanup(self.openstack, 'network delete %s' % name)
+        self.addCleanup(self.openstack, f'network delete {name}')
         self.assertIsNotNone(cmd_output["id"])
         self.assertEqual(
             'aaaa',
@@ -429,9 +405,7 @@ def test_network_set(self):
         )

         self.assertFalse(cmd_output["is_default"])
-        self.assertTrue(
-            cmd_output["port_security_enabled"]
-        )
+        self.assertTrue(cmd_output["port_security_enabled"])

         raw_output = self.openstack(
             'network set '
@@ -439,8 +413,7 @@ def test_network_set(self):
             '--disable '
             '--share '
             '--external '
-            '--disable-port-security %s' %
-            name
+            f'--disable-port-security {name}'
         )
         self.assertOutput('', raw_output)

@@ -463,6 +436,4 @@ def test_network_set(self):
             cmd_output["router:external"],
         )
         self.assertFalse(cmd_output["is_default"])
-        self.assertFalse(
-            cmd_output["port_security_enabled"]
-        )
+        self.assertFalse(cmd_output["port_security_enabled"])
diff --git a/openstackclient/tests/functional/network/v2/test_network_agent.py b/openstackclient/tests/functional/network/v2/test_network_agent.py
index d3e6353ed1..013201a40e 100644
--- a/openstackclient/tests/functional/network/v2/test_network_agent.py
+++ b/openstackclient/tests/functional/network/v2/test_network_agent.py
@@ -15,14 +15,14 @@
 from openstackclient.tests.functional.network.v2 import common


-class NetworkAgentTests(common.NetworkTests):
+class TestAgent(common.NetworkTests):
     """Functional tests for network agent"""

     def setUp(self):
-        super(NetworkAgentTests, self).setUp()
-        # Nothing in this class works with Nova Network
-        if not self.haz_network:
-            self.skipTest("No 
Network service present") + super().setUp() + + if not self.is_extension_enabled("agent"): + self.skipTest("No agent extension present") def test_network_agent_list_show_set(self): """Test network agent list, set, show commands @@ -42,7 +42,7 @@ def test_network_agent_list_show_set(self): # agent show cmd_output = self.openstack( - 'network agent show %s' % agent_ids[0], + f'network agent show {agent_ids[0]}', parse_output=True, ) self.assertEqual( @@ -57,12 +57,12 @@ def test_network_agent_list_show_set(self): # agent set raw_output = self.openstack( - 'network agent set --disable %s' % agent_ids[0] + f'network agent set --disable {agent_ids[0]}' ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'network agent show %s' % agent_ids[0], + f'network agent show {agent_ids[0]}', parse_output=True, ) self.assertEqual( @@ -71,12 +71,12 @@ def test_network_agent_list_show_set(self): ) raw_output = self.openstack( - 'network agent set --enable %s' % agent_ids[0] + f'network agent set --enable {agent_ids[0]}' ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'network agent show %s' % agent_ids[0], + f'network agent show {agent_ids[0]}', parse_output=True, ) self.assertEqual( @@ -85,14 +85,14 @@ def test_network_agent_list_show_set(self): ) -class NetworkAgentListTests(common.NetworkTests): +class TestAgentList(common.NetworkTests): """Functional test for network agent""" def setUp(self): - super(NetworkAgentListTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() + + if not self.is_extension_enabled("agent"): + self.skipTest("No agent extension present") def test_network_dhcp_agent_list(self): """Test network agent list""" @@ -100,48 +100,47 @@ def test_network_dhcp_agent_list(self): if not self.is_extension_enabled("dhcp_agent_scheduler"): self.skipTest("No dhcp_agent_scheduler extension present") - name1 = uuid.uuid4().hex - cmd_output1 = self.openstack( - 'network create --description aaaa %s' % name1, + # Get DHCP Agent ID + cmd_output = self.openstack( + 'network agent list --agent-type dhcp', parse_output=True, ) + if not cmd_output: + self.skipTest("No agents with type=dhcp available") - self.addCleanup(self.openstack, 'network delete %s' % name1) - - # Get network ID - network_id = cmd_output1['id'] + agent_id = cmd_output[0]['ID'] - # Get DHCP Agent ID - cmd_output2 = self.openstack( - 'network agent list --agent-type dhcp', + name1 = uuid.uuid4().hex + cmd_output = self.openstack( + f'network create --description aaaa {name1}', parse_output=True, ) - agent_id = cmd_output2[0]['ID'] + + self.addCleanup(self.openstack, f'network delete {name1}') + + # Get network ID + network_id = cmd_output['id'] # Add Agent to Network self.openstack( - 'network agent add network --dhcp %s %s' % - (agent_id, network_id) + f'network agent add network --dhcp {agent_id} {network_id}' ) # Test network agent list --network - cmd_output3 = self.openstack( - 'network agent list --network %s' % network_id, + cmd_output = self.openstack( + f'network agent list --network {network_id}', parse_output=True, ) # Cleanup # Remove Agent from Network self.openstack( - 'network agent remove network --dhcp %s %s' % - (agent_id, network_id) + f'network agent remove network --dhcp {agent_id} {network_id}' ) # Assert - col_name = [x["ID"] for x in cmd_output3] - self.assertIn( - agent_id, col_name - ) + col_name = [x["ID"] for x in cmd_output] + self.assertIn(agent_id, col_name) def 
test_network_agent_list_routers(self): """Add agent to router, list agents on router, delete.""" @@ -151,38 +150,42 @@ def test_network_agent_list_routers(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'router create %s' % name, - parse_output=True,) + f'router create {name}', + parse_output=True, + ) - self.addCleanup(self.openstack, 'router delete %s' % name) + self.addCleanup(self.openstack, f'router delete {name}') # Get router ID router_id = cmd_output['id'] # Get l3 agent id cmd_output = self.openstack( 'network agent list --agent-type l3', - parse_output=True,) + parse_output=True, + ) # Check at least one L3 agent is included in the response. self.assertTrue(cmd_output) agent_id = cmd_output[0]['ID'] # Add router to agent - self.openstack( - 'network agent add router --l3 %s %s' % (agent_id, router_id)) + self.openstack(f'network agent add router --l3 {agent_id} {router_id}') # Test router list --agent cmd_output = self.openstack( - 'network agent list --router %s' % router_id, - parse_output=True,) + f'network agent list --router {router_id}', + parse_output=True, + ) agent_ids = [x['ID'] for x in cmd_output] self.assertIn(agent_id, agent_ids) # Remove router from agent self.openstack( - 'network agent remove router --l3 %s %s' % (agent_id, router_id)) + f'network agent remove router --l3 {agent_id} {router_id}' + ) cmd_output = self.openstack( - 'network agent list --router %s' % router_id, - parse_output=True,) + f'network agent list --router {router_id}', + parse_output=True, + ) agent_ids = [x['ID'] for x in cmd_output] self.assertNotIn(agent_id, agent_ids) diff --git a/openstackclient/tests/functional/network/v2/test_network_flavor.py b/openstackclient/tests/functional/network/v2/test_network_flavor.py index 2ac0daef54..2b88670378 100644 --- a/openstackclient/tests/functional/network/v2/test_network_flavor.py +++ b/openstackclient/tests/functional/network/v2/test_network_flavor.py @@ -18,12 +18,6 @@ class NetworkFlavorTests(common.NetworkTests): """Functional tests for network flavor""" - def setUp(self): - super(NetworkFlavorTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") - def test_network_flavor_add_remove_profile(self): """Test add and remove network flavor to/from profile""" # Create Flavor @@ -43,14 +37,17 @@ def test_network_flavor_add_remove_profile(self): ) service_profile_id = cmd_output2.get('id') - self.addCleanup(self.openstack, 'network flavor delete %s' % - flavor_id) - self.addCleanup(self.openstack, 'network flavor profile delete %s' % - service_profile_id) + self.addCleanup(self.openstack, f'network flavor delete {flavor_id}') + self.addCleanup( + self.openstack, + f'network flavor profile delete {service_profile_id}', + ) # Add flavor to service profile self.openstack( - 'network flavor add profile ' + - flavor_id + ' ' + service_profile_id + 'network flavor add profile ' + + flavor_id + + ' ' + + service_profile_id ) cmd_output4 = self.openstack( @@ -65,8 +62,10 @@ def test_network_flavor_add_remove_profile(self): # Cleanup # Remove flavor from service profile self.openstack( - 'network flavor remove profile ' + - flavor_id + ' ' + service_profile_id + 'network flavor remove profile ' + + flavor_id + + ' ' + + service_profile_id ) cmd_output6 = self.openstack( @@ -112,7 +111,8 @@ def test_network_flavor_delete(self): cmd_output['description'], ) raw_output = self.openstack( - 'network flavor delete ' + name1 + " " + name2) + 'network flavor delete 
' + name1 + " " + name2 + ) self.assertOutput('', raw_output) def test_network_flavor_list(self): @@ -160,7 +160,8 @@ def test_network_flavor_list(self): # Test list cmd_output = self.openstack( 'network flavor list ', - parse_output=True,) + parse_output=True, + ) self.assertIsNotNone(cmd_output) name_list = [item.get('Name') for item in cmd_output] @@ -197,7 +198,8 @@ def test_network_flavor_set(self): cmd_output = self.openstack( 'network flavor show ' + newname, - parse_output=True,) + parse_output=True, + ) self.assertEqual( newname, cmd_output['name'], @@ -222,7 +224,8 @@ def test_network_flavor_show(self): self.addCleanup(self.openstack, "network flavor delete " + name) cmd_output = self.openstack( 'network flavor show ' + name, - parse_output=True,) + parse_output=True, + ) self.assertEqual( name, cmd_output['name'], diff --git a/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py b/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py index 60fd949b41..7a64da768e 100644 --- a/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py +++ b/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py @@ -19,17 +19,14 @@ class NetworkFlavorProfileTests(common.NetworkTests): DESCRIPTION = 'fakedescription' METAINFO = 'Extrainfo' - def setUp(self): - super(NetworkFlavorProfileTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") - def test_network_flavor_profile_create(self): json_output = self.openstack( - 'network flavor profile create ' + - '--description ' + self.DESCRIPTION + ' ' + - '--enable --metainfo ' + self.METAINFO, + 'network flavor profile create ' + + '--description ' + + self.DESCRIPTION + + ' ' + + '--enable --metainfo ' + + self.METAINFO, parse_output=True, ) ID = json_output.get('id') @@ -50,10 +47,13 @@ def test_network_flavor_profile_create(self): def test_network_flavor_profile_list(self): json_output = self.openstack( - 'network flavor profile create ' + - '--description ' + self.DESCRIPTION + ' ' + - '--enable ' + - '--metainfo ' + self.METAINFO, + 'network flavor profile create ' + + '--description ' + + self.DESCRIPTION + + ' ' + + '--enable ' + + '--metainfo ' + + self.METAINFO, parse_output=True, ) ID1 = json_output.get('id') @@ -69,10 +69,13 @@ def test_network_flavor_profile_list(self): ) json_output = self.openstack( - 'network flavor profile create ' + - '--description ' + self.DESCRIPTION + ' ' + - '--disable ' + - '--metainfo ' + self.METAINFO, + 'network flavor profile create ' + + '--description ' + + self.DESCRIPTION + + ' ' + + '--disable ' + + '--metainfo ' + + self.METAINFO, parse_output=True, ) ID2 = json_output.get('id') @@ -106,10 +109,13 @@ def test_network_flavor_profile_list(self): def test_network_flavor_profile_set(self): json_output_1 = self.openstack( - 'network flavor profile create ' + - '--description ' + self.DESCRIPTION + ' ' + - '--enable ' + - '--metainfo ' + self.METAINFO, + 'network flavor profile create ' + + '--description ' + + self.DESCRIPTION + + ' ' + + '--enable ' + + '--metainfo ' + + self.METAINFO, parse_output=True, ) ID = json_output_1.get('id') @@ -146,10 +152,13 @@ def test_network_flavor_profile_set(self): def test_network_flavor_profile_show(self): json_output_1 = self.openstack( - 'network flavor profile create ' + - '--description ' + self.DESCRIPTION + ' ' + - '--enable ' + - '--metainfo ' + self.METAINFO, + 'network flavor profile create ' + + 
'--description ' + + self.DESCRIPTION + + ' ' + + '--enable ' + + '--metainfo ' + + self.METAINFO, parse_output=True, ) ID = json_output_1.get('id') diff --git a/openstackclient/tests/functional/network/v2/test_network_meter.py b/openstackclient/tests/functional/network/v2/test_network_meter.py index ea9d289fdf..edd2c4415f 100644 --- a/openstackclient/tests/functional/network/v2/test_network_meter.py +++ b/openstackclient/tests/functional/network/v2/test_network_meter.py @@ -21,26 +21,28 @@ class TestMeter(common.NetworkTests): """Functional tests for network meter""" + def setUp(self): + super().setUp() + + if not self.is_extension_enabled("metering"): + self.skipTest("No metering extension present") + # NOTE(dtroyer): Do not normalize the setup and teardown of the resource # creation and deletion. Little is gained when each test # has its own needs and there are collisions when running # tests in parallel. - def setUp(self): - super(TestMeter, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") - def test_meter_delete(self): """Test create, delete multiple""" name1 = uuid.uuid4().hex name2 = uuid.uuid4().hex description = 'fakedescription' json_output = self.openstack( - 'network meter create ' + - ' --description ' + description + ' ' + - name1, + 'network meter create ' + + ' --description ' + + description + + ' ' + + name1, parse_output=True, ) self.assertEqual( @@ -55,9 +57,11 @@ def test_meter_delete(self): ) json_output_2 = self.openstack( - 'network meter create ' + - '--description ' + description + ' ' + - name2, + 'network meter create ' + + '--description ' + + description + + ' ' + + name2, parse_output=True, ) self.assertEqual( @@ -80,16 +84,13 @@ def test_meter_list(self): """Test create, list filters, delete""" name1 = uuid.uuid4().hex json_output = self.openstack( - 'network meter create ' + - '--description Test1 ' + - '--share ' + - name1, + 'network meter create ' + + '--description Test1 ' + + '--share ' + + name1, parse_output=True, ) - self.addCleanup( - self.openstack, - 'network meter delete ' + name1 - ) + self.addCleanup(self.openstack, 'network meter delete ' + name1) self.assertEqual( 'Test1', @@ -99,10 +100,10 @@ def test_meter_list(self): name2 = uuid.uuid4().hex json_output_2 = self.openstack( - 'network meter create ' + - '--description Test2 ' + - '--no-share ' + - name2, + 'network meter create ' + + '--description Test2 ' + + '--no-share ' + + name2, parse_output=True, ) self.addCleanup(self.openstack, 'network meter delete ' + name2) @@ -125,9 +126,11 @@ def test_meter_show(self): name1 = uuid.uuid4().hex description = 'fakedescription' json_output = self.openstack( - 'network meter create ' + - ' --description ' + description + ' ' + - name1, + 'network meter create ' + + ' --description ' + + description + + ' ' + + name1, parse_output=True, ) meter_id = json_output.get('id') diff --git a/openstackclient/tests/functional/network/v2/test_network_meter_rule.py b/openstackclient/tests/functional/network/v2/test_network_meter_rule.py index ae1bb90444..c80643e31d 100644 --- a/openstackclient/tests/functional/network/v2/test_network_meter_rule.py +++ b/openstackclient/tests/functional/network/v2/test_network_meter_rule.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import unittest import uuid from openstackclient.tests.functional.network.v2 import common @@ -21,18 +22,21 @@ class TestMeterRule(common.NetworkTests): """Functional tests for meter rule""" - METER_ID = None - METER_RULE_ID = None + METER_ID: str + METER_RULE_ID: str @classmethod def setUpClass(cls): - common.NetworkTests.setUpClass() + super().setUpClass() + + if not cls.is_extension_enabled("metering"): + raise unittest.SkipTest("No metering extension present") + if cls.haz_network: cls.METER_NAME = uuid.uuid4().hex json_output = cls.openstack( - 'network meter create ' + - cls.METER_NAME, + 'network meter create ' + cls.METER_NAME, parse_output=True, ) cls.METER_ID = json_output.get('id') @@ -42,73 +46,53 @@ def tearDownClass(cls): try: if cls.haz_network: raw_output = cls.openstack( - 'network meter delete ' + - cls.METER_ID + 'network meter delete ' + cls.METER_ID ) cls.assertOutput('', raw_output) finally: - common.NetworkTests.tearDownClass() - - def setUp(self): - super(TestMeterRule, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().tearDownClass() def test_meter_rule_delete(self): """test create, delete""" json_output = self.openstack( - 'network meter rule create ' + - '--remote-ip-prefix 10.0.0.0/8 ' + - self.METER_ID, + 'network meter rule create ' + + '--remote-ip-prefix 10.0.0.0/8 ' + + self.METER_ID, parse_output=True, ) rule_id = json_output.get('id') re_ip = json_output.get('remote_ip_prefix') - self.addCleanup( - self.openstack, - 'network meter rule delete ' + rule_id - ) + self.addCleanup(self.openstack, 'network meter rule delete ' + rule_id) self.assertIsNotNone(re_ip) self.assertIsNotNone(rule_id) - self.assertEqual( - '10.0.0.0/8', re_ip - ) + self.assertEqual('10.0.0.0/8', re_ip) def test_meter_rule_list(self): """Test create, list, delete""" json_output = self.openstack( - 'network meter rule create ' + - '--remote-ip-prefix 10.0.0.0/8 ' + - self.METER_ID, + 'network meter rule create ' + + '--remote-ip-prefix 10.0.0.0/8 ' + + self.METER_ID, parse_output=True, ) rule_id_1 = json_output.get('id') self.addCleanup( - self.openstack, - 'network meter rule delete ' + rule_id_1 - ) - self.assertEqual( - '10.0.0.0/8', - json_output.get('remote_ip_prefix') + self.openstack, 'network meter rule delete ' + rule_id_1 ) + self.assertEqual('10.0.0.0/8', json_output.get('remote_ip_prefix')) json_output_1 = self.openstack( - 'network meter rule create ' + - '--remote-ip-prefix 11.0.0.0/8 ' + - self.METER_ID, + 'network meter rule create ' + + '--remote-ip-prefix 11.0.0.0/8 ' + + self.METER_ID, parse_output=True, ) rule_id_2 = json_output_1.get('id') self.addCleanup( - self.openstack, - 'network meter rule delete ' + rule_id_2 - ) - self.assertEqual( - '11.0.0.0/8', - json_output_1.get('remote_ip_prefix') + self.openstack, 'network meter rule delete ' + rule_id_2 ) + self.assertEqual('11.0.0.0/8', json_output_1.get('remote_ip_prefix')) json_output = self.openstack( 'network meter rule list', @@ -124,30 +108,21 @@ def test_meter_rule_list(self): def test_meter_rule_show(self): """Test create, show, delete""" json_output = self.openstack( - 'network meter rule create ' + - '--remote-ip-prefix 10.0.0.0/8 ' + - '--egress ' + - self.METER_ID, + 'network meter rule create ' + + '--remote-ip-prefix 10.0.0.0/8 ' + + '--egress ' + + self.METER_ID, parse_output=True, ) rule_id = json_output.get('id') - self.assertEqual( - 'egress', - json_output.get('direction') - ) + 
self.assertEqual('egress', json_output.get('direction')) json_output = self.openstack( 'network meter rule show ' + rule_id, parse_output=True, ) - self.assertEqual( - '10.0.0.0/8', - json_output.get('remote_ip_prefix') - ) + self.assertEqual('10.0.0.0/8', json_output.get('remote_ip_prefix')) self.assertIsNotNone(rule_id) - self.addCleanup( - self.openstack, - 'network meter rule delete ' + rule_id - ) + self.addCleanup(self.openstack, 'network meter rule delete ' + rule_id) diff --git a/openstackclient/tests/functional/network/v2/test_network_ndp_proxy.py b/openstackclient/tests/functional/network/v2/test_network_ndp_proxy.py index 588b1f56ae..120f53d28d 100644 --- a/openstackclient/tests/functional/network/v2/test_network_ndp_proxy.py +++ b/openstackclient/tests/functional/network/v2/test_network_ndp_proxy.py @@ -14,12 +14,9 @@ class L3NDPProxyTests(common.NetworkTests): - def setUp(self): super().setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + if not self.is_extension_enabled('l3-ndp-proxy'): self.skipTest("No l3-ndp-proxy extension present") @@ -34,19 +31,15 @@ def setUp(self): self.created_ndp_proxies = [] json_output = self.openstack( - 'address scope create --ip-version 6 ' - '%(address_s_name)s' % {'address_s_name': self.ADDR_SCOPE_NAME}, + f'address scope create --ip-version 6 {self.ADDR_SCOPE_NAME}', parse_output=True, ) self.assertIsNotNone(json_output['id']) self.ADDRESS_SCOPE_ID = json_output['id'] json_output = self.openstack( - 'subnet pool create %(subnet_p_name)s ' - '--address-scope %(address_scope)s ' - '--pool-prefix 2001:db8::/96 --default-prefix-length 112' % { - 'subnet_p_name': self.SUBNET_P_NAME, - 'address_scope': self.ADDRESS_SCOPE_ID, - }, + f'subnet pool create {self.SUBNET_P_NAME} ' + f'--address-scope {self.ADDRESS_SCOPE_ID} ' + '--pool-prefix 2001:db8::/96 --default-prefix-length 112', parse_output=True, ) self.assertIsNotNone(json_output['id']) @@ -59,11 +52,7 @@ def setUp(self): self.EXT_NET_ID = json_output['id'] json_output = self.openstack( 'subnet create --ip-version 6 --subnet-pool ' - '%(subnet_pool)s --network %(net_id)s %(sub_name)s' % { - 'subnet_pool': self.SUBNET_POOL_ID, - 'net_id': self.EXT_NET_ID, - 'sub_name': self.EXT_SUB_NAME, - }, + f'{self.SUBNET_POOL_ID} --network {self.EXT_NET_ID} {self.EXT_SUB_NAME}', parse_output=True, ) self.assertIsNotNone(json_output['id']) @@ -75,9 +64,8 @@ def setUp(self): self.assertIsNotNone(json_output['id']) self.ROT_ID = json_output['id'] output = self.openstack( - 'router set %(router_id)s --external-gateway %(net_id)s' % { - 'router_id': self.ROT_ID, - 'net_id': self.EXT_NET_ID}) + f'router set {self.ROT_ID} --external-gateway {self.EXT_NET_ID}' + ) self.assertEqual('', output) output = self.openstack('router set --enable-ndp-proxy ' + self.ROT_ID) self.assertEqual('', output) @@ -94,49 +82,46 @@ def setUp(self): self.INT_NET_ID = json_output['id'] json_output = self.openstack( 'subnet create --ip-version 6 --subnet-pool ' - '%(subnet_pool)s --network %(net_id)s %(sub_name)s' % { - 'subnet_pool': self.SUBNET_POOL_ID, - 'net_id': self.INT_NET_ID, - 'sub_name': self.INT_SUB_NAME, - }, + f'{self.SUBNET_POOL_ID} --network {self.INT_NET_ID} {self.INT_SUB_NAME}', parse_output=True, ) self.assertIsNotNone(json_output['id']) self.INT_SUB_ID = json_output['id'] json_output = self.openstack( - 'port create --network %(net_id)s ' - '%(port_name)s' % { - 'net_id': self.INT_NET_ID, - 'port_name': self.INT_PORT_NAME, - }, + f'port 
create --network {self.INT_NET_ID} {self.INT_PORT_NAME}', parse_output=True, ) self.assertIsNotNone(json_output['id']) self.INT_PORT_ID = json_output['id'] self.INT_PORT_ADDRESS = json_output['fixed_ips'][0]['ip_address'] output = self.openstack( - 'router add subnet ' + self.ROT_ID + ' ' + self.INT_SUB_ID) + 'router add subnet ' + self.ROT_ID + ' ' + self.INT_SUB_ID + ) self.assertEqual('', output) def tearDown(self): for ndp_proxy in self.created_ndp_proxies: output = self.openstack( - 'router ndp proxy delete ' + ndp_proxy['id']) + 'router ndp proxy delete ' + ndp_proxy['id'] + ) self.assertEqual('', output) output = self.openstack('port delete ' + self.INT_PORT_ID) self.assertEqual('', output) output = self.openstack( - 'router set --disable-ndp-proxy ' + self.ROT_ID) + 'router set --disable-ndp-proxy ' + self.ROT_ID + ) self.assertEqual('', output) output = self.openstack( - 'router remove subnet ' + self.ROT_ID + ' ' + self.INT_SUB_ID) + 'router remove subnet ' + self.ROT_ID + ' ' + self.INT_SUB_ID + ) self.assertEqual('', output) output = self.openstack('subnet delete ' + self.INT_SUB_ID) self.assertEqual('', output) output = self.openstack('network delete ' + self.INT_NET_ID) self.assertEqual('', output) output = self.openstack( - 'router unset ' + self.ROT_ID + ' ' + '--external-gateway') + 'router unset ' + self.ROT_ID + ' ' + '--external-gateway' + ) self.assertEqual('', output) output = self.openstack('router delete ' + self.ROT_ID) self.assertEqual('', output) @@ -146,21 +131,22 @@ def tearDown(self): self.assertEqual('', output) output = self.openstack('subnet pool delete ' + self.SUBNET_POOL_ID) self.assertEqual('', output) - output = self.openstack('address scope delete ' + - self.ADDRESS_SCOPE_ID) + output = self.openstack( + 'address scope delete ' + self.ADDRESS_SCOPE_ID + ) self.assertEqual('', output) super().tearDown() def _create_ndp_proxies(self, ndp_proxies): for ndp_proxy in ndp_proxies: output = self.openstack( - 'router ndp proxy create %(router)s --name %(name)s ' - '--port %(port)s --ip-address %(address)s' % { - 'router': ndp_proxy['router_id'], - 'name': ndp_proxy['name'], - 'port': ndp_proxy['port_id'], - 'address': ndp_proxy['address'], - }, + 'router ndp proxy create {router} --name {name} ' + '--port {port} --ip-address {address}'.format( + router=ndp_proxy['router_id'], + name=ndp_proxy['name'], + port=ndp_proxy['port_id'], + address=ndp_proxy['address'], + ), parse_output=True, ) self.assertEqual(ndp_proxy['router_id'], output['router_id']) @@ -174,7 +160,7 @@ def test_create_ndp_proxy(self): 'name': self.getUniqueString(), 'router_id': self.ROT_ID, 'port_id': self.INT_PORT_ID, - 'address': self.INT_PORT_ADDRESS + 'address': self.INT_PORT_ADDRESS, } ] self._create_ndp_proxies(ndp_proxies) @@ -184,11 +170,13 @@ def test_ndp_proxy_list(self): 'name': self.getUniqueString(), 'router_id': self.ROT_ID, 'port_id': self.INT_PORT_ID, - 'address': self.INT_PORT_ADDRESS} + 'address': self.INT_PORT_ADDRESS, + } self._create_ndp_proxies([ndp_proxies]) ndp_proxy = self.openstack( 'router ndp proxy list', - parse_output=True,)[0] + parse_output=True, + )[0] self.assertEqual(ndp_proxies['name'], ndp_proxy['Name']) self.assertEqual(ndp_proxies['router_id'], ndp_proxy['Router ID']) self.assertEqual(ndp_proxies['address'], ndp_proxy['IP Address']) @@ -198,13 +186,14 @@ def test_ndp_proxy_set_and_show(self): 'name': self.getUniqueString(), 'router_id': self.ROT_ID, 'port_id': self.INT_PORT_ID, - 'address': self.INT_PORT_ADDRESS} + 'address': self.INT_PORT_ADDRESS, + } 
description = 'balala' self._create_ndp_proxies([ndp_proxies]) ndp_proxy_id = self.created_ndp_proxies[0]['id'] output = self.openstack( - 'router ndp proxy set --description %s %s' % ( - description, ndp_proxy_id)) + f'router ndp proxy set --description {description} {ndp_proxy_id}' + ) self.assertEqual('', output) json_output = self.openstack( 'router ndp proxy show ' + ndp_proxy_id, diff --git a/openstackclient/tests/functional/network/v2/test_network_qos_policy.py b/openstackclient/tests/functional/network/v2/test_network_qos_policy.py index b603cf1f69..3f59ce35f2 100644 --- a/openstackclient/tests/functional/network/v2/test_network_qos_policy.py +++ b/openstackclient/tests/functional/network/v2/test_network_qos_policy.py @@ -22,30 +22,27 @@ class NetworkQosPolicyTests(common.NetworkTests): """Functional tests for QoS policy""" def setUp(self): - super(NetworkQosPolicyTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() + + if not self.is_extension_enabled("qos"): + self.skipTest("No qos extension present") def test_qos_rule_create_delete(self): # This is to check the output of qos policy delete policy_name = uuid.uuid4().hex self.openstack('network qos policy create ' + policy_name) - raw_output = self.openstack( - 'network qos policy delete ' + - policy_name - ) + raw_output = self.openstack('network qos policy delete ' + policy_name) self.assertEqual('', raw_output) def test_qos_policy_list(self): policy_name = uuid.uuid4().hex json_output = self.openstack( - 'network qos policy create ' + - policy_name, + 'network qos policy create ' + policy_name, parse_output=True, ) - self.addCleanup(self.openstack, - 'network qos policy delete ' + policy_name) + self.addCleanup( + self.openstack, 'network qos policy delete ' + policy_name + ) self.assertEqual(policy_name, json_output['name']) json_output = self.openstack( @@ -57,36 +54,30 @@ def test_qos_policy_list(self): def test_qos_policy_set(self): policy_name = uuid.uuid4().hex json_output = self.openstack( - 'network qos policy create ' + - policy_name, + 'network qos policy create ' + policy_name, parse_output=True, ) - self.addCleanup(self.openstack, - 'network qos policy delete ' + policy_name) + self.addCleanup( + self.openstack, 'network qos policy delete ' + policy_name + ) self.assertEqual(policy_name, json_output['name']) - self.openstack( - 'network qos policy set ' + - '--share ' + - policy_name - ) + self.openstack('network qos policy set ' + '--share ' + policy_name) json_output = self.openstack( - 'network qos policy show ' + - policy_name, + 'network qos policy show ' + policy_name, parse_output=True, ) self.assertTrue(json_output['shared']) self.openstack( - 'network qos policy set ' + - '--no-share ' + - '--no-default ' + - policy_name + 'network qos policy set ' + + '--no-share ' + + '--no-default ' + + policy_name ) json_output = self.openstack( - 'network qos policy show ' + - policy_name, + 'network qos policy show ' + policy_name, parse_output=True, ) self.assertFalse(json_output['shared']) diff --git a/openstackclient/tests/functional/network/v2/test_network_qos_rule.py b/openstackclient/tests/functional/network/v2/test_network_qos_rule.py index 0fe1854bc0..28c38e5a45 100644 --- a/openstackclient/tests/functional/network/v2/test_network_qos_rule.py +++ b/openstackclient/tests/functional/network/v2/test_network_qos_rule.py @@ -18,294 +18,293 @@ from openstackclient.tests.functional.network.v2 import common 
-class NetworkQosRuleTestsMinimumBandwidth(common.NetworkTests): +class NetworkQosTests(common.NetworkTests): + def setUp(self): + super().setUp() + + if not self.is_extension_enabled("qos"): + self.skipTest("No qos extension present") + + +class NetworkQosRuleTestsMinimumBandwidth(NetworkQosTests): """Functional tests for QoS minimum bandwidth rule""" def setUp(self): - super(NetworkQosRuleTestsMinimumBandwidth, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() - self.QOS_POLICY_NAME = 'qos_policy_%s' % uuid.uuid4().hex + self.QOS_POLICY_NAME = f'qos_policy_{uuid.uuid4().hex}' - self.openstack( - 'network qos policy create %s' % self.QOS_POLICY_NAME + self.openstack(f'network qos policy create {self.QOS_POLICY_NAME}') + self.addCleanup( + self.openstack, + f'network qos policy delete {self.QOS_POLICY_NAME}', ) - self.addCleanup(self.openstack, - 'network qos policy delete %s' % self.QOS_POLICY_NAME) cmd_output = self.openstack( 'network qos rule create ' '--type minimum-bandwidth ' '--min-kbps 2800 ' - '--egress %s' % - self.QOS_POLICY_NAME, + f'--egress {self.QOS_POLICY_NAME}', parse_output=True, ) self.RULE_ID = cmd_output['id'] - self.addCleanup(self.openstack, - 'network qos rule delete %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID)) + self.addCleanup( + self.openstack, + f'network qos rule delete {self.QOS_POLICY_NAME} {self.RULE_ID}', + ) self.assertTrue(self.RULE_ID) def test_qos_rule_create_delete(self): # This is to check the output of qos rule delete policy_name = uuid.uuid4().hex - self.openstack('network qos policy create %s' % policy_name) - self.addCleanup(self.openstack, - 'network qos policy delete %s' % policy_name) + self.openstack(f'network qos policy create {policy_name}') + self.addCleanup( + self.openstack, f'network qos policy delete {policy_name}' + ) rule = self.openstack( 'network qos rule create ' '--type minimum-bandwidth ' '--min-kbps 2800 ' - '--egress %s' % policy_name, + f'--egress {policy_name}', parse_output=True, ) raw_output = self.openstack( - 'network qos rule delete %s %s' % - (policy_name, rule['id'])) + 'network qos rule delete {} {}'.format(policy_name, rule['id']) + ) self.assertEqual('', raw_output) def test_qos_rule_list(self): cmd_output = self.openstack( - 'network qos rule list %s' % self.QOS_POLICY_NAME, - parse_output=True,) + f'network qos rule list {self.QOS_POLICY_NAME}', + parse_output=True, + ) self.assertIn(self.RULE_ID, [rule['ID'] for rule in cmd_output]) def test_qos_rule_show(self): cmd_output = self.openstack( - 'network qos rule show %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID), + f'network qos rule show {self.QOS_POLICY_NAME} {self.RULE_ID}', parse_output=True, ) self.assertEqual(self.RULE_ID, cmd_output['id']) def test_qos_rule_set(self): - self.openstack('network qos rule set --min-kbps 7500 %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID)) + self.openstack( + f'network qos rule set --min-kbps 7500 {self.QOS_POLICY_NAME} {self.RULE_ID}' + ) cmd_output = self.openstack( - 'network qos rule show %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID), + f'network qos rule show {self.QOS_POLICY_NAME} {self.RULE_ID}', parse_output=True, ) self.assertEqual(7500, cmd_output['min_kbps']) -class NetworkQosRuleTestsMinimumPacketRate(common.NetworkTests): +class NetworkQosRuleTestsMinimumPacketRate(NetworkQosTests): """Functional tests for QoS minimum packet rate rule""" def setUp(self): - super(NetworkQosRuleTestsMinimumPacketRate, 
self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() - self.QOS_POLICY_NAME = 'qos_policy_%s' % uuid.uuid4().hex + self.QOS_POLICY_NAME = f'qos_policy_{uuid.uuid4().hex}' - self.openstack( - 'network qos policy create %s' % self.QOS_POLICY_NAME + self.openstack(f'network qos policy create {self.QOS_POLICY_NAME}') + self.addCleanup( + self.openstack, + f'network qos policy delete {self.QOS_POLICY_NAME}', ) - self.addCleanup(self.openstack, - 'network qos policy delete %s' % self.QOS_POLICY_NAME) cmd_output = self.openstack( 'network qos rule create ' '--type minimum-packet-rate ' '--min-kpps 2800 ' - '--egress %s' % - self.QOS_POLICY_NAME, + f'--egress {self.QOS_POLICY_NAME}', parse_output=True, ) self.RULE_ID = cmd_output['id'] - self.addCleanup(self.openstack, - 'network qos rule delete %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID)) + self.addCleanup( + self.openstack, + f'network qos rule delete {self.QOS_POLICY_NAME} {self.RULE_ID}', + ) self.assertTrue(self.RULE_ID) def test_qos_rule_create_delete(self): # This is to check the output of qos rule delete policy_name = uuid.uuid4().hex - self.openstack('network qos policy create %s' % policy_name) - self.addCleanup(self.openstack, - 'network qos policy delete %s' % policy_name) + self.openstack(f'network qos policy create {policy_name}') + self.addCleanup( + self.openstack, f'network qos policy delete {policy_name}' + ) rule = self.openstack( 'network qos rule create ' '--type minimum-packet-rate ' '--min-kpps 2800 ' - '--egress %s' % policy_name, + f'--egress {policy_name}', parse_output=True, ) raw_output = self.openstack( - 'network qos rule delete %s %s' % - (policy_name, rule['id'])) + 'network qos rule delete {} {}'.format(policy_name, rule['id']) + ) self.assertEqual('', raw_output) def test_qos_rule_list(self): cmd_output = self.openstack( - 'network qos rule list %s' % self.QOS_POLICY_NAME, - parse_output=True,) + f'network qos rule list {self.QOS_POLICY_NAME}', + parse_output=True, + ) self.assertIn(self.RULE_ID, [rule['ID'] for rule in cmd_output]) def test_qos_rule_show(self): cmd_output = self.openstack( - 'network qos rule show %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID), + f'network qos rule show {self.QOS_POLICY_NAME} {self.RULE_ID}', parse_output=True, ) self.assertEqual(self.RULE_ID, cmd_output['id']) def test_qos_rule_set(self): - self.openstack('network qos rule set --min-kpps 7500 --any %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID)) + self.openstack( + f'network qos rule set --min-kpps 7500 --any {self.QOS_POLICY_NAME} {self.RULE_ID}' + ) cmd_output = self.openstack( - 'network qos rule show %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID), + f'network qos rule show {self.QOS_POLICY_NAME} {self.RULE_ID}', parse_output=True, ) self.assertEqual(7500, cmd_output['min_kpps']) self.assertEqual('any', cmd_output['direction']) -class NetworkQosRuleTestsDSCPMarking(common.NetworkTests): +class NetworkQosRuleTestsDSCPMarking(NetworkQosTests): """Functional tests for QoS DSCP marking rule""" def setUp(self): - super(NetworkQosRuleTestsDSCPMarking, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() - self.QOS_POLICY_NAME = 'qos_policy_%s' % uuid.uuid4().hex - self.openstack( - 'network qos policy create %s' % self.QOS_POLICY_NAME + self.QOS_POLICY_NAME = f'qos_policy_{uuid.uuid4().hex}' + self.openstack(f'network qos 
policy create {self.QOS_POLICY_NAME}') + self.addCleanup( + self.openstack, + f'network qos policy delete {self.QOS_POLICY_NAME}', ) - self.addCleanup(self.openstack, - 'network qos policy delete %s' % self.QOS_POLICY_NAME) cmd_output = self.openstack( 'network qos rule create ' '--type dscp-marking ' - '--dscp-mark 8 %s' % - self.QOS_POLICY_NAME, + f'--dscp-mark 8 {self.QOS_POLICY_NAME}', parse_output=True, ) self.RULE_ID = cmd_output['id'] - self.addCleanup(self.openstack, - 'network qos rule delete %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID)) + self.addCleanup( + self.openstack, + f'network qos rule delete {self.QOS_POLICY_NAME} {self.RULE_ID}', + ) self.assertTrue(self.RULE_ID) def test_qos_rule_create_delete(self): # This is to check the output of qos rule delete policy_name = uuid.uuid4().hex - self.openstack('network qos policy create %s' % policy_name) - self.addCleanup(self.openstack, - 'network qos policy delete %s' % policy_name) + self.openstack(f'network qos policy create {policy_name}') + self.addCleanup( + self.openstack, f'network qos policy delete {policy_name}' + ) rule = self.openstack( 'network qos rule create ' '--type dscp-marking ' - '--dscp-mark 8 %s' % policy_name, + f'--dscp-mark 8 {policy_name}', parse_output=True, ) raw_output = self.openstack( - 'network qos rule delete %s %s' % - (policy_name, rule['id'])) + 'network qos rule delete {} {}'.format(policy_name, rule['id']) + ) self.assertEqual('', raw_output) def test_qos_rule_list(self): cmd_output = self.openstack( - 'network qos rule list %s' % self.QOS_POLICY_NAME, - parse_output=True,) + f'network qos rule list {self.QOS_POLICY_NAME}', + parse_output=True, + ) self.assertIn(self.RULE_ID, [rule['ID'] for rule in cmd_output]) def test_qos_rule_show(self): cmd_output = self.openstack( - 'network qos rule show %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID), + f'network qos rule show {self.QOS_POLICY_NAME} {self.RULE_ID}', parse_output=True, ) self.assertEqual(self.RULE_ID, cmd_output['id']) def test_qos_rule_set(self): - self.openstack('network qos rule set --dscp-mark 32 %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID)) + self.openstack( + f'network qos rule set --dscp-mark 32 {self.QOS_POLICY_NAME} {self.RULE_ID}' + ) cmd_output = self.openstack( - 'network qos rule show %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID), + f'network qos rule show {self.QOS_POLICY_NAME} {self.RULE_ID}', parse_output=True, ) self.assertEqual(32, cmd_output['dscp_mark']) -class NetworkQosRuleTestsBandwidthLimit(common.NetworkTests): +class NetworkQosRuleTestsBandwidthLimit(NetworkQosTests): """Functional tests for QoS bandwidth limit rule""" def setUp(self): - super(NetworkQosRuleTestsBandwidthLimit, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() - self.QOS_POLICY_NAME = 'qos_policy_%s' % uuid.uuid4().hex - self.openstack( - 'network qos policy create %s' % self.QOS_POLICY_NAME + self.QOS_POLICY_NAME = f'qos_policy_{uuid.uuid4().hex}' + self.openstack(f'network qos policy create {self.QOS_POLICY_NAME}') + self.addCleanup( + self.openstack, + f'network qos policy delete {self.QOS_POLICY_NAME}', ) - self.addCleanup(self.openstack, - 'network qos policy delete %s' % self.QOS_POLICY_NAME) cmd_output = self.openstack( 'network qos rule create ' '--type bandwidth-limit ' '--max-kbps 10000 ' - '--egress %s' % - self.QOS_POLICY_NAME, + f'--egress {self.QOS_POLICY_NAME}', parse_output=True, ) self.RULE_ID = cmd_output['id'] - 
self.addCleanup(self.openstack, - 'network qos rule delete %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID)) + self.addCleanup( + self.openstack, + f'network qos rule delete {self.QOS_POLICY_NAME} {self.RULE_ID}', + ) self.assertTrue(self.RULE_ID) def test_qos_rule_create_delete(self): # This is to check the output of qos rule delete policy_name = uuid.uuid4().hex - self.openstack('network qos policy create %s' % policy_name) - self.addCleanup(self.openstack, - 'network qos policy delete %s' % policy_name) + self.openstack(f'network qos policy create {policy_name}') + self.addCleanup( + self.openstack, f'network qos policy delete {policy_name}' + ) rule = self.openstack( 'network qos rule create ' '--type bandwidth-limit ' '--max-kbps 10000 ' '--max-burst-kbits 1400 ' - '--egress %s' % policy_name, + f'--egress {policy_name}', parse_output=True, ) raw_output = self.openstack( - 'network qos rule delete %s %s' % - (policy_name, rule['id'])) + 'network qos rule delete {} {}'.format(policy_name, rule['id']) + ) self.assertEqual('', raw_output) def test_qos_rule_list(self): cmd_output = self.openstack( - 'network qos rule list %s' % - self.QOS_POLICY_NAME, - parse_output=True,) + f'network qos rule list {self.QOS_POLICY_NAME}', + parse_output=True, + ) self.assertIn(self.RULE_ID, [rule['ID'] for rule in cmd_output]) def test_qos_rule_show(self): cmd_output = self.openstack( - 'network qos rule show %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID), + f'network qos rule show {self.QOS_POLICY_NAME} {self.RULE_ID}', parse_output=True, ) self.assertEqual(self.RULE_ID, cmd_output['id']) def test_qos_rule_set(self): - self.openstack('network qos rule set --max-kbps 15000 ' - '--max-burst-kbits 1800 ' - '--ingress %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID)) + self.openstack( + 'network qos rule set --max-kbps 15000 ' + '--max-burst-kbits 1800 ' + f'--ingress {self.QOS_POLICY_NAME} {self.RULE_ID}' + ) cmd_output = self.openstack( - 'network qos rule show %s %s' % - (self.QOS_POLICY_NAME, self.RULE_ID), + f'network qos rule show {self.QOS_POLICY_NAME} {self.RULE_ID}', parse_output=True, ) self.assertEqual(15000, cmd_output['max_kbps']) diff --git a/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py b/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py index 4ead65cc29..745191b8cb 100644 --- a/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py +++ b/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py @@ -17,23 +17,23 @@ class NetworkQosRuleTypeTests(common.NetworkTests): - """Functional tests for Network QoS rule type. 
""" + """Functional tests for Network QoS rule type.""" - AVAILABLE_RULE_TYPES = ['dscp_marking', - 'bandwidth_limit'] + AVAILABLE_RULE_TYPES = ['dscp_marking', 'bandwidth_limit'] # NOTE(ralonsoh): this list was updated in Yoga (February 2022) - ALL_AVAILABLE_RULE_TYPES = ['dscp_marking', - 'bandwidth_limit', - 'minimum_bandwidth', - 'packet_rate_limit', - 'minimum_packet_rate', - ] + ALL_AVAILABLE_RULE_TYPES = [ + 'dscp_marking', + 'bandwidth_limit', + 'minimum_bandwidth', + 'packet_rate_limit', + 'minimum_packet_rate', + ] def setUp(self): - super(NetworkQosRuleTypeTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() + + if not self.is_extension_enabled("qos"): + self.skipTest("No qos extension present") def test_qos_rule_type_list(self): cmd_output = self.openstack( @@ -49,7 +49,7 @@ def test_qos_rule_type_list_all_supported(self): cmd_output = self.openstack( 'network qos rule type list --all-supported -f json', - parse_output=True + parse_output=True, ) for rule_type in self.AVAILABLE_RULE_TYPES: self.assertIn(rule_type, [x['Type'] for x in cmd_output]) @@ -59,8 +59,7 @@ def test_qos_rule_type_list_all_rules(self): self.skipTest('No "qos-rule-type-filter" extension present') cmd_output = self.openstack( - 'network qos rule type list --all-rules -f json', - parse_output=True + 'network qos rule type list --all-rules -f json', parse_output=True ) for rule_type in self.ALL_AVAILABLE_RULE_TYPES: self.assertIn(rule_type, [x['Type'] for x in cmd_output]) @@ -68,7 +67,7 @@ def test_qos_rule_type_list_all_rules(self): def test_qos_rule_type_details(self): for rule_type in self.AVAILABLE_RULE_TYPES: cmd_output = self.openstack( - 'network qos rule type show %s -f json' % rule_type, + f'network qos rule type show {rule_type} -f json', parse_output=True, ) self.assertEqual(rule_type, cmd_output['rule_type_name']) diff --git a/openstackclient/tests/functional/network/v2/test_network_rbac.py b/openstackclient/tests/functional/network/v2/test_network_rbac.py index cb66759afc..d09b6e9b4e 100644 --- a/openstackclient/tests/functional/network/v2/test_network_rbac.py +++ b/openstackclient/tests/functional/network/v2/test_network_rbac.py @@ -17,16 +17,14 @@ class NetworkRBACTests(common.NetworkTests): """Functional tests for network rbac""" - OBJECT_ID = None - ID = None + + OBJECT_ID: str + ID: str HEADERS = ['ID'] FIELDS = ['id'] def setUp(self): - super(NetworkRBACTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() self.NET_NAME = uuid.uuid4().hex self.PROJECT_NAME = uuid.uuid4().hex @@ -35,20 +33,20 @@ def setUp(self): 'network create ' + self.NET_NAME, parse_output=True, ) - self.addCleanup(self.openstack, - 'network delete ' + cmd_output['id']) + self.addCleanup(self.openstack, 'network delete ' + cmd_output['id']) self.OBJECT_ID = cmd_output['id'] cmd_output = self.openstack( - 'network rbac create ' + - self.OBJECT_ID + - ' --action access_as_shared' + - ' --target-project admin' + - ' --type network', + 'network rbac create ' + + self.OBJECT_ID + + ' --action access_as_shared' + + ' --target-project admin' + + ' --type network', parse_output=True, ) - self.addCleanup(self.openstack, - 'network rbac delete ' + cmd_output['id']) + self.addCleanup( + self.openstack, 'network rbac delete ' + cmd_output['id'] + ) self.ID = cmd_output['id'] self.assertEqual(self.OBJECT_ID, 
cmd_output['object_id']) @@ -59,20 +57,27 @@ def test_network_rbac_list(self): def test_network_rbac_show(self): cmd_output = self.openstack( 'network rbac show ' + self.ID, - parse_output=True,) + parse_output=True, + ) self.assertEqual(self.ID, cmd_output['id']) def test_network_rbac_set(self): project_id = self.openstack( 'project create ' + self.PROJECT_NAME, - parse_output=True,)['id'] - self.openstack('network rbac set ' + self.ID + - ' --target-project ' + self.PROJECT_NAME) + parse_output=True, + )['id'] + self.openstack( + 'network rbac set ' + + self.ID + + ' --target-project ' + + self.PROJECT_NAME + ) cmd_output_rbac = self.openstack( 'network rbac show ' + self.ID, parse_output=True, ) self.assertEqual(project_id, cmd_output_rbac['target_project_id']) raw_output_project = self.openstack( - 'project delete ' + self.PROJECT_NAME) + 'project delete ' + self.PROJECT_NAME + ) self.assertEqual('', raw_output_project) diff --git a/openstackclient/tests/functional/network/v2/test_network_segment.py b/openstackclient/tests/functional/network/v2/test_network_segment.py index 111c4dc3f8..03f5daf743 100644 --- a/openstackclient/tests/functional/network/v2/test_network_segment.py +++ b/openstackclient/tests/functional/network/v2/test_network_segment.py @@ -20,14 +20,15 @@ class NetworkSegmentTests(common.NetworkTests): @classmethod def setUpClass(cls): - common.NetworkTests.setUpClass() + super().setUpClass() if cls.haz_network: cls.NETWORK_NAME = uuid.uuid4().hex cls.PHYSICAL_NETWORK_NAME = uuid.uuid4().hex # Create a network for the all subnet tests cmd_output = cls.openstack( - 'network create ' + cls.NETWORK_NAME, parse_output=True, + 'network create ' + cls.NETWORK_NAME, + parse_output=True, ) # Get network_id for assertEqual cls.NETWORK_ID = cmd_output["id"] @@ -37,27 +38,28 @@ def tearDownClass(cls): try: if cls.haz_network: raw_output = cls.openstack( - 'network delete ' + - cls.NETWORK_NAME + 'network delete ' + cls.NETWORK_NAME ) cls.assertOutput('', raw_output) finally: - super(NetworkSegmentTests, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(NetworkSegmentTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() + + if not self.is_extension_enabled("segment"): + self.skipTest("No segment extension present") def test_network_segment_create_delete(self): name = uuid.uuid4().hex json_output = self.openstack( - ' network segment create ' + - '--network ' + self.NETWORK_ID + ' ' + - '--network-type geneve ' + - '--segment 2055 ' + - name, + ' network segment create ' + + '--network ' + + self.NETWORK_ID + + ' ' + + '--network-type geneve ' + + '--segment 2055 ' + + name, parse_output=True, ) self.assertEqual( @@ -73,18 +75,19 @@ def test_network_segment_create_delete(self): def test_network_segment_list(self): name = uuid.uuid4().hex json_output = self.openstack( - ' network segment create ' + - '--network ' + self.NETWORK_ID + ' ' + - '--network-type geneve ' + - '--segment 2055 ' + - name, + ' network segment create ' + + '--network ' + + self.NETWORK_ID + + ' ' + + '--network-type geneve ' + + '--segment 2055 ' + + name, parse_output=True, ) network_segment_id = json_output.get('id') network_segment_name = json_output.get('name') self.addCleanup( - self.openstack, - 'network segment delete ' + network_segment_id + self.openstack, 'network segment delete ' + network_segment_id ) self.assertEqual( name, @@ -95,33 +98,25 @@ def 
test_network_segment_list(self): 'network segment list', parse_output=True, ) - item_map = { - item.get('ID'): item.get('Name') for item in json_output - } + item_map = {item.get('ID'): item.get('Name') for item in json_output} self.assertIn(network_segment_id, item_map.keys()) self.assertIn(network_segment_name, item_map.values()) def test_network_segment_set_show(self): name = uuid.uuid4().hex json_output = self.openstack( - ' network segment create ' + - '--network ' + self.NETWORK_ID + ' ' + - '--network-type geneve ' + - '--segment 2055 ' + - name, + ' network segment create ' + + '--network ' + + self.NETWORK_ID + + ' ' + + '--network-type geneve ' + + '--segment 2055 ' + + name, parse_output=True, ) - self.addCleanup( - self.openstack, - 'network segment delete ' + name - ) + self.addCleanup(self.openstack, 'network segment delete ' + name) - extension_output = self.openstack( - "extension list ", - parse_output=True, - ) - ext_alias = [x["Alias"] for x in extension_output] - if "standard-attr-segment" in ext_alias: + if self.is_extension_enabled('standard-attr-segment'): self.assertEqual( '', json_output["description"], @@ -133,15 +128,16 @@ def test_network_segment_set_show(self): new_description = 'new_description' cmd_output = self.openstack( - 'network segment set ' + - '--description ' + new_description + ' ' + - name + 'network segment set ' + + '--description ' + + new_description + + ' ' + + name ) self.assertOutput('', cmd_output) json_output = self.openstack( - 'network segment show ' + - name, + 'network segment show ' + name, parse_output=True, ) self.assertEqual( diff --git a/openstackclient/tests/functional/network/v2/test_network_segment_range.py b/openstackclient/tests/functional/network/v2/test_network_segment_range.py index 5cdf581241..3f07948b5c 100644 --- a/openstackclient/tests/functional/network/v2/test_network_segment_range.py +++ b/openstackclient/tests/functional/network/v2/test_network_segment_range.py @@ -23,28 +23,30 @@ class NetworkSegmentRangeTests(common.NetworkTests): """Functional tests for network segment range""" def setUp(self): - super(NetworkSegmentRangeTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() + if not self.is_extension_enabled('network-segment-range'): self.skipTest("No network-segment-range extension present") + self.PROJECT_NAME = uuid.uuid4().hex def test_network_segment_range_create_delete(self): # Make a project project_id = self.openstack( 'project create ' + self.PROJECT_NAME, - parse_output=True,)['id'] + parse_output=True, + )['id'] name = uuid.uuid4().hex json_output = self.openstack( - ' network segment range create ' + - '--private ' + - "--project " + self.PROJECT_NAME + " " + - '--network-type vxlan ' + - '--minimum 2005 ' + - '--maximum 2009 ' + - name, + ' network segment range create ' + + '--private ' + + "--project " + + self.PROJECT_NAME + + " " + + '--network-type vxlan ' + + '--minimum 2005 ' + + '--maximum 2009 ' + + name, parse_output=True, ) self.assertEqual( @@ -61,25 +63,26 @@ def test_network_segment_range_create_delete(self): ) self.assertOutput('', raw_output) raw_output_project = self.openstack( - 'project delete ' + self.PROJECT_NAME) + 'project delete ' + self.PROJECT_NAME + ) self.assertEqual('', raw_output_project) def test_network_segment_range_list(self): name = uuid.uuid4().hex json_output = self.openstack( - ' network segment range create ' + - '--shared ' + - '--network-type geneve ' + 
- '--minimum 2013 ' + - '--maximum 2017 ' + - name, + ' network segment range create ' + + '--shared ' + + '--network-type geneve ' + + '--minimum 2013 ' + + '--maximum 2017 ' + + name, parse_output=True, ) network_segment_range_id = json_output.get('id') network_segment_range_name = json_output.get('name') self.addCleanup( self.openstack, - 'network segment range delete ' + network_segment_range_id + 'network segment range delete ' + network_segment_range_id, ) self.assertEqual( name, @@ -90,31 +93,29 @@ def test_network_segment_range_list(self): 'network segment range list', parse_output=True, ) - item_map = { - item.get('ID'): item.get('Name') for item in json_output - } + item_map = {item.get('ID'): item.get('Name') for item in json_output} self.assertIn(network_segment_range_id, item_map.keys()) self.assertIn(network_segment_range_name, item_map.values()) def test_network_segment_range_set_show(self): project_id = self.openstack( 'project create ' + self.PROJECT_NAME, - parse_output=True,)['id'] + parse_output=True, + )['id'] name = uuid.uuid4().hex json_output = self.openstack( - ' network segment range create ' + - '--private ' + - "--project " + self.PROJECT_NAME + " " + - '--network-type geneve ' + - '--minimum 2021 ' + - '--maximum 2025 ' + - name, + ' network segment range create ' + + '--private ' + + "--project " + + self.PROJECT_NAME + + " " + + '--network-type geneve ' + + '--minimum 2021 ' + + '--maximum 2025 ' + + name, parse_output=True, ) - self.addCleanup( - self.openstack, - 'network segment range delete ' + name - ) + self.addCleanup(self.openstack, 'network segment range delete ' + name) self.assertEqual( name, json_output["name"], @@ -127,14 +128,13 @@ def test_network_segment_range_set_show(self): new_minimum = 2020 new_maximum = 2029 cmd_output = self.openstack( - 'network segment range set --minimum {min} --maximum {max} {name}' - .format(min=new_minimum, max=new_maximum, name=name) + f'network segment range set --minimum {new_minimum} --maximum {new_maximum} ' + f'{name}' ) self.assertOutput('', cmd_output) json_output = self.openstack( - 'network segment range show ' + - name, + 'network segment range show ' + name, parse_output=True, ) self.assertEqual( @@ -147,5 +147,6 @@ def test_network_segment_range_set_show(self): ) raw_output_project = self.openstack( - 'project delete ' + self.PROJECT_NAME) + 'project delete ' + self.PROJECT_NAME + ) self.assertEqual('', raw_output_project) diff --git a/openstackclient/tests/functional/network/v2/test_network_service_provider.py b/openstackclient/tests/functional/network/v2/test_network_service_provider.py index 9d513564d5..9be2827b1a 100644 --- a/openstackclient/tests/functional/network/v2/test_network_service_provider.py +++ b/openstackclient/tests/functional/network/v2/test_network_service_provider.py @@ -20,10 +20,8 @@ class TestNetworkServiceProvider(common.NetworkTests): """Functional tests for network service provider""" def setUp(self): - super(TestNetworkServiceProvider, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() + # NOTE(slaweq): # that tests should works only when "standard" Neutron L3 agent is # used, as e.g. OVN L3 plugin don't supports that. 
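The setUp hunks above all apply the same modernization pattern: drop the old Nova-Network haz_network guard, skip on a missing API extension instead, build commands with f-strings, and register the cleanup as soon as the resource exists. A minimal sketch of that pattern, with an illustrative class name (the common.NetworkTests base and the is_extension_enabled, openstack, skipTest, and addCleanup helpers are the ones used throughout these tests):

import uuid

from openstackclient.tests.functional.network.v2 import common


class ExampleQosPolicyTests(common.NetworkTests):
    # Illustrative sketch only; not part of the change set.

    def setUp(self):
        super().setUp()

        # Skip on a missing API extension instead of checking haz_network.
        if not self.is_extension_enabled('qos'):
            self.skipTest('No qos extension present')

        # Build the command with an f-string and register the cleanup
        # immediately, so it runs even if later setup steps fail.
        self.POLICY_NAME = f'qos_policy_{uuid.uuid4().hex}'
        self.openstack(f'network qos policy create {self.POLICY_NAME}')
        self.addCleanup(
            self.openstack,
            f'network qos policy delete {self.POLICY_NAME}',
        )

Registering the cleanup right after creation, rather than in tearDown, keeps the teardown tied to the resource that was actually created, so a failure later in setUp cannot leak it.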
@@ -37,5 +35,6 @@ def setUp(self): def test_network_service_provider_list(self): cmd_output = self.openstack( 'network service provider list', - parse_output=True,) + parse_output=True, + ) self.assertIn('L3_ROUTER_NAT', [x['Service Type'] for x in cmd_output]) diff --git a/openstackclient/tests/functional/network/v2/test_network_trunk.py b/openstackclient/tests/functional/network/v2/test_network_trunk.py index bbb77a0d61..1621973596 100644 --- a/openstackclient/tests/functional/network/v2/test_network_trunk.py +++ b/openstackclient/tests/functional/network/v2/test_network_trunk.py @@ -23,127 +23,126 @@ class NetworkTrunkTests(common.NetworkTests): def setUp(self): super().setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + + if not self.is_extension_enabled("trunk"): + self.skipTest("No trunk extension present") network_name = uuid.uuid4().hex subnet_name = uuid.uuid4().hex self.parent_port_name = uuid.uuid4().hex self.sub_port_name = uuid.uuid4().hex - self.openstack('network create %s' % network_name) - self.addCleanup(self.openstack, 'network delete %s' % network_name) + self.openstack(f'network create {network_name}') + self.addCleanup(self.openstack, f'network delete {network_name}') self.openstack( - 'subnet create %s ' - '--network %s --subnet-range 10.0.0.0/24' % ( - subnet_name, network_name)) - self.openstack('port create %s --network %s' % - (self.parent_port_name, network_name)) - self.addCleanup(self.openstack, 'port delete %s' % - self.parent_port_name) - json_out = self.openstack('port create %s --network %s -f json' % - (self.sub_port_name, network_name)) + f'subnet create {subnet_name} ' + f'--network {network_name} --subnet-range 10.0.0.0/24' + ) + self.openstack( + f'port create {self.parent_port_name} --network {network_name}' + ) + self.addCleanup(self.openstack, f'port delete {self.parent_port_name}') + json_out = self.openstack( + f'port create {self.sub_port_name} --network {network_name} -f json' + ) self.sub_port_id = json.loads(json_out)['id'] - self.addCleanup(self.openstack, 'port delete %s' % self.sub_port_name) + self.addCleanup(self.openstack, f'port delete {self.sub_port_name}') def test_network_trunk_create_delete(self): trunk_name = uuid.uuid4().hex - self.openstack('network trunk create %s --parent-port %s -f json ' % - (trunk_name, self.parent_port_name)) - raw_output = self.openstack( - 'network trunk delete ' + - trunk_name + self.openstack( + f'network trunk create {trunk_name} --parent-port {self.parent_port_name} -f json ' ) + raw_output = self.openstack('network trunk delete ' + trunk_name) self.assertEqual('', raw_output) def test_network_trunk_list(self): trunk_name = uuid.uuid4().hex - json_output = json.loads(self.openstack( - 'network trunk create %s --parent-port %s -f json ' % - (trunk_name, self.parent_port_name))) - self.addCleanup(self.openstack, - 'network trunk delete ' + trunk_name) + json_output = json.loads( + self.openstack( + f'network trunk create {trunk_name} --parent-port {self.parent_port_name} -f json ' + ) + ) + self.addCleanup(self.openstack, 'network trunk delete ' + trunk_name) self.assertEqual(trunk_name, json_output['name']) - json_output = json.loads(self.openstack( - 'network trunk list -f json' - )) + json_output = json.loads(self.openstack('network trunk list -f json')) self.assertIn(trunk_name, [tr['Name'] for tr in json_output]) def test_network_trunk_set_unset(self): trunk_name = uuid.uuid4().hex - json_output = 
json.loads(self.openstack( - 'network trunk create %s --parent-port %s -f json ' % - (trunk_name, self.parent_port_name))) - self.addCleanup(self.openstack, - 'network trunk delete ' + trunk_name) + json_output = json.loads( + self.openstack( + f'network trunk create {trunk_name} --parent-port {self.parent_port_name} -f json ' + ) + ) + self.addCleanup(self.openstack, 'network trunk delete ' + trunk_name) self.assertEqual(trunk_name, json_output['name']) - self.openstack( - 'network trunk set ' - '--enable ' + - trunk_name - ) + self.openstack('network trunk set --enable ' + trunk_name) - json_output = json.loads(self.openstack( - 'network trunk show -f json ' + - trunk_name - )) + json_output = json.loads( + self.openstack('network trunk show -f json ' + trunk_name) + ) self.assertTrue(json_output['is_admin_state_up']) # Add subport to trunk self.openstack( - 'network trunk set ' + - '--subport port=%s,segmentation-type=vlan,segmentation-id=42 ' % - (self.sub_port_name) + - trunk_name + 'network trunk set ' + + f'--subport port={self.sub_port_name},segmentation-type=vlan,segmentation-id=42 ' + + trunk_name + ) + json_output = json.loads( + self.openstack('network trunk show -f json ' + trunk_name) ) - json_output = json.loads(self.openstack( - 'network trunk show -f json ' + - trunk_name - )) self.assertEqual( - [{ - 'port_id': self.sub_port_id, - 'segmentation_id': 42, - 'segmentation_type': 'vlan' - }], - json_output['sub_ports']) + [ + { + 'port_id': self.sub_port_id, + 'segmentation_id': 42, + 'segmentation_type': 'vlan', + } + ], + json_output['sub_ports'], + ) # Remove subport from trunk self.openstack( - 'network trunk unset ' + - trunk_name + - ' --subport ' + - self.sub_port_name + 'network trunk unset ' + + trunk_name + + ' --subport ' + + self.sub_port_name ) - json_output = json.loads(self.openstack( - 'network trunk show -f json ' + - trunk_name - )) - self.assertEqual( - [], - json_output['sub_ports']) + json_output = json.loads( + self.openstack('network trunk show -f json ' + trunk_name) + ) + self.assertEqual([], json_output['sub_ports']) def test_network_trunk_list_subports(self): trunk_name = uuid.uuid4().hex - json_output = json.loads(self.openstack( - 'network trunk create %s --parent-port %s ' - '--subport port=%s,segmentation-type=vlan,segmentation-id=42 ' - '-f json ' % - (trunk_name, self.parent_port_name, self.sub_port_name))) - self.addCleanup(self.openstack, - 'network trunk delete ' + trunk_name) + json_output = json.loads( + self.openstack( + f'network trunk create {trunk_name} --parent-port {self.parent_port_name} ' + f'--subport port={self.sub_port_name},segmentation-type=vlan,segmentation-id=42 ' + '-f json ' + ) + ) + self.addCleanup(self.openstack, 'network trunk delete ' + trunk_name) self.assertEqual(trunk_name, json_output['name']) - json_output = json.loads(self.openstack( - 'network subport list --trunk %s -f json' % trunk_name)) + json_output = json.loads( + self.openstack( + f'network subport list --trunk {trunk_name} -f json' + ) + ) self.assertEqual( - [{ - 'Port': self.sub_port_id, - 'Segmentation ID': 42, - 'Segmentation Type': 'vlan' - }], - json_output) + [ + { + 'Port': self.sub_port_id, + 'Segmentation ID': 42, + 'Segmentation Type': 'vlan', + } + ], + json_output, + ) diff --git a/openstackclient/tests/functional/network/v2/test_port.py b/openstackclient/tests/functional/network/v2/test_port.py index f5bc9c4ae5..7f1336801f 100644 --- a/openstackclient/tests/functional/network/v2/test_port.py +++ 
b/openstackclient/tests/functional/network/v2/test_port.py @@ -12,6 +12,8 @@ import uuid +from tempest.lib import exceptions as tempest_exc + from openstackclient.tests.functional.network.v2 import common @@ -25,38 +27,29 @@ class PortTests(common.NetworkTagTests): @classmethod def setUpClass(cls): - common.NetworkTests.setUpClass() + super().setUpClass() if cls.haz_network: cls.NAME = uuid.uuid4().hex cls.NETWORK_NAME = uuid.uuid4().hex # Create a network for the port tests - cls.openstack( - 'network create %s' % cls.NETWORK_NAME - ) + cls.openstack(f'network create {cls.NETWORK_NAME}') @classmethod def tearDownClass(cls): try: if cls.haz_network: raw_output = cls.openstack( - 'network delete %s' % cls.NETWORK_NAME + f'network delete {cls.NETWORK_NAME}' ) cls.assertOutput('', raw_output) finally: - super(PortTests, cls).tearDownClass() - - def setUp(self): - super(PortTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().tearDownClass() def test_port_delete(self): """Test create, delete multiple""" json_output = self.openstack( - 'port create --network %s %s' % - (self.NETWORK_NAME, self.NAME), + f'port create --network {self.NETWORK_NAME} {self.NAME}', parse_output=True, ) id1 = json_output.get('id') @@ -65,8 +58,7 @@ def test_port_delete(self): self.assertEqual(self.NAME, json_output.get('name')) json_output = self.openstack( - 'port create --network %s %sx' % - (self.NETWORK_NAME, self.NAME), + f'port create --network {self.NETWORK_NAME} {self.NAME}x', parse_output=True, ) id2 = json_output.get('id') @@ -75,33 +67,41 @@ def test_port_delete(self): self.assertEqual(self.NAME + 'x', json_output.get('name')) # Clean up after ourselves - raw_output = self.openstack('port delete %s %s' % (id1, id2)) + raw_output = self.openstack(f'port delete {id1} {id2}') self.assertOutput('', raw_output) def test_port_list(self): """Test create defaults, list, delete""" json_output = self.openstack( - 'port create --network %s %s' % - (self.NETWORK_NAME, self.NAME), + f'port create --network {self.NETWORK_NAME} {self.NAME}', parse_output=True, ) id1 = json_output.get('id') self.assertIsNotNone(id1) mac1 = json_output.get('mac_address') self.assertIsNotNone(mac1) - self.addCleanup(self.openstack, 'port delete %s' % id1) + self.addCleanup(self.openstack, f'port delete {id1}') self.assertEqual(self.NAME, json_output.get('name')) + # sg for port2 + sg_name1 = uuid.uuid4().hex + json_output = self.openstack( + f'security group create {sg_name1}', + parse_output=True, + ) + sg_id1 = json_output.get('id') + self.addCleanup(self.openstack, f'security group delete {sg_id1}') json_output = self.openstack( - 'port create --network %s %sx' % - (self.NETWORK_NAME, self.NAME), + f'port create --network {self.NETWORK_NAME} ' + f'--security-group {sg_name1} {self.NAME}x', parse_output=True, ) + id2 = json_output.get('id') self.assertIsNotNone(id2) mac2 = json_output.get('mac_address') self.assertIsNotNone(mac2) - self.addCleanup(self.openstack, 'port delete %s' % id2) + self.addCleanup(self.openstack, f'port delete {id2}') self.assertEqual(self.NAME + 'x', json_output.get('name')) # Test list @@ -109,8 +109,9 @@ def test_port_list(self): 'port list', parse_output=True, ) - item_map = {item.get('ID'): item.get('MAC Address') for item in - json_output} + item_map = { + item.get('ID'): item.get('MAC Address') for item in json_output + } self.assertIn(id1, item_map.keys()) self.assertIn(id2, item_map.keys()) 
self.assertIn(mac1, item_map.values()) @@ -124,19 +125,37 @@ def test_port_list(self): id_list = [item.get('ID') for item in json_output] self.assertIn(id1, id_list) self.assertIn(id2, id_list) + item_sg_map = { + item.get('ID'): item.get('Security Groups') for item in json_output + } + self.assertIn(id1, item_sg_map.keys()) + self.assertIn(id2, item_sg_map.keys()) + self.assertIn([sg_id1], item_sg_map.values()) # Test list --mac-address json_output = self.openstack( - 'port list --mac-address %s' % mac2, + f'port list --mac-address {mac2}', parse_output=True, ) - item_map = {item.get('ID'): item.get('MAC Address') for item in - json_output} + item_map = { + item.get('ID'): item.get('MAC Address') for item in json_output + } self.assertNotIn(id1, item_map.keys()) self.assertIn(id2, item_map.keys()) self.assertNotIn(mac1, item_map.values()) self.assertIn(mac2, item_map.values()) + # Test list --security-group + json_output = self.openstack( + f'port list --security-group {sg_id1}', + parse_output=True, + ) + item_map = { + item.get('ID'): item.get('Security Groups') for item in json_output + } + self.assertNotIn(id1, item_map.keys()) + self.assertIn(id2, item_map.keys()) + # Test list with unknown fields json_output = self.openstack( 'port list -c ID -c Name -c device_id', @@ -145,34 +164,38 @@ def test_port_list(self): id_list = [p['ID'] for p in json_output] self.assertIn(id1, id_list) self.assertIn(id2, id_list) - # Check an unknown field exists - self.assertIn('device_id', json_output[0]) + # Check an unknown field does not exist + self.assertNotIn('device_id', json_output[0]) + + # Test list with only unknown fields + exc = self.assertRaises( + tempest_exc.CommandFailed, + self.openstack, + 'port list -c device_id', + ) + self.assertIn("No recognized column names in ['device_id']", str(exc)) def test_port_set(self): """Test create, set, show, delete""" name = uuid.uuid4().hex json_output = self.openstack( 'port create ' - '--network %s ' + f'--network {self.NETWORK_NAME} ' '--description xyzpdq ' - '--disable %s' % - (self.NETWORK_NAME, name), + f'--disable {name}', parse_output=True, ) id1 = json_output.get('id') - self.addCleanup(self.openstack, 'port delete %s' % id1) + self.addCleanup(self.openstack, f'port delete {id1}') self.assertEqual(name, json_output.get('name')) self.assertEqual('xyzpdq', json_output.get('description')) self.assertEqual(False, json_output.get('admin_state_up')) - raw_output = self.openstack( - 'port set --enable %s' % - name - ) + raw_output = self.openstack(f'port set --enable {name}') self.assertOutput('', raw_output) json_output = self.openstack( - 'port show %s' % name, + f'port show {name}', parse_output=True, ) sg_id = json_output.get('security_group_ids')[0] @@ -183,11 +206,12 @@ def test_port_set(self): self.assertIsNotNone(json_output.get('mac_address')) raw_output = self.openstack( - 'port unset --security-group %s %s' % (sg_id, id1)) + f'port unset --security-group {sg_id} {id1}' + ) self.assertOutput('', raw_output) json_output = self.openstack( - 'port show %s' % name, + f'port show {name}', parse_output=True, ) self.assertEqual([], json_output.get('security_group_ids')) @@ -195,20 +219,19 @@ def test_port_set(self): def test_port_admin_set(self): """Test create, set (as admin), show, delete""" json_output = self.openstack( - 'port create ' - '--network %s %s' % (self.NETWORK_NAME, self.NAME), + f'port create --network {self.NETWORK_NAME} {self.NAME}', parse_output=True, ) id_ = json_output.get('id') - self.addCleanup(self.openstack, 'port 
delete %s' % id_) + self.addCleanup(self.openstack, f'port delete {id_}') raw_output = self.openstack( '--os-username admin ' - 'port set --mac-address 11:22:33:44:55:66 %s' % - self.NAME) + f'port set --mac-address 11:22:33:44:55:66 {self.NAME}' + ) self.assertOutput('', raw_output) json_output = self.openstack( - 'port show %s' % self.NAME, + f'port show {self.NAME}', parse_output=True, ) self.assertEqual(json_output.get('mac_address'), '11:22:33:44:55:66') @@ -217,68 +240,141 @@ def test_port_set_sg(self): """Test create, set, show, delete""" sg_name1 = uuid.uuid4().hex json_output = self.openstack( - 'security group create %s' % - sg_name1, + f'security group create {sg_name1}', parse_output=True, ) sg_id1 = json_output.get('id') - self.addCleanup(self.openstack, 'security group delete %s' % sg_id1) + self.addCleanup(self.openstack, f'security group delete {sg_id1}') sg_name2 = uuid.uuid4().hex json_output = self.openstack( - 'security group create %s' % - sg_name2, + f'security group create {sg_name2}', parse_output=True, ) sg_id2 = json_output.get('id') - self.addCleanup(self.openstack, 'security group delete %s' % sg_id2) + self.addCleanup(self.openstack, f'security group delete {sg_id2}') name = uuid.uuid4().hex json_output = self.openstack( 'port create ' - '--network %s ' - '--security-group %s %s' % - (self.NETWORK_NAME, sg_name1, name), + f'--network {self.NETWORK_NAME} ' + f'--security-group {sg_name1} {name}', parse_output=True, ) id1 = json_output.get('id') - self.addCleanup(self.openstack, 'port delete %s' % id1) + self.addCleanup(self.openstack, f'port delete {id1}') self.assertEqual(name, json_output.get('name')) self.assertEqual([sg_id1], json_output.get('security_group_ids')) raw_output = self.openstack( - 'port set ' - '--security-group %s %s' % - (sg_name2, name) + f'port set --security-group {sg_name2} {name}' ) self.assertOutput('', raw_output) json_output = self.openstack( - 'port show %s' % name, + f'port show {name}', parse_output=True, ) self.assertEqual(name, json_output.get('name')) # NOTE(amotoki): The order of the field is not predictable, self.assertIsInstance(json_output.get('security_group_ids'), list) - self.assertEqual(sorted([sg_id1, sg_id2]), - sorted(json_output.get('security_group_ids'))) + self.assertEqual( + sorted([sg_id1, sg_id2]), + sorted(json_output.get('security_group_ids')), + ) raw_output = self.openstack( - 'port unset --security-group %s %s' % (sg_id1, id1)) + f'port unset --security-group {sg_id1} {id1}' + ) self.assertOutput('', raw_output) json_output = self.openstack( - 'port show %s' % name, + f'port show {name}', parse_output=True, ) - self.assertEqual( - [sg_id2], - json_output.get('security_group_ids') - ) + self.assertEqual([sg_id2], json_output.get('security_group_ids')) def _create_resource_for_tag_test(self, name, args): return self.openstack( - '{} create --network {} {} {}' - .format(self.base_command, self.NETWORK_NAME, args, name), + f'{self.base_command} create --network {self.NETWORK_NAME} {args} {name}', + parse_output=True, + ) + + def _trunk_creation(self): + pport = uuid.uuid4().hex + sport1 = uuid.uuid4().hex + sport2 = uuid.uuid4().hex + trunk = uuid.uuid4().hex + json_output = self.openstack( + f'port create --network {self.NETWORK_NAME} {pport}', + parse_output=True, + ) + pport_id = json_output.get('id') + json_output = self.openstack( + f'port create --network {self.NETWORK_NAME} {sport1}', + parse_output=True, + ) + sport1_id = json_output.get('id') + json_output = self.openstack( + f'port create --network 
{self.NETWORK_NAME} {sport2}', + parse_output=True, + ) + sport2_id = json_output.get('id') + + self.openstack( + f'network trunk create --parent-port {pport} {trunk}', + ) + self.openstack( + f'network trunk set --subport port={sport1},' + f'segmentation-type=vlan,segmentation-id=100 {trunk}', + ) + self.openstack( + f'network trunk set --subport port={sport2},' + f'segmentation-type=vlan,segmentation-id=101 {trunk}', + ) + + # NOTE(ralonsoh): keep this order to first delete the trunk and then + # the ports. + self.addCleanup(self.openstack, f'port delete {pport_id}') + self.addCleanup(self.openstack, f'port delete {sport1_id}') + self.addCleanup(self.openstack, f'port delete {sport2_id}') + self.addCleanup(self.openstack, f'network trunk delete {trunk}') + + return pport_id, sport1_id, sport2_id + + def check_subports(self, subports, pport_id, sport1_id, sport2_id): + self.assertEqual(2, len(subports)) + for subport in subports: + if subport['port_id'] == sport1_id: + self.assertEqual(100, subport['segmentation_id']) + elif subport['port_id'] == sport2_id: + self.assertEqual(101, subport['segmentation_id']) + else: + self.fail( + f'Port {pport_id} does not have subport ' + f'{subport["port_id"]}' + ) + self.assertEqual('vlan', subport['segmentation_type']) + + def test_port_list_with_trunk(self): + pport_id, sport1_id, sport2_id = self._trunk_creation() + + # List all ports with "--long" flag to retrieve the trunk details + json_output = self.openstack( + 'port list --long', + parse_output=True, + ) + port = next(port for port in json_output if port['ID'] == pport_id) + subports = port['Trunk subports'] + self.check_subports(subports, pport_id, sport1_id, sport2_id) + + def test_port_show_with_trunk(self): + pport_id, sport1_id, sport2_id = self._trunk_creation() + + # Show the parent port to retrieve the trunk details + port = self.openstack( + f'port show {pport_id}', parse_output=True, ) + subports = port['trunk_details']['sub_ports'] + self.check_subports(subports, pport_id, sport1_id, sport2_id) diff --git a/openstackclient/tests/functional/network/v2/test_router.py b/openstackclient/tests/functional/network/v2/test_router.py index 07a5a633d9..cc7c8b1b70 100644 --- a/openstackclient/tests/functional/network/v2/test_router.py +++ b/openstackclient/tests/functional/network/v2/test_router.py @@ -20,19 +20,12 @@ class RouterTests(common.NetworkTagTests): base_command = 'router' - def setUp(self): - super(RouterTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") - def test_router_create_and_delete(self): """Test create options, delete multiple""" name1 = uuid.uuid4().hex name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'router create ' + - name1, + 'router create ' + name1, parse_output=True, ) self.assertEqual( @@ -40,8 +33,7 @@ def test_router_create_and_delete(self): cmd_output["name"], ) cmd_output = self.openstack( - 'router create ' + - name2, + 'router create ' + name2, parse_output=True, ) self.assertEqual( @@ -49,10 +41,47 @@ def test_router_create_and_delete(self): cmd_output["name"], ) - del_output = self.openstack( - 'router delete ' + name1 + ' ' + name2) + del_output = self.openstack('router delete ' + name1 + ' ' + name2) self.assertOutput('', del_output) + def test_router_create_with_external_gateway(self): + network_name = uuid.uuid4().hex + subnet_name = uuid.uuid4().hex + qos_policy = uuid.uuid4().hex + router_name = uuid.uuid4().hex + + cmd_net = 
self.openstack( + f'network create --external {network_name}', parse_output=True + ) + self.addCleanup(self.openstack, f'network delete {network_name}') + network_id = cmd_net['id'] + + self.openstack( + f'subnet create {subnet_name} ' + f'--network {network_name} --subnet-range 10.0.0.0/24' + ) + + cmd_qos = self.openstack( + f'network qos policy create {qos_policy}', parse_output=True + ) + self.addCleanup( + self.openstack, f'network qos policy delete {qos_policy}' + ) + qos_id = cmd_qos['id'] + + self.openstack( + f'router create --external-gateway {network_name} ' + f'--qos-policy {qos_policy} {router_name}' + ) + self.addCleanup(self.openstack, f'router delete {router_name}') + + cmd_output = self.openstack( + f'router show {router_name}', parse_output=True + ) + gw_info = cmd_output['external_gateway_info'] + self.assertEqual(network_id, gw_info['network_id']) + self.assertEqual(qos_id, gw_info['qos_policy_id']) + def test_router_list(self): """Test create, list filter""" # Get project IDs @@ -77,12 +106,14 @@ def test_router_list(self): self.assertNotEqual(admin_project_id, demo_project_id) self.assertEqual(admin_project_id, auth_project_id) + # type narrow + assert admin_project_id is not None + assert demo_project_id is not None + name1 = uuid.uuid4().hex name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'router create ' + - '--disable ' + - name1, + 'router create ' + '--disable ' + name1, parse_output=True, ) @@ -100,9 +131,7 @@ def test_router_list(self): cmd_output["project_id"], ) cmd_output = self.openstack( - 'router create ' + - '--project ' + demo_project_id + - ' ' + name2, + 'router create ' + '--project ' + demo_project_id + ' ' + name2, parse_output=True, ) @@ -122,8 +151,7 @@ def test_router_list(self): # Test list --project cmd_output = self.openstack( - 'router list ' + - '--project ' + demo_project_id, + 'router list ' + '--project ' + demo_project_id, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -132,8 +160,7 @@ def test_router_list(self): # Test list --disable cmd_output = self.openstack( - 'router list ' + - '--disable ', + 'router list ' + '--disable ', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -142,8 +169,7 @@ def test_router_list(self): # Test list --name cmd_output = self.openstack( - 'router list ' + - '--name ' + name1, + 'router list ' + '--name ' + name1, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -152,8 +178,7 @@ def test_router_list(self): # Test list --long cmd_output = self.openstack( - 'router list ' + - '--long ', + 'router list ' + '--long ', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -169,7 +194,8 @@ def test_router_list_l3_agent(self): name = uuid.uuid4().hex cmd_output = self.openstack( 'router create ' + name, - parse_output=True,) + parse_output=True, + ) self.addCleanup(self.openstack, 'router delete ' + name) # Get router ID @@ -177,7 +203,8 @@ def test_router_list_l3_agent(self): # Get l3 agent id cmd_output = self.openstack( 'network agent list --agent-type l3', - parse_output=True,) + parse_output=True, + ) # Check at least one L3 agent is included in the response. 
self.assertTrue(cmd_output) @@ -185,20 +212,24 @@ def test_router_list_l3_agent(self): # Add router to agent self.openstack( - 'network agent add router --l3 ' + agent_id + ' ' + router_id) + 'network agent add router --l3 ' + agent_id + ' ' + router_id + ) cmd_output = self.openstack( 'router list --agent ' + agent_id, - parse_output=True,) + parse_output=True, + ) router_ids = [x['ID'] for x in cmd_output] self.assertIn(router_id, router_ids) # Remove router from agent self.openstack( - 'network agent remove router --l3 ' + agent_id + ' ' + router_id) + 'network agent remove router --l3 ' + agent_id + ' ' + router_id + ) cmd_output = self.openstack( 'router list --agent ' + agent_id, - parse_output=True,) + parse_output=True, + ) router_ids = [x['ID'] for x in cmd_output] self.assertNotIn(router_id, router_ids) @@ -208,9 +239,7 @@ def test_router_set_show_unset(self): name = uuid.uuid4().hex new_name = name + "_" cmd_output = self.openstack( - 'router create ' + - '--description aaaa ' + - name, + 'router create ' + '--description aaaa ' + name, parse_output=True, ) self.addCleanup(self.openstack, 'router delete ' + new_name) @@ -225,17 +254,17 @@ def test_router_set_show_unset(self): # Test set --disable cmd_output = self.openstack( - 'router set ' + - '--name ' + new_name + - ' --description bbbb ' + - '--disable ' + - name + 'router set ' + + '--name ' + + new_name + + ' --description bbbb ' + + '--disable ' + + name ) self.assertOutput('', cmd_output) cmd_output = self.openstack( - 'router show ' + - new_name, + 'router show ' + new_name, parse_output=True, ) self.assertEqual( @@ -256,13 +285,10 @@ def test_router_set_show_unset(self): # Test unset cmd_output = self.openstack( - 'router unset ' + - '--external-gateway ' + - new_name + 'router unset ' + '--external-gateway ' + new_name ) cmd_output = self.openstack( - 'router show ' + - new_name, + 'router show ' + new_name, parse_output=True, ) self.assertIsNone(cmd_output["external_gateway_info"]) @@ -272,16 +298,15 @@ def _test_set_router_distributed(self, router_name): return cmd_output = self.openstack( - 'router set ' + - '--distributed ' + - '--external-gateway public ' + - router_name + 'router set ' + + '--distributed ' + + '--external-gateway public ' + + router_name ) self.assertOutput('', cmd_output) cmd_output = self.openstack( - 'router show ' + - router_name, + 'router show ' + router_name, parse_output=True, ) self.assertTrue(cmd_output["distributed"]) @@ -292,43 +317,50 @@ def test_router_add_remove_route(self): subnet_name = uuid.uuid4().hex router_name = uuid.uuid4().hex - self.openstack('network create %s' % network_name) - self.addCleanup(self.openstack, 'network delete %s' % network_name) + self.openstack(f'network create {network_name}') + self.addCleanup(self.openstack, f'network delete {network_name}') self.openstack( - 'subnet create %s ' - '--network %s --subnet-range 10.0.0.0/24' % ( - subnet_name, network_name)) - - self.openstack('router create %s' % router_name) - self.addCleanup(self.openstack, 'router delete %s' % router_name) - - self.openstack('router add subnet %s %s' % (router_name, subnet_name)) - self.addCleanup(self.openstack, 'router remove subnet %s %s' % ( - router_name, subnet_name)) - - out1 = self.openstack( - 'router add route %s ' - '--route destination=10.0.10.0/24,gateway=10.0.0.10' % - router_name, - parse_output=True,), - self.assertEqual(1, len(out1[0]['routes'])) + f'subnet create {subnet_name} ' + f'--network {network_name} --subnet-range 10.0.0.0/24' + ) + self.openstack(f'router 
create {router_name}') + self.addCleanup(self.openstack, f'router delete {router_name}') + + self.openstack(f'router add subnet {router_name} {subnet_name}') self.addCleanup( - self.openstack, 'router set %s --no-route' % router_name) - - out2 = self.openstack( - 'router add route %s ' - '--route destination=10.0.10.0/24,gateway=10.0.0.10 ' - '--route destination=10.0.11.0/24,gateway=10.0.0.11' % - router_name, - parse_output=True,), + self.openstack, + f'router remove subnet {router_name} {subnet_name}', + ) + + out1 = ( + self.openstack( + f'router add route {router_name} ' + '--route destination=10.0.10.0/24,gateway=10.0.0.10', + parse_output=True, + ), + ) + self.assertEqual(1, len(out1[0]['routes'])) + + self.addCleanup(self.openstack, f'router set {router_name} --no-route') + + out2 = ( + self.openstack( + f'router add route {router_name} ' + '--route destination=10.0.10.0/24,gateway=10.0.0.10 ' + '--route destination=10.0.11.0/24,gateway=10.0.0.11', + parse_output=True, + ), + ) self.assertEqual(2, len(out2[0]['routes'])) - out3 = self.openstack( - 'router remove route %s ' - '--route destination=10.0.11.0/24,gateway=10.0.0.11 ' - '--route destination=10.0.12.0/24,gateway=10.0.0.12' % - router_name, - parse_output=True,), + out3 = ( + self.openstack( + f'router remove route {router_name} ' + '--route destination=10.0.11.0/24,gateway=10.0.0.11 ' + '--route destination=10.0.12.0/24,gateway=10.0.0.12', + parse_output=True, + ), + ) self.assertEqual(1, len(out3[0]['routes'])) diff --git a/openstackclient/tests/functional/network/v2/test_security_group.py b/openstackclient/tests/functional/network/v2/test_security_group.py index c9d929f57e..1eb03a5a2e 100644 --- a/openstackclient/tests/functional/network/v2/test_security_group.py +++ b/openstackclient/tests/functional/network/v2/test_security_group.py @@ -19,20 +19,17 @@ class SecurityGroupTests(common.NetworkTests): """Functional tests for security group""" def setUp(self): - super(SecurityGroupTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().setUp() self.NAME = uuid.uuid4().hex self.OTHER_NAME = uuid.uuid4().hex cmd_output = self.openstack( - 'security group create ' + - self.NAME, + 'security group create ' + self.NAME, parse_output=True, ) - self.addCleanup(self.openstack, - 'security group delete ' + cmd_output['id']) + self.addCleanup( + self.openstack, 'security group delete ' + cmd_output['id'] + ) self.assertEqual(self.NAME, cmd_output['name']) def test_security_group_list(self): @@ -42,8 +39,10 @@ def test_security_group_list(self): def test_security_group_set(self): other_name = uuid.uuid4().hex raw_output = self.openstack( - 'security group set --description NSA --stateless --name ' + - other_name + ' ' + self.NAME + 'security group set --description NSA --stateless --name ' + + other_name + + ' ' + + self.NAME ) self.assertEqual('', raw_output) diff --git a/openstackclient/tests/functional/network/v2/test_security_group_rule.py b/openstackclient/tests/functional/network/v2/test_security_group_rule.py index d64fb420fa..0a55697d3a 100644 --- a/openstackclient/tests/functional/network/v2/test_security_group_rule.py +++ b/openstackclient/tests/functional/network/v2/test_security_group_rule.py @@ -19,33 +19,32 @@ class SecurityGroupRuleTests(common.NetworkTests): """Functional tests for security group rule""" def setUp(self): - super(SecurityGroupRuleTests, self).setUp() - # Nothing in this class works with Nova Network - if 
not self.haz_network: - self.skipTest("No Network service present") + super().setUp() self.SECURITY_GROUP_NAME = uuid.uuid4().hex # Create the security group to hold the rule cmd_output = self.openstack( - 'security group create ' + - self.SECURITY_GROUP_NAME, + 'security group create ' + self.SECURITY_GROUP_NAME, parse_output=True, ) - self.addCleanup(self.openstack, - 'security group delete ' + self.SECURITY_GROUP_NAME) + self.addCleanup( + self.openstack, 'security group delete ' + self.SECURITY_GROUP_NAME + ) self.assertEqual(self.SECURITY_GROUP_NAME, cmd_output['name']) # Create the security group rule. cmd_output = self.openstack( - 'security group rule create ' + - self.SECURITY_GROUP_NAME + ' ' + - '--protocol tcp --dst-port 80:80 ' + - '--ingress --ethertype IPv4 ', + 'security group rule create ' + + self.SECURITY_GROUP_NAME + + ' ' + + '--protocol tcp --dst-port 80:80 ' + + '--ingress --ethertype IPv4 ', parse_output=True, ) - self.addCleanup(self.openstack, - 'security group rule delete ' + cmd_output['id']) + self.addCleanup( + self.openstack, 'security group rule delete ' + cmd_output['id'] + ) self.SECURITY_GROUP_RULE_ID = cmd_output['id'] def test_security_group_rule_list(self): @@ -53,8 +52,9 @@ def test_security_group_rule_list(self): 'security group rule list ' + self.SECURITY_GROUP_NAME, parse_output=True, ) - self.assertIn(self.SECURITY_GROUP_RULE_ID, - [rule['ID'] for rule in cmd_output]) + self.assertIn( + self.SECURITY_GROUP_RULE_ID, [rule['ID'] for rule in cmd_output] + ) def test_security_group_rule_show(self): cmd_output = self.openstack( diff --git a/openstackclient/tests/functional/network/v2/test_subnet.py b/openstackclient/tests/functional/network/v2/test_subnet.py index 041ec9f04b..2ec987e9b8 100644 --- a/openstackclient/tests/functional/network/v2/test_subnet.py +++ b/openstackclient/tests/functional/network/v2/test_subnet.py @@ -23,14 +23,13 @@ class SubnetTests(common.NetworkTagTests): @classmethod def setUpClass(cls): - common.NetworkTests.setUpClass() + super().setUpClass() if cls.haz_network: cls.NETWORK_NAME = uuid.uuid4().hex # Create a network for the all subnet tests cmd_output = cls.openstack( - 'network create ' + - cls.NETWORK_NAME, + 'network create ' + cls.NETWORK_NAME, parse_output=True, ) # Get network_id for assertEqual @@ -41,25 +40,18 @@ def tearDownClass(cls): try: if cls.haz_network: raw_output = cls.openstack( - 'network delete ' + - cls.NETWORK_NAME + 'network delete ' + cls.NETWORK_NAME ) cls.assertOutput('', raw_output) finally: - super(SubnetTests, cls).tearDownClass() - - def setUp(self): - super(SubnetTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") + super().tearDownClass() def test_subnet_create_and_delete(self): """Test create, delete multiple""" name1 = uuid.uuid4().hex - cmd = ('subnet create --network ' + - self.NETWORK_NAME + - ' --subnet-range') + cmd = ( + 'subnet create --network ' + self.NETWORK_NAME + ' --subnet-range' + ) cmd_output = self._subnet_create(cmd, name1) self.assertEqual( name1, @@ -70,9 +62,9 @@ def test_subnet_create_and_delete(self): cmd_output["network_id"], ) name2 = uuid.uuid4().hex - cmd = ('subnet create --network ' + - self.NETWORK_NAME + - ' --subnet-range') + cmd = ( + 'subnet create --network ' + self.NETWORK_NAME + ' --subnet-range' + ) cmd_output = self._subnet_create(cmd, name2) self.assertEqual( name2, @@ -83,17 +75,19 @@ def test_subnet_create_and_delete(self): cmd_output["network_id"], ) - 
del_output = self.openstack( - 'subnet delete ' + name1 + ' ' + name2) + del_output = self.openstack('subnet delete ' + name1 + ' ' + name2) self.assertOutput('', del_output) def test_subnet_list(self): """Test create, list filter""" name1 = uuid.uuid4().hex name2 = uuid.uuid4().hex - cmd = ('subnet create ' + - '--network ' + self.NETWORK_NAME + - ' --dhcp --subnet-range') + cmd = ( + 'subnet create ' + + '--network ' + + self.NETWORK_NAME + + ' --dhcp --subnet-range' + ) cmd_output = self._subnet_create(cmd, name1) self.addCleanup(self.openstack, 'subnet delete ' + name1) @@ -114,10 +108,13 @@ def test_subnet_list(self): cmd_output["ip_version"], ) - cmd = ('subnet create ' + - '--network ' + self.NETWORK_NAME + - ' --ip-version 6 --no-dhcp ' + - '--subnet-range') + cmd = ( + 'subnet create ' + + '--network ' + + self.NETWORK_NAME + + ' --ip-version 6 --no-dhcp ' + + '--subnet-range' + ) cmd_output = self._subnet_create(cmd, name2, is_type_ipv4=False) self.addCleanup(self.openstack, 'subnet delete ' + name2) @@ -140,8 +137,7 @@ def test_subnet_list(self): # Test list --long cmd_output = self.openstack( - 'subnet list ' + - '--long ', + 'subnet list ' + '--long ', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -150,8 +146,7 @@ def test_subnet_list(self): # Test list --name cmd_output = self.openstack( - 'subnet list ' + - '--name ' + name1, + 'subnet list ' + '--name ' + name1, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -160,8 +155,7 @@ def test_subnet_list(self): # Test list --ip-version cmd_output = self.openstack( - 'subnet list ' + - '--ip-version 6', + 'subnet list ' + '--ip-version 6', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -170,8 +164,7 @@ def test_subnet_list(self): # Test list --network cmd_output = self.openstack( - 'subnet list ' + - '--network ' + self.NETWORK_ID, + 'subnet list ' + '--network ' + self.NETWORK_ID, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -180,8 +173,7 @@ def test_subnet_list(self): # Test list --no-dhcp cmd_output = self.openstack( - 'subnet list ' + - '--no-dhcp ', + 'subnet list ' + '--no-dhcp ', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -193,9 +185,12 @@ def test_subnet_set_show_unset(self): name = uuid.uuid4().hex new_name = name + "_" - cmd = ('subnet create ' + - '--network ' + self.NETWORK_NAME + - ' --description aaaa --subnet-range') + cmd = ( + 'subnet create ' + + '--network ' + + self.NETWORK_NAME + + ' --description aaaa --subnet-range' + ) cmd_output = self._subnet_create(cmd, name) self.addCleanup(self.openstack, 'subnet delete ' + new_name) @@ -210,19 +205,18 @@ def test_subnet_set_show_unset(self): # Test set --no-dhcp --name --gateway --description cmd_output = self.openstack( - 'subnet set ' + - '--name ' + new_name + - ' --description bbbb ' + - '--no-dhcp ' + - '--gateway 10.10.11.1 ' + - '--service-type network:floatingip_agent_gateway ' + - name + 'subnet set ' + + '--name ' + + new_name + + ' --description bbbb ' + + '--no-dhcp ' + + '--gateway 10.10.11.1 ' + + name ) self.assertOutput('', cmd_output) cmd_output = self.openstack( - 'subnet show ' + - new_name, + 'subnet show ' + new_name, parse_output=True, ) self.assertEqual( @@ -241,28 +235,16 @@ def test_subnet_set_show_unset(self): '10.10.11.1', cmd_output["gateway_ip"], ) - self.assertEqual( - ['network:floatingip_agent_gateway'], - cmd_output["service_types"], - ) # Test unset - cmd_output = self.openstack( - 'subnet unset ' + - '--service-type 
network:floatingip_agent_gateway ' + - new_name - ) + cmd_output = self.openstack('subnet unset --gateway ' + new_name) self.assertOutput('', cmd_output) cmd_output = self.openstack( - 'subnet show ' + - new_name, + 'subnet show ' + new_name, parse_output=True, ) - self.assertEqual( - [], - cmd_output["service_types"], - ) + self.assertIsNone(cmd_output["gateway_ip"]) def _subnet_create(self, cmd, name, is_type_ipv4=True): # Try random subnet range for subnet creating @@ -271,23 +253,32 @@ def _subnet_create(self, cmd, name, is_type_ipv4=True): for i in range(4): # Make a random subnet if is_type_ipv4: - subnet = ".".join(map( - str, - (random.randint(0, 223) for _ in range(3)) - )) + ".0/26" + subnet = ( + ".".join( + map(str, (random.randint(0, 223) for _ in range(3))) + ) + + ".0/26" + ) else: - subnet = ":".join(map( - str, - (hex(random.randint(0, 65535))[2:] for _ in range(7)) - )) + ":0/112" + subnet = ( + ":".join( + map( + str, + ( + hex(random.randint(0, 65535))[2:] + for _ in range(7) + ), + ) + ) + + ":0/112" + ) try: cmd_output = self.openstack( - cmd + ' ' + subnet + ' ' + - name, + cmd + ' ' + subnet + ' ' + name, parse_output=True, ) except Exception: - if (i == 3): + if i == 3: # raise the exception at the last time raise pass @@ -297,7 +288,11 @@ def _subnet_create(self, cmd, name, is_type_ipv4=True): return cmd_output def _create_resource_for_tag_test(self, name, args): - cmd = ('subnet create --network ' + - self.NETWORK_NAME + ' ' + args + - ' --subnet-range') + cmd = ( + 'subnet create --network ' + + self.NETWORK_NAME + + ' ' + + args + + ' --subnet-range' + ) return self._subnet_create(cmd, name) diff --git a/openstackclient/tests/functional/network/v2/test_subnet_pool.py b/openstackclient/tests/functional/network/v2/test_subnet_pool.py index 8dc5e7a1c5..97d9f0b9d4 100644 --- a/openstackclient/tests/functional/network/v2/test_subnet_pool.py +++ b/openstackclient/tests/functional/network/v2/test_subnet_pool.py @@ -21,37 +21,19 @@ class SubnetPoolTests(common.NetworkTagTests): base_command = 'subnet pool' - def setUp(self): - super(SubnetPoolTests, self).setUp() - # Nothing in this class works with Nova Network - if not self.haz_network: - self.skipTest("No Network service present") - def test_subnet_pool_create_delete(self): """Test create, delete""" name1 = uuid.uuid4().hex cmd_output, pool_prefix = self._subnet_pool_create("", name1) - self.assertEqual( - name1, - cmd_output["name"] - ) - self.assertEqual( - [pool_prefix], - cmd_output["prefixes"] - ) + self.assertEqual(name1, cmd_output["name"]) + self.assertEqual([pool_prefix], cmd_output["prefixes"]) name2 = uuid.uuid4().hex cmd_output, pool_prefix = self._subnet_pool_create("", name2) - self.assertEqual( - name2, - cmd_output["name"] - ) - self.assertEqual( - [pool_prefix], - cmd_output["prefixes"] - ) + self.assertEqual(name2, cmd_output["name"]) + self.assertEqual([pool_prefix], cmd_output["prefixes"]) del_output = self.openstack( 'subnet pool delete ' + name1 + ' ' + name2, @@ -81,12 +63,15 @@ def test_subnet_pool_list(self): self.assertNotEqual(admin_project_id, demo_project_id) self.assertEqual(admin_project_id, auth_project_id) + # type narrow + assert admin_project_id is not None + assert demo_project_id is not None + name1 = uuid.uuid4().hex name2 = uuid.uuid4().hex cmd_output, pool_prefix = self._subnet_pool_create( - '--project ' + demo_project_id + - ' --no-share ', + '--project ' + demo_project_id + ' --no-share ', name1, ) self.addCleanup(self.openstack, 'subnet pool delete ' + name1) @@ -131,8 
+116,7 @@ def test_subnet_pool_list(self): # Test list --project cmd_output = self.openstack( - 'subnet pool list ' + - '--project ' + demo_project_id, + 'subnet pool list ' + '--project ' + demo_project_id, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -141,8 +125,7 @@ def test_subnet_pool_list(self): # Test list --share cmd_output = self.openstack( - 'subnet pool list ' + - '--share', + 'subnet pool list ' + '--share', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -151,8 +134,7 @@ def test_subnet_pool_list(self): # Test list --name cmd_output = self.openstack( - 'subnet pool list ' + - '--name ' + name1, + 'subnet pool list ' + '--name ' + name1, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -161,8 +143,7 @@ def test_subnet_pool_list(self): # Test list --long cmd_output = self.openstack( - 'subnet pool list ' + - '--long ', + 'subnet pool list ' + '--long ', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -175,11 +156,11 @@ def test_subnet_pool_set_show(self): name = uuid.uuid4().hex new_name = name + "_" cmd_output, pool_prefix = self._subnet_pool_create( - '--default-prefix-length 16 ' + - '--min-prefix-length 16 ' + - '--max-prefix-length 32 ' + - '--description aaaa ' + - '--default-quota 10 ', + '--default-prefix-length 16 ' + + '--min-prefix-length 16 ' + + '--max-prefix-length 32 ' + + '--description aaaa ' + + '--default-quota 10 ', name, ) @@ -218,21 +199,21 @@ def test_subnet_pool_set_show(self): # Test set cmd_output = self.openstack( - 'subnet pool set ' + - '--name ' + new_name + - ' --description bbbb ' + - ' --pool-prefix 10.110.0.0/16 ' + - '--default-prefix-length 8 ' + - '--min-prefix-length 8 ' + - '--max-prefix-length 16 ' + - '--default-quota 20 ' + - name, + 'subnet pool set ' + + '--name ' + + new_name + + ' --description bbbb ' + + ' --pool-prefix 10.110.0.0/16 ' + + '--default-prefix-length 8 ' + + '--min-prefix-length 8 ' + + '--max-prefix-length 16 ' + + '--default-quota 20 ' + + name, ) self.assertOutput('', cmd_output) cmd_output = self.openstack( - 'subnet pool show ' + - new_name, + 'subnet pool show ' + new_name, parse_output=True, ) self.assertEqual( @@ -300,26 +281,42 @@ def _subnet_pool_create(self, cmd, name, is_type_ipv4=True): for i in range(4): # Create a random prefix if is_type_ipv4: - pool_prefix = ".".join(map( - str, - (random.randint(0, 223) for _ in range(2)), - )) + ".0.0/16" + pool_prefix = ( + ".".join( + map( + str, + (random.randint(0, 223) for _ in range(2)), + ) + ) + + ".0.0/16" + ) else: - pool_prefix = ":".join(map( - str, - (hex(random.randint(0, 65535))[2:] for _ in range(6)), - )) + ":0:0/96" + pool_prefix = ( + ":".join( + map( + str, + ( + hex(random.randint(0, 65535))[2:] + for _ in range(6) + ), + ) + ) + + ":0:0/96" + ) try: cmd_output = self.openstack( - 'subnet pool create ' + - cmd + ' ' + - '--pool-prefix ' + pool_prefix + ' ' + - name, + 'subnet pool create ' + + cmd + + ' ' + + '--pool-prefix ' + + pool_prefix + + ' ' + + name, parse_output=True, ) except Exception: - if (i == 3): + if i == 3: # Raise the exception the last time raise pass diff --git a/openstackclient/tests/functional/object/v1/common.py b/openstackclient/tests/functional/object/v1/common.py index b013343027..036731da50 100644 --- a/openstackclient/tests/functional/object/v1/common.py +++ b/openstackclient/tests/functional/object/v1/common.py @@ -18,5 +18,5 @@ class ObjectStoreTests(base.TestCase): @classmethod def setUpClass(cls): - super(ObjectStoreTests, cls).setUpClass() + 
super().setUpClass() cls.haz_object_store = cls.is_service_enabled('object-store') diff --git a/openstackclient/tests/functional/object/v1/test_container.py b/openstackclient/tests/functional/object/v1/test_container.py index d66aa842b0..6480fb9077 100644 --- a/openstackclient/tests/functional/object/v1/test_container.py +++ b/openstackclient/tests/functional/object/v1/test_container.py @@ -18,11 +18,12 @@ class ContainerTests(common.ObjectStoreTests): """Functional tests for Object Store container commands""" + NAME = uuid.uuid4().hex @classmethod def setUpClass(cls): - super(ContainerTests, cls).setUpClass() + super().setUpClass() if cls.haz_object_store: opts = cls.get_opts(['container']) raw_output = cls.openstack('container create ' + cls.NAME + opts) @@ -35,10 +36,10 @@ def tearDownClass(cls): raw_output = cls.openstack('container delete ' + cls.NAME) cls.assertOutput('', raw_output) finally: - super(ContainerTests, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(ContainerTests, self).setUp() + super().setUp() # Skip tests if no object-store is present if not self.haz_object_store: self.skipTest("No object-store service present") diff --git a/openstackclient/tests/functional/object/v1/test_object.py b/openstackclient/tests/functional/object/v1/test_object.py index b3f23e5287..68ca204337 100644 --- a/openstackclient/tests/functional/object/v1/test_object.py +++ b/openstackclient/tests/functional/object/v1/test_object.py @@ -27,7 +27,7 @@ class ObjectTests(common.ObjectStoreTests): CONTAINER_NAME = uuid.uuid4().hex def setUp(self): - super(ObjectTests, self).setUp() + super().setUp() # Skip tests if no object-store is present if not self.haz_object_store: self.skipTest("No object-store service present") @@ -53,39 +53,41 @@ def _test_object(self, object_file): self.openstack('container save ' + self.CONTAINER_NAME) # TODO(stevemar): Assert returned fields - raw_output = self.openstack('object create %s %s' % - (self.CONTAINER_NAME, object_file)) + raw_output = self.openstack( + f'object create {self.CONTAINER_NAME} {object_file}' + ) items = self.parse_listing(raw_output) self.assert_show_fields(items, OBJECT_FIELDS) - raw_output = self.openstack('object list %s' % self.CONTAINER_NAME) + raw_output = self.openstack(f'object list {self.CONTAINER_NAME}') items = self.parse_listing(raw_output) self.assert_table_structure(items, BASIC_LIST_HEADERS) - self.openstack('object save %s %s' % - (self.CONTAINER_NAME, object_file)) + self.openstack(f'object save {self.CONTAINER_NAME} {object_file}') # TODO(stevemar): Assert returned fields tmp_file = 'tmp.txt' self.addCleanup(os.remove, tmp_file) - self.openstack('object save %s %s --file %s' % - (self.CONTAINER_NAME, object_file, tmp_file)) + self.openstack( + f'object save {self.CONTAINER_NAME} {object_file} --file {tmp_file}' + ) # TODO(stevemar): Assert returned fields - raw_output = self.openstack('object save %s %s --file -' % - (self.CONTAINER_NAME, object_file)) + raw_output = self.openstack( + f'object save {self.CONTAINER_NAME} {object_file} --file -' + ) self.assertEqual(raw_output, 'test content') - self.openstack('object show %s %s' % - (self.CONTAINER_NAME, object_file)) + self.openstack(f'object show {self.CONTAINER_NAME} {object_file}') # TODO(stevemar): Assert returned fields - raw_output = self.openstack('object delete %s %s' % - (self.CONTAINER_NAME, object_file)) + raw_output = self.openstack( + f'object delete {self.CONTAINER_NAME} {object_file}' + ) self.assertEqual(0, len(raw_output)) - 
self.openstack('object create %s %s' % - (self.CONTAINER_NAME, object_file)) - raw_output = self.openstack('container delete -r %s' % - self.CONTAINER_NAME) + self.openstack(f'object create {self.CONTAINER_NAME} {object_file}') + raw_output = self.openstack( + f'container delete -r {self.CONTAINER_NAME}' + ) self.assertEqual(0, len(raw_output)) diff --git a/openstackclient/tests/functional/volume/base.py b/openstackclient/tests/functional/volume/base.py index 041d8d070c..d1d4ffe00e 100644 --- a/openstackclient/tests/functional/volume/base.py +++ b/openstackclient/tests/functional/volume/base.py @@ -16,11 +16,18 @@ class BaseVolumeTests(base.TestCase): - """Base class for Volume functional tests. """ + """Base class for Volume functional tests.""" @classmethod - def wait_for_status(cls, check_type, check_name, desired_status, - wait=120, interval=5, failures=None): + def wait_for_status( + cls, + check_type, + check_name, + desired_status, + wait=120, + interval=5, + failures=None, + ): current_status = "notset" if failures is None: failures = ['error'] @@ -32,34 +39,36 @@ def wait_for_status(cls, check_type, check_name, desired_status, ) current_status = output['status'] if current_status == desired_status: - print('{} {} now has status {}' - .format(check_type, check_name, current_status)) + print( + f'{check_type} {check_name} now has status {current_status}' + ) return - print('Checking {} {} Waiting for {} current status: {}' - .format(check_type, check_name, - desired_status, current_status)) + print( + f'Checking {check_type} {check_name} Waiting for {desired_status} current status: {current_status}' + ) if current_status in failures: raise Exception( - 'Current status {} of {} {} is one of failures {}' - .format(current_status, check_type, check_name, failures)) + f'Current status {current_status} of {check_type} {check_name} is one of failures {failures}' + ) time.sleep(interval) total_sleep += interval cls.assertOutput(desired_status, current_status) @classmethod - def wait_for_delete(cls, check_type, check_name, wait=120, interval=5, - name_field=None): + def wait_for_delete( + cls, check_type, check_name, wait=120, interval=5, name_field=None + ): total_sleep = 0 name_field = name_field or 'Name' while total_sleep < wait: result = cls.openstack(check_type + ' list', parse_output=True) names = [x[name_field] for x in result] if check_name not in names: - print('{} {} is now deleted'.format(check_type, check_name)) + print(f'{check_type} {check_name} is now deleted') return - print('Checking {} {} Waiting for deleted' - .format(check_type, check_name)) + print(f'Checking {check_type} {check_name} Waiting for deleted') time.sleep(interval) total_sleep += interval - raise Exception('Timeout: {} {} was not deleted in {} seconds' - .format(check_type, check_name, wait)) + raise Exception( + f'Timeout: {check_type} {check_name} was not deleted in {wait} seconds' + ) diff --git a/openstackclient/tests/functional/volume/v1/test_qos.py b/openstackclient/tests/functional/volume/v1/test_qos.py deleted file mode 100644 index c449938e37..0000000000 --- a/openstackclient/tests/functional/volume/v1/test_qos.py +++ /dev/null @@ -1,126 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from openstackclient.tests.functional.volume.v1 import common - - -class QosTests(common.BaseVolumeTests): - """Functional tests for volume qos. """ - - def test_volume_qos_create_list(self): - """Test create, list, delete multiple""" - name1 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume qos create ' + - name1, - parse_output=True, - ) - self.assertEqual( - name1, - cmd_output['name'] - ) - - name2 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume qos create ' + - name2, - parse_output=True, - ) - self.assertEqual( - name2, - cmd_output['name'] - ) - - # Test list - cmd_output = self.openstack( - 'volume qos list', - parse_output=True, - ) - names = [x["Name"] for x in cmd_output] - self.assertIn(name1, names) - self.assertIn(name2, names) - - # Test delete multiple - del_output = self.openstack('volume qos delete ' + name1 + ' ' + name2) - self.assertOutput('', del_output) - - def test_volume_qos_set_show_unset(self): - """Tests create volume qos, set, unset, show, delete""" - - name = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume qos create ' + - '--consumer front-end ' - '--property Alpha=a ' + - name, - parse_output=True, - ) - self.addCleanup(self.openstack, 'volume qos delete ' + name) - self.assertEqual( - name, - cmd_output['name'] - ) - - self.assertEqual( - "front-end", - cmd_output['consumer'] - ) - - # Test volume qos set - raw_output = self.openstack( - 'volume qos set ' + - '--property Alpha=c ' + - '--property Beta=b ' + - name, - ) - self.assertOutput('', raw_output) - - # Test volume qos show - cmd_output = self.openstack( - 'volume qos show ' + - name, - parse_output=True, - ) - self.assertEqual( - name, - cmd_output['name'] - ) - self.assertEqual( - {'Alpha': 'c', 'Beta': 'b'}, - cmd_output['properties'] - ) - - # Test volume qos unset - raw_output = self.openstack( - 'volume qos unset ' + - '--property Alpha ' + - name, - ) - self.assertOutput('', raw_output) - - cmd_output = self.openstack( - 'volume qos show ' + - name, - parse_output=True, - ) - self.assertEqual( - name, - cmd_output['name'] - ) - self.assertEqual( - {'Beta': 'b'}, - cmd_output['properties'] - ) - - # TODO(qiangjiahui): Add tests for associate and disassociate volume type diff --git a/openstackclient/tests/functional/volume/v1/test_service.py b/openstackclient/tests/functional/volume/v1/test_service.py deleted file mode 100644 index 7de2de5541..0000000000 --- a/openstackclient/tests/functional/volume/v1/test_service.py +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
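The wait_for_status() and wait_for_delete() helpers reworked in volume/base.py above share the same poll-until-timeout loop. The standalone sketch below only illustrates that pattern under stated assumptions; it is not project code. The name wait_for and the fetch_status callable (standing in for the 'openstack <type> show' call) are invented here, while the wait/interval defaults and the failure handling mirror the helpers in the diff.

import time


def wait_for(fetch_status, desired, wait=120, interval=5, failures=('error',)):
    # Poll fetch_status() until it reports the desired state, hits a known
    # failure state, or the overall timeout elapses.
    total_sleep = 0
    while total_sleep < wait:
        current = fetch_status()
        if current == desired:
            return current
        if current in failures:
            raise Exception(
                f'Current status {current} is one of failures {list(failures)}'
            )
        time.sleep(interval)
        total_sleep += interval
    raise Exception(f'Timeout: status did not reach {desired} in {wait} seconds')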
- -from openstackclient.tests.functional.volume.v1 import common - - -class VolumeServiceTests(common.BaseVolumeTests): - """Functional tests for volume service.""" - - def test_volume_service_list(self): - cmd_output = self.openstack('volume service list', parse_output=True) - - # Get the nonredundant services and hosts - services = list(set([x['Binary'] for x in cmd_output])) - - # Test volume service list --service - cmd_output = self.openstack( - 'volume service list ' + - '--service ' + - services[0], - parse_output=True, - ) - for x in cmd_output: - self.assertEqual( - services[0], - x['Binary'] - ) - - # TODO(zhiyong.dai): test volume service list --host after solving - # https://bugs.launchpad.net/python-openstackclient/+bug/1664451 - - def test_volume_service_set(self): - - # Get a service and host - cmd_output = self.openstack( - 'volume service list', - parse_output=True, - ) - service_1 = cmd_output[0]['Binary'] - host_1 = cmd_output[0]['Host'] - - # Test volume service set --enable - raw_output = self.openstack( - 'volume service set --enable ' + - host_1 + ' ' + - service_1 - ) - self.assertOutput('', raw_output) - - cmd_output = self.openstack( - 'volume service list --long', - parse_output=True, - ) - self.assertEqual( - 'enabled', - cmd_output[0]['Status'] - ) - self.assertIsNone(cmd_output[0]['Disabled Reason']) - - # Test volume service set --disable and --disable-reason - disable_reason = 'disable_reason' - raw_output = self.openstack( - 'volume service set --disable ' + - '--disable-reason ' + - disable_reason + ' ' + - host_1 + ' ' + - service_1 - ) - self.assertOutput('', raw_output) - - cmd_output = self.openstack( - 'volume service list --long', - parse_output=True, - ) - self.assertEqual( - 'disabled', - cmd_output[0]['Status'] - ) - self.assertEqual( - disable_reason, - cmd_output[0]['Disabled Reason'] - ) diff --git a/openstackclient/tests/functional/volume/v1/test_snapshot.py b/openstackclient/tests/functional/volume/v1/test_snapshot.py deleted file mode 100644 index c8c956d1e7..0000000000 --- a/openstackclient/tests/functional/volume/v1/test_snapshot.py +++ /dev/null @@ -1,250 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from openstackclient.tests.functional.volume.v1 import common - - -class VolumeSnapshotTests(common.BaseVolumeTests): - """Functional tests for volume snapshot. 
""" - - VOLLY = uuid.uuid4().hex - - @classmethod - def setUpClass(cls): - super(VolumeSnapshotTests, cls).setUpClass() - # create a volume for all tests to create snapshot - cmd_output = cls.openstack( - 'volume create ' + - '--size 1 ' + - cls.VOLLY, - parse_output=True, - ) - cls.wait_for_status('volume', cls.VOLLY, 'available') - cls.VOLUME_ID = cmd_output['id'] - - @classmethod - def tearDownClass(cls): - try: - cls.wait_for_status('volume', cls.VOLLY, 'available') - raw_output = cls.openstack('volume delete --force ' + cls.VOLLY) - cls.assertOutput('', raw_output) - finally: - super(VolumeSnapshotTests, cls).tearDownClass() - - def test_volume_snapshot_delete(self): - """Test create, delete multiple""" - name1 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume snapshot create ' + - name1 + - ' --volume ' + self.VOLLY, - parse_output=True, - ) - self.assertEqual( - name1, - cmd_output["display_name"], - ) - - name2 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume snapshot create ' + - name2 + - ' --volume ' + self.VOLLY, - parse_output=True, - ) - self.assertEqual( - name2, - cmd_output["display_name"], - ) - - self.wait_for_status('volume snapshot', name1, 'available') - self.wait_for_status('volume snapshot', name2, 'available') - - del_output = self.openstack( - 'volume snapshot delete ' + name1 + ' ' + name2) - self.assertOutput('', del_output) - self.wait_for_delete('volume snapshot', name1) - self.wait_for_delete('volume snapshot', name2) - - def test_volume_snapshot_list(self): - """Test create, list filter""" - name1 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume snapshot create ' + - name1 + - ' --volume ' + self.VOLLY, - parse_output=True, - ) - self.addCleanup(self.wait_for_delete, 'volume snapshot', name1) - self.addCleanup(self.openstack, 'volume snapshot delete ' + name1) - self.assertEqual( - name1, - cmd_output["display_name"], - ) - self.assertEqual( - self.VOLUME_ID, - cmd_output["volume_id"], - ) - self.assertEqual( - 1, - cmd_output["size"], - ) - self.wait_for_status('volume snapshot', name1, 'available') - - name2 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume snapshot create ' + - name2 + - ' --volume ' + self.VOLLY, - parse_output=True, - ) - self.addCleanup(self.wait_for_delete, 'volume snapshot', name2) - self.addCleanup(self.openstack, 'volume snapshot delete ' + name2) - self.assertEqual( - name2, - cmd_output["display_name"], - ) - self.assertEqual( - self.VOLUME_ID, - cmd_output["volume_id"], - ) - self.assertEqual( - 1, - cmd_output["size"], - ) - self.wait_for_status('volume snapshot', name2, 'available') - - # Test list --long, --status - cmd_output = self.openstack( - 'volume snapshot list ' + - '--long ' + - '--status error', - parse_output=True, - ) - names = [x["Name"] for x in cmd_output] - self.assertNotIn(name1, names) - self.assertNotIn(name2, names) - - # Test list --volume - cmd_output = self.openstack( - 'volume snapshot list ' + - '--volume ' + self.VOLLY, - parse_output=True, - ) - names = [x["Name"] for x in cmd_output] - self.assertIn(name1, names) - self.assertIn(name2, names) - - # Test list --name - cmd_output = self.openstack( - 'volume snapshot list ' + - '--name ' + name1, - parse_output=True, - ) - names = [x["Name"] for x in cmd_output] - self.assertIn(name1, names) - self.assertNotIn(name2, names) - - def test_snapshot_set(self): - """Test create, set, unset, show, delete volume snapshot""" - name = uuid.uuid4().hex - new_name = name + "_" - cmd_output = self.openstack( - 'volume 
snapshot create ' + - '--volume ' + self.VOLLY + - ' --description aaaa ' + - name, - parse_output=True, - ) - self.addCleanup(self.wait_for_delete, 'volume snapshot', new_name) - self.addCleanup(self.openstack, 'volume snapshot delete ' + new_name) - self.assertEqual( - name, - cmd_output["display_name"], - ) - self.assertEqual( - 1, - cmd_output["size"], - ) - self.assertEqual( - 'aaaa', - cmd_output["display_description"], - ) - self.wait_for_status('volume snapshot', name, 'available') - - # Test volume snapshot set - raw_output = self.openstack( - 'volume snapshot set ' + - '--name ' + new_name + - ' --description bbbb ' + - '--property Alpha=a ' + - '--property Beta=b ' + - name, - ) - self.assertOutput('', raw_output) - - # Show snapshot set result - cmd_output = self.openstack( - 'volume snapshot show ' + - new_name, - parse_output=True, - ) - self.assertEqual( - new_name, - cmd_output["display_name"], - ) - self.assertEqual( - 1, - cmd_output["size"], - ) - self.assertEqual( - 'bbbb', - cmd_output["display_description"], - ) - self.assertEqual( - {'Alpha': 'a', 'Beta': 'b'}, - cmd_output["properties"], - ) - - # Test volume unset - raw_output = self.openstack( - 'volume snapshot unset ' + - '--property Alpha ' + - new_name, - ) - self.assertOutput('', raw_output) - - cmd_output = self.openstack( - 'volume snapshot show ' + - new_name, - parse_output=True, - ) - self.assertEqual( - {'Beta': 'b'}, - cmd_output["properties"], - ) - - # Test volume snapshot set --no-property - raw_output = self.openstack( - 'volume snapshot set ' + - '--no-property ' + - new_name, - ) - self.assertOutput('', raw_output) - cmd_output = self.openstack( - 'volume snapshot show ' + - new_name, - parse_output=True, - ) - self.assertEqual({}, cmd_output["properties"]) diff --git a/openstackclient/tests/functional/volume/v1/test_transfer_request.py b/openstackclient/tests/functional/volume/v1/test_transfer_request.py deleted file mode 100644 index 0ee73d8a42..0000000000 --- a/openstackclient/tests/functional/volume/v1/test_transfer_request.py +++ /dev/null @@ -1,108 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from openstackclient.tests.functional.volume.v1 import common - - -class TransferRequestTests(common.BaseVolumeTests): - """Functional tests for transfer request. 
""" - - NAME = uuid.uuid4().hex - VOLUME_NAME = uuid.uuid4().hex - - @classmethod - def setUpClass(cls): - super(TransferRequestTests, cls).setUpClass() - cmd_output = cls.openstack( - 'volume create --size 1 ' + cls.VOLUME_NAME, - parse_output=True, - ) - cls.assertOutput(cls.VOLUME_NAME, cmd_output['name']) - - cls.wait_for_status("volume", cls.VOLUME_NAME, "available") - - @classmethod - def tearDownClass(cls): - try: - raw_output_volume = cls.openstack( - 'volume delete ' + cls.VOLUME_NAME) - cls.assertOutput('', raw_output_volume) - finally: - super(TransferRequestTests, cls).tearDownClass() - - def test_volume_transfer_request_accept(self): - volume_name = uuid.uuid4().hex - name = uuid.uuid4().hex - - # create a volume - cmd_output = self.openstack( - 'volume create --size 1 ' + volume_name, - parse_output=True, - ) - self.assertEqual(volume_name, cmd_output['name']) - - # create volume transfer request for the volume - # and get the auth_key of the new transfer request - cmd_output = self.openstack( - 'volume transfer request create ' + - volume_name + - ' --name ' + name, - parse_output=True, - ) - auth_key = cmd_output['auth_key'] - self.assertTrue(auth_key) - - # accept the volume transfer request - output = self.openstack( - 'volume transfer request accept ' + - name + ' ' + - '--auth-key ' + auth_key, - parse_output=True, - ) - self.assertEqual(name, output.get('name')) - - # the volume transfer will be removed by default after accepted - # so just need to delete the volume here - raw_output = self.openstack( - 'volume delete ' + volume_name) - self.assertEqual('', raw_output) - - def test_volume_transfer_request_list_show(self): - name = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume transfer request create ' + - ' --name ' + name + ' ' + - self.VOLUME_NAME, - parse_output=True, - ) - self.addCleanup( - self.openstack, - 'volume transfer request delete ' + name - ) - self.assertOutput(name, cmd_output['name']) - auth_key = cmd_output['auth_key'] - self.assertTrue(auth_key) - - cmd_output = self.openstack( - 'volume transfer request list', - parse_output=True, - ) - self.assertIn(name, [req['Name'] for req in cmd_output]) - - cmd_output = self.openstack( - 'volume transfer request show ' + - name, - parse_output=True, - ) - self.assertEqual(name, cmd_output['name']) diff --git a/openstackclient/tests/functional/volume/v1/test_volume.py b/openstackclient/tests/functional/volume/v1/test_volume.py deleted file mode 100644 index 727ee73b7e..0000000000 --- a/openstackclient/tests/functional/volume/v1/test_volume.py +++ /dev/null @@ -1,247 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from openstackclient.tests.functional.volume.v1 import common - - -class VolumeTests(common.BaseVolumeTests): - """Functional tests for volume. 
""" - - def test_volume_create_and_delete(self): - """Test create, delete multiple""" - name1 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - name1, - parse_output=True, - ) - self.assertEqual( - 1, - cmd_output["size"], - ) - - name2 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume create ' + - '--size 2 ' + - name2, - parse_output=True, - ) - self.assertEqual( - 2, - cmd_output["size"], - ) - - self.wait_for_status("volume", name1, "available") - self.wait_for_status("volume", name2, "available") - del_output = self.openstack('volume delete ' + name1 + ' ' + name2) - self.assertOutput('', del_output) - - def test_volume_list(self): - """Test create, list filter""" - name1 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - name1, - parse_output=True, - ) - self.addCleanup(self.openstack, 'volume delete ' + name1) - self.assertEqual( - 1, - cmd_output["size"], - ) - self.wait_for_status("volume", name1, "available") - - name2 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume create ' + - '--size 2 ' + - name2, - parse_output=True, - ) - self.addCleanup(self.openstack, 'volume delete ' + name2) - self.assertEqual( - 2, - cmd_output["size"], - ) - self.wait_for_status("volume", name2, "available") - - # Test list - cmd_output = self.openstack( - 'volume list ', - parse_output=True, - ) - names = [x["Name"] for x in cmd_output] - self.assertIn(name1, names) - self.assertIn(name2, names) - - # Test list --long - cmd_output = self.openstack( - 'volume list --long', - parse_output=True, - ) - bootable = [x["Bootable"] for x in cmd_output] - self.assertIn('false', bootable) - - # Test list --name - cmd_output = self.openstack( - 'volume list ' + - '--name ' + name1, - parse_output=True, - ) - names = [x["Name"] for x in cmd_output] - self.assertIn(name1, names) - self.assertNotIn(name2, names) - - def test_volume_set_and_unset(self): - """Tests create volume, set, unset, show, delete""" - name = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - '--description aaaa ' + - '--property Alpha=a ' + - name, - parse_output=True, - ) - self.assertEqual( - name, - cmd_output["name"], - ) - self.assertEqual( - 1, - cmd_output["size"], - ) - self.assertEqual( - 'aaaa', - cmd_output["display_description"], - ) - self.assertEqual( - {'Alpha': 'a'}, - cmd_output["properties"], - ) - self.assertEqual( - 'false', - cmd_output["bootable"], - ) - self.wait_for_status("volume", name, "available") - - # Test volume set - new_name = uuid.uuid4().hex - self.addCleanup(self.openstack, 'volume delete ' + new_name) - raw_output = self.openstack( - 'volume set ' + - '--name ' + new_name + - ' --size 2 ' + - '--description bbbb ' + - '--no-property ' + - '--property Beta=b ' + - '--property Gamma=c ' + - '--bootable ' + - name, - ) - self.assertOutput('', raw_output) - - cmd_output = self.openstack( - 'volume show ' + - new_name, - parse_output=True, - ) - self.assertEqual( - new_name, - cmd_output["name"], - ) - self.assertEqual( - 2, - cmd_output["size"], - ) - self.assertEqual( - 'bbbb', - cmd_output["display_description"], - ) - self.assertEqual( - {'Beta': 'b', 'Gamma': 'c'}, - cmd_output["properties"], - ) - self.assertEqual( - 'true', - cmd_output["bootable"], - ) - - # Test volume unset - raw_output = self.openstack( - 'volume unset ' + - '--property Beta ' + - new_name, - ) - self.assertOutput('', raw_output) - - cmd_output = self.openstack( - 'volume show ' + - new_name, - 
parse_output=True, - ) - self.assertEqual( - {'Gamma': 'c'}, - cmd_output["properties"], - ) - - def test_volume_create_and_list_and_show_backward_compatibility(self): - """Test backward compatibility of create, list, show""" - name1 = uuid.uuid4().hex - output = self.openstack( - 'volume create ' + - '-c display_name -c id ' + - '--size 1 ' + - name1, - parse_output=True, - ) - self.assertIn('display_name', output) - self.assertEqual(name1, output['display_name']) - self.assertIn('id', output) - volume_id = output['id'] - self.assertIsNotNone(volume_id) - self.assertNotIn('name', output) - self.addCleanup(self.openstack, 'volume delete ' + volume_id) - - self.wait_for_status("volume", name1, "available") - - output = self.openstack( - 'volume list ' + - '-c "Display Name"', - parse_output=True, - ) - for each_volume in output: - self.assertIn('Display Name', each_volume) - - output = self.openstack( - 'volume list ' + - '-c "Name"', - parse_output=True, - ) - for each_volume in output: - self.assertIn('Name', each_volume) - - output = self.openstack( - 'volume show ' + - '-c display_name -c id ' + - name1, - parse_output=True, - ) - self.assertIn('display_name', output) - self.assertEqual(name1, output['display_name']) - self.assertIn('id', output) - self.assertNotIn('name', output) diff --git a/openstackclient/tests/functional/volume/v1/test_volume_type.py b/openstackclient/tests/functional/volume/v1/test_volume_type.py deleted file mode 100644 index 037d45f0d5..0000000000 --- a/openstackclient/tests/functional/volume/v1/test_volume_type.py +++ /dev/null @@ -1,216 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time -import uuid - -from openstackclient.tests.functional.volume.v1 import common - - -class VolumeTypeTests(common.BaseVolumeTests): - """Functional tests for volume type. 
""" - - def test_volume_type_create_list(self): - name = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume type create --private ' + - name, - parse_output=True, - ) - self.addCleanup( - self.openstack, - 'volume type delete ' + - name, - ) - self.assertEqual(name, cmd_output['name']) - - cmd_output = self.openstack( - 'volume type show %s' % name, - parse_output=True, - ) - self.assertEqual(self.NAME, cmd_output['name']) - - cmd_output = self.openstack('volume type list', parse_output=True) - self.assertIn(self.NAME, [t['Name'] for t in cmd_output]) - - cmd_output = self.openstack( - 'volume type list --default', - parse_output=True, - ) - self.assertEqual(1, len(cmd_output)) - self.assertEqual('lvmdriver-1', cmd_output[0]['Name']) - - def test_volume_type_set_unset_properties(self): - name = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume type create --private ' + - name, - parse_output=True, - ) - self.addCleanup( - self.openstack, - 'volume type delete ' + name - ) - self.assertEqual(name, cmd_output['name']) - - raw_output = self.openstack( - 'volume type set --property a=b --property c=d %s' % name - ) - self.assertEqual("", raw_output) - cmd_output = self.openstack( - 'volume type show %s' % name, - parse_output=True, - ) - self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties']) - - raw_output = self.openstack( - 'volume type unset --property a %s' % name - ) - self.assertEqual("", raw_output) - cmd_output = self.openstack( - 'volume type show %s' % name, - parse_output=True, - ) - self.assertEqual({'c': 'd'}, cmd_output['properties']) - - def test_volume_type_set_unset_multiple_properties(self): - name = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume type create --private ' + - name, - parse_output=True, - ) - self.addCleanup( - self.openstack, - 'volume type delete ' + name - ) - self.assertEqual(name, cmd_output['name']) - - raw_output = self.openstack( - 'volume type set --property a=b --property c=d %s' % name - ) - self.assertEqual("", raw_output) - cmd_output = self.openstack( - 'volume type show %s' % name, - parse_output=True, - ) - self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties']) - - raw_output = self.openstack( - 'volume type unset --property a --property c %s' % name - ) - self.assertEqual("", raw_output) - cmd_output = self.openstack( - 'volume type show %s' % name, - parse_output=True, - ) - self.assertEqual({}, cmd_output['properties']) - - def test_multi_delete(self): - vol_type1 = uuid.uuid4().hex - vol_type2 = uuid.uuid4().hex - self.openstack('volume type create %s' % vol_type1) - time.sleep(5) - self.openstack('volume type create %s' % vol_type2) - time.sleep(5) - cmd = 'volume type delete %s %s' % (vol_type1, vol_type2) - raw_output = self.openstack(cmd) - self.assertOutput('', raw_output) - - # NOTE: Add some basic functional tests with the old format to - # make sure the command works properly, need to change - # these to new test format when beef up all tests for - # volume type commands. 
- def test_encryption_type(self): - encryption_type = uuid.uuid4().hex - # test create new encryption type - cmd_output = self.openstack( - 'volume type create ' - '--encryption-provider LuksEncryptor ' - '--encryption-cipher aes-xts-plain64 ' - '--encryption-key-size 128 ' - '--encryption-control-location front-end ' + - encryption_type) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} - for attr, value in expected.items(): - self.assertEqual(value, cmd_output['encryption'][attr]) - # test show encryption type - cmd_output = self.openstack( - 'volume type show --encryption-type ' + encryption_type, - parse_output=True, - ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} - for attr, value in expected.items(): - self.assertEqual(value, cmd_output['encryption'][attr]) - # test list encryption type - cmd_output = self.openstack( - 'volume type list --encryption-type', - parse_output=True, - ) - encryption_output = [t['Encryption'] for t in cmd_output - if t['Name'] == encryption_type][0] - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} - for attr, value in expected.items(): - self.assertEqual(value, encryption_output[attr]) - # test set new encryption type - raw_output = self.openstack( - 'volume type set ' - '--encryption-provider LuksEncryptor ' - '--encryption-cipher aes-xts-plain64 ' - '--encryption-key-size 128 ' - '--encryption-control-location front-end ' + - self.NAME) - self.assertEqual('', raw_output) - - name = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume type create --private ' + name, - parse_output=True, - ) - self.addCleanup( - self.openstack, - 'volume type delete ' + name, - ) - self.assertEqual(name, cmd_output['name']) - - cmd_output = self.openstack( - 'volume type show --encryption-type ' + name, - parse_output=True, - ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} - for attr, value in expected.items(): - self.assertEqual(value, cmd_output['encryption'][attr]) - # test unset encryption type - raw_output = self.openstack( - 'volume type unset --encryption-type ' + name - ) - self.assertEqual('', raw_output) - cmd_output = self.openstack( - 'volume type show --encryption-type ' + name, - parse_output=True, - ) - self.assertEqual({}, cmd_output['encryption']) - # test delete encryption type - raw_output = self.openstack('volume type delete ' + encryption_type) - self.assertEqual('', raw_output) diff --git a/openstackclient/tests/functional/volume/v2/common.py b/openstackclient/tests/functional/volume/v2/common.py index 7e3a80845a..f15d4d961f 100644 --- a/openstackclient/tests/functional/volume/v2/common.py +++ b/openstackclient/tests/functional/volume/v2/common.py @@ -16,7 +16,7 @@ class BaseVolumeTests(base.BaseVolumeTests): - """Base class for Volume functional tests. """ + """Base class for Volume functional tests.""" @classmethod def setUpClass(cls): diff --git a/openstackclient/tests/functional/volume/v2/test_qos.py b/openstackclient/tests/functional/volume/v2/test_qos.py index 0a5405736b..fc4c52de2e 100644 --- a/openstackclient/tests/functional/volume/v2/test_qos.py +++ b/openstackclient/tests/functional/volume/v2/test_qos.py @@ -16,31 +16,23 @@ class QosTests(common.BaseVolumeTests): - """Functional tests for volume qos. 
""" + """Functional tests for volume qos.""" def test_volume_qos_create_delete_list(self): """Test create, list, delete multiple""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume qos create ' + - name1, + 'volume qos create ' + name1, parse_output=True, ) - self.assertEqual( - name1, - cmd_output['name'] - ) + self.assertEqual(name1, cmd_output['name']) name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume qos create ' + - name2, + 'volume qos create ' + name2, parse_output=True, ) - self.assertEqual( - name2, - cmd_output['name'] - ) + self.assertEqual(name2, cmd_output['name']) # Test list cmd_output = self.openstack( @@ -60,126 +52,90 @@ def test_volume_qos_set_show_unset(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume qos create ' + - '--consumer front-end ' - '--property Alpha=a ' + - name, + 'volume qos create ' + + '--consumer front-end ' + + '--property Alpha=a ' + + name, parse_output=True, ) self.addCleanup(self.openstack, 'volume qos delete ' + name) - self.assertEqual( - name, - cmd_output['name'] - ) + self.assertEqual(name, cmd_output['name']) - self.assertEqual( - "front-end", - cmd_output['consumer'] - ) - self.assertEqual( - {'Alpha': 'a'}, - cmd_output['properties'] - ) + self.assertEqual("front-end", cmd_output['consumer']) + self.assertEqual({'Alpha': 'a'}, cmd_output['properties']) # Test volume qos set raw_output = self.openstack( - 'volume qos set ' + - '--property Alpha=c ' + - '--property Beta=b ' + - name, + 'volume qos set ' + + '--no-property ' + + '--property Beta=b ' + + '--property Charlie=c ' + + name, ) self.assertOutput('', raw_output) # Test volume qos show cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) + self.assertEqual(name, cmd_output['name']) self.assertEqual( - name, - cmd_output['name'] - ) - self.assertEqual( - {'Alpha': 'c', 'Beta': 'b'}, - cmd_output['properties'] + {'Beta': 'b', 'Charlie': 'c'}, + cmd_output['properties'], ) # Test volume qos unset raw_output = self.openstack( - 'volume qos unset ' + - '--property Alpha ' + - name, + 'volume qos unset ' + '--property Charlie ' + name, ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) - self.assertEqual( - name, - cmd_output['name'] - ) - self.assertEqual( - {'Beta': 'b'}, - cmd_output['properties'] - ) + self.assertEqual(name, cmd_output['name']) + self.assertEqual({'Beta': 'b'}, cmd_output['properties']) def test_volume_qos_asso_disasso(self): """Tests associate and disassociate qos with volume type""" vol_type1 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create ' + - vol_type1, + 'volume type create ' + vol_type1, parse_output=True, ) - self.assertEqual( - vol_type1, - cmd_output['name'] - ) + self.assertEqual(vol_type1, cmd_output['name']) self.addCleanup(self.openstack, 'volume type delete ' + vol_type1) vol_type2 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create ' + - vol_type2, + 'volume type create ' + vol_type2, parse_output=True, ) - self.assertEqual( - vol_type2, - cmd_output['name'] - ) + self.assertEqual(vol_type2, cmd_output['name']) self.addCleanup(self.openstack, 'volume type delete ' + vol_type2) name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume qos create ' + - name, + 'volume qos create ' + name, parse_output=True, ) - self.assertEqual( - name, - cmd_output['name'] - ) + self.assertEqual(name, cmd_output['name']) 
self.addCleanup(self.openstack, 'volume qos delete ' + name) # Test associate raw_output = self.openstack( - 'volume qos associate ' + - name + ' ' + vol_type1 + 'volume qos associate ' + name + ' ' + vol_type1 ) self.assertOutput('', raw_output) raw_output = self.openstack( - 'volume qos associate ' + - name + ' ' + vol_type2 + 'volume qos associate ' + name + ' ' + vol_type2 ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) types = cmd_output["associations"] @@ -188,14 +144,15 @@ def test_volume_qos_asso_disasso(self): # Test disassociate raw_output = self.openstack( - 'volume qos disassociate ' + - '--volume-type ' + vol_type1 + - ' ' + name + 'volume qos disassociate ' + + '--volume-type ' + + vol_type1 + + ' ' + + name ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) types = cmd_output["associations"] @@ -204,13 +161,11 @@ def test_volume_qos_asso_disasso(self): # Test disassociate --all raw_output = self.openstack( - 'volume qos associate ' + - name + ' ' + vol_type1 + 'volume qos associate ' + name + ' ' + vol_type1 ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) types = cmd_output["associations"] @@ -218,13 +173,11 @@ def test_volume_qos_asso_disasso(self): self.assertIn(vol_type2, types) raw_output = self.openstack( - 'volume qos disassociate ' + - '--all ' + name + 'volume qos disassociate ' + '--all ' + name ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) self.assertNotIn("associations", cmd_output.keys()) diff --git a/openstackclient/tests/functional/volume/v2/test_service.py b/openstackclient/tests/functional/volume/v2/test_service.py index 5794f81ff0..cca32f9627 100644 --- a/openstackclient/tests/functional/volume/v2/test_service.py +++ b/openstackclient/tests/functional/volume/v2/test_service.py @@ -20,37 +20,26 @@ def test_volume_service_list(self): cmd_output = self.openstack('volume service list', parse_output=True) # Get the nonredundant services and hosts - services = list(set([x['Binary'] for x in cmd_output])) - hosts = list(set([x['Host'] for x in cmd_output])) + services = list({x['Binary'] for x in cmd_output}) + hosts = list({x['Host'] for x in cmd_output}) # Test volume service list --service cmd_output = self.openstack( - 'volume service list ' + - '--service ' + - services[0], + 'volume service list ' + '--service ' + services[0], parse_output=True, ) for x in cmd_output: - self.assertEqual( - services[0], - x['Binary'] - ) + self.assertEqual(services[0], x['Binary']) # Test volume service list --host cmd_output = self.openstack( - 'volume service list ' + - '--host ' + - hosts[0], + 'volume service list ' + '--host ' + hosts[0], parse_output=True, ) for x in cmd_output: - self.assertIn( - hosts[0], - x['Host'] - ) + self.assertIn(hosts[0], x['Host']) def test_volume_service_set(self): - # Get a service and host cmd_output = self.openstack( 'volume service list', @@ -61,9 +50,7 @@ def test_volume_service_set(self): # Test volume service set --enable raw_output = self.openstack( - 'volume service set --enable ' + - host_1 + ' ' + - service_1 + 'volume service set --enable ' + host_1 + ' ' + service_1 ) self.assertOutput('', raw_output) @@ -71,22 +58,19 @@ def 
test_volume_service_set(self): 'volume service list --long', parse_output=True, ) - self.assertEqual( - 'enabled', - cmd_output[0]['Status'] - ) - self.assertIsNone( - cmd_output[0]['Disabled Reason'] - ) + self.assertEqual('enabled', cmd_output[0]['Status']) + self.assertIsNone(cmd_output[0]['Disabled Reason']) # Test volume service set --disable and --disable-reason disable_reason = 'disable_reason' raw_output = self.openstack( - 'volume service set --disable ' + - '--disable-reason ' + - disable_reason + ' ' + - host_1 + ' ' + - service_1 + 'volume service set --disable ' + + '--disable-reason ' + + disable_reason + + ' ' + + host_1 + + ' ' + + service_1 ) self.assertOutput('', raw_output) @@ -94,11 +78,5 @@ def test_volume_service_set(self): 'volume service list --long', parse_output=True, ) - self.assertEqual( - 'disabled', - cmd_output[0]['Status'] - ) - self.assertEqual( - disable_reason, - cmd_output[0]['Disabled Reason'] - ) + self.assertEqual('disabled', cmd_output[0]['Status']) + self.assertEqual(disable_reason, cmd_output[0]['Disabled Reason']) diff --git a/openstackclient/tests/functional/volume/v2/test_transfer_request.py b/openstackclient/tests/functional/volume/v2/test_transfer_request.py index ac71cba2c7..07d70f0fb4 100644 --- a/openstackclient/tests/functional/volume/v2/test_transfer_request.py +++ b/openstackclient/tests/functional/volume/v2/test_transfer_request.py @@ -16,7 +16,7 @@ class TransferRequestTests(common.BaseVolumeTests): - """Functional tests for transfer request. """ + """Functional tests for transfer request.""" API_VERSION = '2' @@ -26,27 +26,31 @@ def test_volume_transfer_request_accept(self): # create a volume cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - volume_name, + 'volume create ' + '--size 1 ' + volume_name, parse_output=True, ) self.assertEqual(volume_name, cmd_output['name']) self.addCleanup( self.openstack, - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume delete ' + - volume_name + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume delete ' + + volume_name, ) self.wait_for_status("volume", volume_name, "available") # create volume transfer request for the volume # and get the auth_key of the new transfer request cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request create ' + - ' --name ' + xfer_name + ' ' + - volume_name, + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request create ' + + ' --name ' + + xfer_name + + ' ' + + volume_name, parse_output=True, ) self.assertEqual(xfer_name, cmd_output['name']) @@ -57,10 +61,14 @@ def test_volume_transfer_request_accept(self): # accept the volume transfer request cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request accept ' + - '--auth-key ' + auth_key + ' ' + - xfer_id, + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request accept ' + + '--auth-key ' + + auth_key + + ' ' + + xfer_id, parse_output=True, ) self.assertEqual(xfer_name, cmd_output['name']) @@ -72,25 +80,29 @@ def test_volume_transfer_request_list_show(self): # create a volume cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - volume_name, + 'volume create ' + '--size 1 ' + volume_name, parse_output=True, ) self.assertEqual(volume_name, cmd_output['name']) self.addCleanup( self.openstack, - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume delete ' + - volume_name + 
'--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume delete ' + + volume_name, ) self.wait_for_status("volume", volume_name, "available") cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request create ' + - ' --name ' + xfer_name + ' ' + - volume_name, + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request create ' + + ' --name ' + + xfer_name + + ' ' + + volume_name, parse_output=True, ) self.assertEqual(xfer_name, cmd_output['name']) @@ -100,16 +112,20 @@ def test_volume_transfer_request_list_show(self): self.wait_for_status("volume", volume_name, "awaiting-transfer") cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request list', + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request list', parse_output=True, ) self.assertIn(xfer_name, [req['Name'] for req in cmd_output]) cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request show ' + - xfer_id, + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request show ' + + xfer_id, parse_output=True, ) self.assertEqual(xfer_name, cmd_output['name']) @@ -120,8 +136,10 @@ def test_volume_transfer_request_list_show(self): # to become 'available' before attempting to delete # the volume. cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request delete ' + - xfer_id + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request delete ' + + xfer_id ) self.wait_for_status("volume", volume_name, "available") diff --git a/openstackclient/tests/functional/volume/v2/test_volume.py b/openstackclient/tests/functional/volume/v2/test_volume.py index 832dabe678..32b60dfaac 100644 --- a/openstackclient/tests/functional/volume/v2/test_volume.py +++ b/openstackclient/tests/functional/volume/v2/test_volume.py @@ -16,15 +16,13 @@ class VolumeTests(common.BaseVolumeTests): - """Functional tests for volume. 
""" + """Functional tests for volume.""" def test_volume_delete(self): """Test create, delete multiple""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - name1, + 'volume create ' + '--size 1 ' + name1, parse_output=True, ) self.assertEqual( @@ -34,9 +32,7 @@ def test_volume_delete(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--size 2 ' + - name2, + 'volume create ' + '--size 2 ' + name2, parse_output=True, ) self.assertEqual( @@ -53,9 +49,7 @@ def test_volume_list(self): """Test create, list filter""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - name1, + 'volume create ' + '--size 1 ' + name1, parse_output=True, ) self.addCleanup(self.openstack, 'volume delete ' + name1) @@ -67,9 +61,7 @@ def test_volume_list(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--size 2 ' + - name2, + 'volume create ' + '--size 2 ' + name2, parse_output=True, ) self.addCleanup(self.openstack, 'volume delete ' + name2) @@ -78,17 +70,12 @@ def test_volume_list(self): cmd_output["size"], ) self.wait_for_status("volume", name2, "available") - raw_output = self.openstack( - 'volume set ' + - '--state error ' + - name2 - ) + raw_output = self.openstack('volume set ' + '--state error ' + name2) self.assertOutput('', raw_output) # Test list --long cmd_output = self.openstack( - 'volume list ' + - '--long', + 'volume list ' + '--long', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -97,8 +84,7 @@ def test_volume_list(self): # Test list --status cmd_output = self.openstack( - 'volume list ' + - '--status error', + 'volume list ' + '--status error', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -113,11 +99,11 @@ def test_volume_set_and_unset(self): name = uuid.uuid4().hex new_name = name + "_" cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - '--description aaaa ' + - '--property Alpha=a ' + - name, + 'volume create ' + + '--size 1 ' + + '--description aaaa ' + + '--property Alpha=a ' + + name, parse_output=True, ) self.addCleanup(self.openstack, 'volume delete ' + new_name) @@ -145,23 +131,23 @@ def test_volume_set_and_unset(self): # Test volume set raw_output = self.openstack( - 'volume set ' + - '--name ' + new_name + - ' --size 2 ' + - '--description bbbb ' + - '--no-property ' + - '--property Beta=b ' + - '--property Gamma=c ' + - '--image-property a=b ' + - '--image-property c=d ' + - '--bootable ' + - name, + 'volume set ' + + '--name ' + + new_name + + ' --size 2 ' + + '--description bbbb ' + + '--no-property ' + + '--property Beta=b ' + + '--property Gamma=c ' + + '--image-property a=b ' + + '--image-property c=d ' + + '--bootable ' + + name, ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume show ' + - new_name, + 'volume show ' + new_name, parse_output=True, ) self.assertEqual( @@ -185,22 +171,21 @@ def test_volume_set_and_unset(self): cmd_output["volume_image_metadata"], ) self.assertEqual( - 'true', + True, cmd_output["bootable"], ) # Test volume unset raw_output = self.openstack( - 'volume unset ' + - '--property Beta ' + - '--image-property a ' + - new_name, + 'volume unset ' + + '--property Beta ' + + '--image-property a ' + + new_name, ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume show ' + - new_name, + 'volume show ' + new_name, parse_output=True, ) self.assertEqual( @@ -219,9 +204,7 @@ def test_volume_snapshot(self): snapshot_name 
= uuid.uuid4().hex # Make a snapshot cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - volume_name, + 'volume create ' + '--size 1 ' + volume_name, parse_output=True, ) self.wait_for_status("volume", volume_name, "available") @@ -230,9 +213,10 @@ def test_volume_snapshot(self): cmd_output["name"], ) cmd_output = self.openstack( - 'volume snapshot create ' + - snapshot_name + - ' --volume ' + volume_name, + 'volume snapshot create ' + + snapshot_name + + ' --volume ' + + volume_name, parse_output=True, ) self.wait_for_status("volume snapshot", snapshot_name, "available") @@ -240,9 +224,7 @@ def test_volume_snapshot(self): name = uuid.uuid4().hex # Create volume from snapshot cmd_output = self.openstack( - 'volume create ' + - '--snapshot ' + snapshot_name + - ' ' + name, + 'volume create ' + '--snapshot ' + snapshot_name + ' ' + name, parse_output=True, ) self.addCleanup(self.openstack, 'volume delete ' + name) @@ -254,8 +236,7 @@ def test_volume_snapshot(self): self.wait_for_status("volume", name, "available") # Delete snapshot - raw_output = self.openstack( - 'volume snapshot delete ' + snapshot_name) + raw_output = self.openstack('volume snapshot delete ' + snapshot_name) self.assertOutput('', raw_output) # Deleting snapshot may take time. If volume snapshot still exists when # a parent volume delete is requested, the volume deletion will fail. @@ -265,9 +246,7 @@ def test_volume_list_backward_compatibility(self): """Test backward compatibility of list command""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - name1, + 'volume create ' + '--size 1 ' + name1, parse_output=True, ) self.addCleanup(self.openstack, 'volume delete ' + name1) @@ -279,8 +258,7 @@ def test_volume_list_backward_compatibility(self): # Test list -c "Display Name" cmd_output = self.openstack( - 'volume list ' + - '-c "Display Name"', + 'volume list ' + '-c "Display Name"', parse_output=True, ) for each_volume in cmd_output: @@ -288,8 +266,7 @@ def test_volume_list_backward_compatibility(self): # Test list -c "Name" cmd_output = self.openstack( - 'volume list ' + - '-c "Name"', + 'volume list ' + '-c "Name"', parse_output=True, ) for each_volume in cmd_output: diff --git a/openstackclient/tests/functional/volume/v2/test_volume_backup.py b/openstackclient/tests/functional/volume/v2/test_volume_backup.py index 07bd2d161c..7ace71901c 100644 --- a/openstackclient/tests/functional/volume/v2/test_volume_backup.py +++ b/openstackclient/tests/functional/volume/v2/test_volume_backup.py @@ -16,10 +16,10 @@ class VolumeBackupTests(common.BaseVolumeTests): - """Functional tests for volume backups. 
""" + """Functional tests for volume backups.""" def setUp(self): - super(VolumeBackupTests, self).setUp() + super().setUp() self.backup_enabled = False serv_list = self.openstack('volume service list', parse_output=True) for service in serv_list: @@ -34,29 +34,26 @@ def test_volume_backup_restore(self): vol_id = uuid.uuid4().hex # create a volume self.openstack( - 'volume create ' + - '--size 1 ' + - vol_id, + 'volume create ' + '--size 1 ' + vol_id, parse_output=True, ) self.wait_for_status("volume", vol_id, "available") # create a backup backup = self.openstack( - 'volume backup create ' + - vol_id, + 'volume backup create ' + vol_id, parse_output=True, ) self.wait_for_status("volume backup", backup['id'], "available") # restore the backup backup_restored = self.openstack( - 'volume backup restore %s %s' - % (backup['id'], vol_id), + 'volume backup restore {} {}'.format(backup['id'], vol_id), parse_output=True, ) self.assertEqual(backup_restored['backup_id'], backup['id']) self.wait_for_status("volume backup", backup['id'], "available") - self.wait_for_status("volume", backup_restored['volume_id'], - "available") - self.addCleanup(self.openstack, 'volume delete %s' % vol_id) + self.wait_for_status( + "volume", backup_restored['volume_id'], "available" + ) + self.addCleanup(self.openstack, f'volume delete {vol_id}') diff --git a/openstackclient/tests/functional/volume/v2/test_volume_snapshot.py b/openstackclient/tests/functional/volume/v2/test_volume_snapshot.py index 12fdad2c35..e5daded1bd 100644 --- a/openstackclient/tests/functional/volume/v2/test_volume_snapshot.py +++ b/openstackclient/tests/functional/volume/v2/test_volume_snapshot.py @@ -16,18 +16,16 @@ class VolumeSnapshotTests(common.BaseVolumeTests): - """Functional tests for volume snapshot. 
""" + """Functional tests for volume snapshot.""" VOLLY = uuid.uuid4().hex @classmethod def setUpClass(cls): - super(VolumeSnapshotTests, cls).setUpClass() + super().setUpClass() # create a volume for all tests to create snapshot cmd_output = cls.openstack( - 'volume create ' + - '--size 1 ' + - cls.VOLLY, + 'volume create ' + '--size 1 ' + cls.VOLLY, parse_output=True, ) cls.wait_for_status('volume', cls.VOLLY, 'available') @@ -37,19 +35,16 @@ def setUpClass(cls): def tearDownClass(cls): try: cls.wait_for_status('volume', cls.VOLLY, 'available') - raw_output = cls.openstack( - 'volume delete --force ' + cls.VOLLY) + raw_output = cls.openstack('volume delete --force ' + cls.VOLLY) cls.assertOutput('', raw_output) finally: - super(VolumeSnapshotTests, cls).tearDownClass() + super().tearDownClass() def test_volume_snapshot_delete(self): """Test create, delete multiple""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume snapshot create ' + - name1 + - ' --volume ' + self.VOLLY, + 'volume snapshot create ' + name1 + ' --volume ' + self.VOLLY, parse_output=True, ) self.assertEqual( @@ -59,9 +54,7 @@ def test_volume_snapshot_delete(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume snapshot create ' + - name2 + - ' --volume ' + self.VOLLY, + 'volume snapshot create ' + name2 + ' --volume ' + self.VOLLY, parse_output=True, ) self.assertEqual( @@ -73,7 +66,8 @@ def test_volume_snapshot_delete(self): self.wait_for_status('volume snapshot', name2, 'available') del_output = self.openstack( - 'volume snapshot delete ' + name1 + ' ' + name2) + 'volume snapshot delete ' + name1 + ' ' + name2 + ) self.assertOutput('', del_output) self.wait_for_delete('volume snapshot', name1) self.wait_for_delete('volume snapshot', name2) @@ -82,9 +76,7 @@ def test_volume_snapshot_list(self): """Test create, list filter""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume snapshot create ' + - name1 + - ' --volume ' + self.VOLLY, + 'volume snapshot create ' + name1 + ' --volume ' + self.VOLLY, parse_output=True, ) self.addCleanup(self.wait_for_delete, 'volume snapshot', name1) @@ -105,9 +97,7 @@ def test_volume_snapshot_list(self): name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume snapshot create ' + - name2 + - ' --volume ' + self.VOLLY, + 'volume snapshot create ' + name2 + ' --volume ' + self.VOLLY, parse_output=True, ) self.addCleanup(self.wait_for_delete, 'volume snapshot', name2) @@ -127,17 +117,13 @@ def test_volume_snapshot_list(self): self.wait_for_status('volume snapshot', name2, 'available') raw_output = self.openstack( - 'volume snapshot set ' + - '--state error_deleting ' + - name2 + 'volume snapshot set ' + '--state error_deleting ' + name2 ) self.assertOutput('', raw_output) # Test list --long, --status cmd_output = self.openstack( - 'volume snapshot list ' + - '--long ' + - '--status error_deleting', + 'volume snapshot list ' + '--long ' + '--status error_deleting', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -145,17 +131,13 @@ def test_volume_snapshot_list(self): self.assertIn(name2, names) raw_output = self.openstack( - 'volume snapshot set ' + - '--state error ' + - name2 + 'volume snapshot set ' + '--state error ' + name2 ) self.assertOutput('', raw_output) # Test list --long, --status cmd_output = self.openstack( - 'volume snapshot list ' + - '--long ' + - '--status error', + 'volume snapshot list ' + '--long ' + '--status error', parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -164,8 +146,7 @@ def 
test_volume_snapshot_list(self): # Test list --volume cmd_output = self.openstack( - 'volume snapshot list ' + - '--volume ' + self.VOLLY, + 'volume snapshot list ' + '--volume ' + self.VOLLY, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -174,8 +155,7 @@ def test_volume_snapshot_list(self): # Test list --name cmd_output = self.openstack( - 'volume snapshot list ' + - '--name ' + name1, + 'volume snapshot list ' + '--name ' + name1, parse_output=True, ) names = [x["Name"] for x in cmd_output] @@ -187,11 +167,12 @@ def test_volume_snapshot_set(self): name = uuid.uuid4().hex new_name = name + "_" cmd_output = self.openstack( - 'volume snapshot create ' + - '--volume ' + self.VOLLY + - ' --description aaaa ' + - '--property Alpha=a ' + - name, + 'volume snapshot create ' + + '--volume ' + + self.VOLLY + + ' --description aaaa ' + + '--property Alpha=a ' + + name, parse_output=True, ) self.addCleanup(self.wait_for_delete, 'volume snapshot', new_name) @@ -216,19 +197,19 @@ def test_volume_snapshot_set(self): # Test volume snapshot set raw_output = self.openstack( - 'volume snapshot set ' + - '--name ' + new_name + - ' --description bbbb ' + - '--property Alpha=c ' + - '--property Beta=b ' + - name, + 'volume snapshot set ' + + '--name ' + + new_name + + ' --description bbbb ' + + '--property Alpha=c ' + + '--property Beta=b ' + + name, ) self.assertOutput('', raw_output) # Show snapshot set result cmd_output = self.openstack( - 'volume snapshot show ' + - new_name, + 'volume snapshot show ' + new_name, parse_output=True, ) self.assertEqual( @@ -250,15 +231,12 @@ def test_volume_snapshot_set(self): # Test volume snapshot unset raw_output = self.openstack( - 'volume snapshot unset ' + - '--property Alpha ' + - new_name, + 'volume snapshot unset ' + '--property Alpha ' + new_name, ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume snapshot show ' + - new_name, + 'volume snapshot show ' + new_name, parse_output=True, ) self.assertEqual( @@ -268,14 +246,11 @@ def test_volume_snapshot_set(self): # Test volume snapshot set --no-property raw_output = self.openstack( - 'volume snapshot set ' + - '--no-property ' + - new_name, + 'volume snapshot set ' + '--no-property ' + new_name, ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume snapshot show ' + - new_name, + 'volume snapshot show ' + new_name, parse_output=True, ) self.assertNotIn( diff --git a/openstackclient/tests/functional/volume/v2/test_volume_type.py b/openstackclient/tests/functional/volume/v2/test_volume_type.py index 5cad92971d..80bf85a5bb 100644 --- a/openstackclient/tests/functional/volume/v2/test_volume_type.py +++ b/openstackclient/tests/functional/volume/v2/test_volume_type.py @@ -17,13 +17,12 @@ class VolumeTypeTests(common.BaseVolumeTests): - """Functional tests for volume type. 
""" + """Functional tests for volume type.""" def test_volume_type_create_list(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) self.addCleanup( @@ -33,7 +32,7 @@ def test_volume_type_create_list(self): self.assertEqual(name, cmd_output['name']) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual(name, cmd_output['name']) @@ -51,32 +50,26 @@ def test_volume_type_create_list(self): def test_volume_type_set_unset_properties(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) - self.addCleanup( - self.openstack, - 'volume type delete ' + name - ) + self.addCleanup(self.openstack, 'volume type delete ' + name) self.assertEqual(name, cmd_output['name']) raw_output = self.openstack( - 'volume type set --property a=b --property c=d %s' % name + f'volume type set --property a=b --property c=d {name}' ) self.assertEqual("", raw_output) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties']) - raw_output = self.openstack( - 'volume type unset --property a %s' % name - ) + raw_output = self.openstack(f'volume type unset --property a {name}') self.assertEqual("", raw_output) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual({'c': 'd'}, cmd_output['properties']) @@ -84,32 +77,28 @@ def test_volume_type_set_unset_properties(self): def test_volume_type_set_unset_multiple_properties(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) - self.addCleanup( - self.openstack, - 'volume type delete ' + name - ) + self.addCleanup(self.openstack, 'volume type delete ' + name) self.assertEqual(name, cmd_output['name']) raw_output = self.openstack( - 'volume type set --property a=b --property c=d %s' % name + f'volume type set --property a=b --property c=d {name}' ) self.assertEqual("", raw_output) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties']) raw_output = self.openstack( - 'volume type unset --property a --property c %s' % name + f'volume type unset --property a --property c {name}' ) self.assertEqual("", raw_output) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual({}, cmd_output['properties']) @@ -117,34 +106,28 @@ def test_volume_type_set_unset_multiple_properties(self): def test_volume_type_set_unset_project(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) - self.addCleanup( - self.openstack, - 'volume type delete ' + name - ) + self.addCleanup(self.openstack, 'volume type delete ' + name) self.assertEqual(name, cmd_output['name']) - raw_output = self.openstack( - 'volume type set --project admin %s' % name - ) + raw_output = self.openstack(f'volume type set --project admin {name}') self.assertEqual("", raw_output) raw_output = self.openstack( - 'volume type unset --project 
admin %s' % name + f'volume type unset --project admin {name}' ) self.assertEqual("", raw_output) def test_multi_delete(self): vol_type1 = uuid.uuid4().hex vol_type2 = uuid.uuid4().hex - self.openstack('volume type create %s' % vol_type1) + self.openstack(f'volume type create {vol_type1}') time.sleep(5) - self.openstack('volume type create %s' % vol_type2) + self.openstack(f'volume type create {vol_type2}') time.sleep(5) - cmd = 'volume type delete %s %s' % (vol_type1, vol_type2) + cmd = f'volume type delete {vol_type1} {vol_type2}' raw_output = self.openstack(cmd) self.assertOutput('', raw_output) @@ -161,14 +144,15 @@ def test_encryption_type(self): '--encryption-provider LuksEncryptor ' '--encryption-cipher aes-xts-plain64 ' '--encryption-key-size 128 ' - '--encryption-control-location front-end ' + - encryption_type, + '--encryption-control-location front-end ' + encryption_type, parse_output=True, ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } for attr, value in expected.items(): self.assertEqual(value, cmd_output['encryption'][attr]) # test show encryption type @@ -176,10 +160,12 @@ def test_encryption_type(self): 'volume type show --encryption-type ' + encryption_type, parse_output=True, ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } for attr, value in expected.items(): self.assertEqual(value, cmd_output['encryption'][attr]) # test list encryption type @@ -187,35 +173,39 @@ def test_encryption_type(self): 'volume type list --encryption-type', parse_output=True, ) - encryption_output = [t['Encryption'] for t in cmd_output - if t['Name'] == encryption_type][0] - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} + encryption_output = [ + t['Encryption'] for t in cmd_output if t['Name'] == encryption_type + ][0] + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } for attr, value in expected.items(): self.assertEqual(value, encryption_output[attr]) # test set existing encryption type raw_output = self.openstack( 'volume type set ' '--encryption-key-size 256 ' - '--encryption-control-location back-end ' + - encryption_type) + '--encryption-control-location back-end ' + encryption_type + ) self.assertEqual('', raw_output) cmd_output = self.openstack( 'volume type show --encryption-type ' + encryption_type, parse_output=True, ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 256, - 'control_location': 'back-end'} + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 256, + 'control_location': 'back-end', + } for attr, value in expected.items(): self.assertEqual(value, cmd_output['encryption'][attr]) # test set new encryption type cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) self.addCleanup( @@ -229,18 +219,20 @@ def test_encryption_type(self): '--encryption-provider LuksEncryptor ' '--encryption-cipher aes-xts-plain64 ' '--encryption-key-size 
128 ' - '--encryption-control-location front-end ' + - name) + '--encryption-control-location front-end ' + name + ) self.assertEqual('', raw_output) cmd_output = self.openstack( 'volume type show --encryption-type ' + name, parse_output=True, ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } for attr, value in expected.items(): self.assertEqual(value, cmd_output['encryption'][attr]) # test unset encryption type diff --git a/openstackclient/tests/functional/volume/v3/common.py b/openstackclient/tests/functional/volume/v3/common.py index 29f769b640..cbab39275c 100644 --- a/openstackclient/tests/functional/volume/v3/common.py +++ b/openstackclient/tests/functional/volume/v3/common.py @@ -16,7 +16,7 @@ class BaseVolumeTests(base.BaseVolumeTests): - """Base class for Volume functional tests. """ + """Base class for Volume functional tests.""" @classmethod def setUpClass(cls): diff --git a/openstackclient/tests/functional/volume/v3/test_qos.py b/openstackclient/tests/functional/volume/v3/test_qos.py index 51578e1445..54dffbc0e7 100644 --- a/openstackclient/tests/functional/volume/v3/test_qos.py +++ b/openstackclient/tests/functional/volume/v3/test_qos.py @@ -16,31 +16,23 @@ class QosTests(common.BaseVolumeTests): - """Functional tests for volume qos. """ + """Functional tests for volume qos.""" def test_volume_qos_create_delete_list(self): """Test create, list, delete multiple""" name1 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume qos create ' + - name1, + 'volume qos create ' + name1, parse_output=True, ) - self.assertEqual( - name1, - cmd_output['name'] - ) + self.assertEqual(name1, cmd_output['name']) name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume qos create ' + - name2, + 'volume qos create ' + name2, parse_output=True, ) - self.assertEqual( - name2, - cmd_output['name'] - ) + self.assertEqual(name2, cmd_output['name']) # Test list cmd_output = self.openstack( @@ -60,126 +52,90 @@ def test_volume_qos_set_show_unset(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume qos create ' + - '--consumer front-end ' - '--property Alpha=a ' + - name, + 'volume qos create ' + + '--consumer front-end ' + + '--property Alpha=a ' + + name, parse_output=True, ) self.addCleanup(self.openstack, 'volume qos delete ' + name) - self.assertEqual( - name, - cmd_output['name'] - ) + self.assertEqual(name, cmd_output['name']) - self.assertEqual( - "front-end", - cmd_output['consumer'] - ) - self.assertEqual( - {'Alpha': 'a'}, - cmd_output['properties'] - ) + self.assertEqual("front-end", cmd_output['consumer']) + self.assertEqual({'Alpha': 'a'}, cmd_output['properties']) # Test volume qos set raw_output = self.openstack( - 'volume qos set ' + - '--property Alpha=c ' + - '--property Beta=b ' + - name, + 'volume qos set ' + + '--no-property ' + + '--property Beta=b ' + + '--property Charlie=c ' + + name, ) self.assertOutput('', raw_output) # Test volume qos show cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) + self.assertEqual(name, cmd_output['name']) self.assertEqual( - name, - cmd_output['name'] - ) - self.assertEqual( - {'Alpha': 'c', 'Beta': 'b'}, - cmd_output['properties'] + {'Beta': 'b', 'Charlie': 'c'}, + cmd_output['properties'], ) # Test volume qos unset raw_output = self.openstack( - 
'volume qos unset ' + - '--property Alpha ' + - name, + 'volume qos unset ' + '--property Charlie ' + name, ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) - self.assertEqual( - name, - cmd_output['name'] - ) - self.assertEqual( - {'Beta': 'b'}, - cmd_output['properties'] - ) + self.assertEqual(name, cmd_output['name']) + self.assertEqual({'Beta': 'b'}, cmd_output['properties']) def test_volume_qos_asso_disasso(self): """Tests associate and disassociate qos with volume type""" vol_type1 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create ' + - vol_type1, + 'volume type create ' + vol_type1, parse_output=True, ) - self.assertEqual( - vol_type1, - cmd_output['name'] - ) + self.assertEqual(vol_type1, cmd_output['name']) self.addCleanup(self.openstack, 'volume type delete ' + vol_type1) vol_type2 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create ' + - vol_type2, + 'volume type create ' + vol_type2, parse_output=True, ) - self.assertEqual( - vol_type2, - cmd_output['name'] - ) + self.assertEqual(vol_type2, cmd_output['name']) self.addCleanup(self.openstack, 'volume type delete ' + vol_type2) name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume qos create ' + - name, + 'volume qos create ' + name, parse_output=True, ) - self.assertEqual( - name, - cmd_output['name'] - ) + self.assertEqual(name, cmd_output['name']) self.addCleanup(self.openstack, 'volume qos delete ' + name) # Test associate raw_output = self.openstack( - 'volume qos associate ' + - name + ' ' + vol_type1 + 'volume qos associate ' + name + ' ' + vol_type1 ) self.assertOutput('', raw_output) raw_output = self.openstack( - 'volume qos associate ' + - name + ' ' + vol_type2 + 'volume qos associate ' + name + ' ' + vol_type2 ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) types = cmd_output["associations"] @@ -188,14 +144,15 @@ def test_volume_qos_asso_disasso(self): # Test disassociate raw_output = self.openstack( - 'volume qos disassociate ' + - '--volume-type ' + vol_type1 + - ' ' + name + 'volume qos disassociate ' + + '--volume-type ' + + vol_type1 + + ' ' + + name ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) types = cmd_output["associations"] @@ -204,13 +161,11 @@ def test_volume_qos_asso_disasso(self): # Test disassociate --all raw_output = self.openstack( - 'volume qos associate ' + - name + ' ' + vol_type1 + 'volume qos associate ' + name + ' ' + vol_type1 ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) types = cmd_output["associations"] @@ -218,13 +173,11 @@ def test_volume_qos_asso_disasso(self): self.assertIn(vol_type2, types) raw_output = self.openstack( - 'volume qos disassociate ' + - '--all ' + name + 'volume qos disassociate ' + '--all ' + name ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume qos show ' + - name, + 'volume qos show ' + name, parse_output=True, ) self.assertNotIn("associations", cmd_output.keys()) diff --git a/openstackclient/tests/functional/volume/v3/test_transfer_request.py b/openstackclient/tests/functional/volume/v3/test_transfer_request.py index 449fa08e92..c9bdbc48f7 100644 --- 
a/openstackclient/tests/functional/volume/v3/test_transfer_request.py +++ b/openstackclient/tests/functional/volume/v3/test_transfer_request.py @@ -16,7 +16,7 @@ class TransferRequestTests(common.BaseVolumeTests): - """Functional tests for transfer request. """ + """Functional tests for transfer request.""" API_VERSION = '3' @@ -26,26 +26,31 @@ def test_volume_transfer_request_accept(self): # create a volume cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - volume_name, + 'volume create ' + '--size 1 ' + volume_name, parse_output=True, ) self.assertEqual(volume_name, cmd_output['name']) self.addCleanup( self.openstack, - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume delete ' + - volume_name + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume delete ' + + volume_name, ) self.wait_for_status("volume", volume_name, "available") # create volume transfer request for the volume # and get the auth_key of the new transfer request cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request create ' + - ' --name ' + xfer_name + ' ' + volume_name, + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request create ' + + ' --name ' + + xfer_name + + ' ' + + volume_name, parse_output=True, ) self.assertEqual(xfer_name, cmd_output['name']) @@ -56,9 +61,14 @@ def test_volume_transfer_request_accept(self): # accept the volume transfer request cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request accept ' + - '--auth-key ' + auth_key + ' ' + xfer_id, + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request accept ' + + '--auth-key ' + + auth_key + + ' ' + + xfer_id, parse_output=True, ) self.assertEqual(xfer_name, cmd_output['name']) @@ -70,23 +80,29 @@ def test_volume_transfer_request_list_show(self): # create a volume cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + volume_name, + 'volume create ' + '--size 1 ' + volume_name, parse_output=True, ) self.assertEqual(volume_name, cmd_output['name']) self.addCleanup( self.openstack, - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume delete ' + - volume_name + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume delete ' + + volume_name, ) self.wait_for_status("volume", volume_name, "available") cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request create ' + - ' --name ' + xfer_name + ' ' + volume_name, + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request create ' + + ' --name ' + + xfer_name + + ' ' + + volume_name, parse_output=True, ) self.assertEqual(xfer_name, cmd_output['name']) @@ -96,16 +112,20 @@ def test_volume_transfer_request_list_show(self): self.wait_for_status("volume", volume_name, "awaiting-transfer") cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request list', + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request list', parse_output=True, ) self.assertIn(xfer_name, [req['Name'] for req in cmd_output]) cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request show ' + - xfer_id, + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request show ' + + xfer_id, parse_output=True, ) self.assertEqual(xfer_name, 
cmd_output['name']) @@ -116,8 +136,10 @@ def test_volume_transfer_request_list_show(self): # to become 'available' before attempting to delete # the volume. cmd_output = self.openstack( - '--os-volume-api-version ' + self.API_VERSION + ' ' + - 'volume transfer request delete ' + - xfer_id + '--os-volume-api-version ' + + self.API_VERSION + + ' ' + + 'volume transfer request delete ' + + xfer_id ) self.wait_for_status("volume", volume_name, "available") diff --git a/openstackclient/tests/functional/volume/v3/test_volume.py b/openstackclient/tests/functional/volume/v3/test_volume.py index 8a394e7515..07a7959167 100644 --- a/openstackclient/tests/functional/volume/v3/test_volume.py +++ b/openstackclient/tests/functional/volume/v3/test_volume.py @@ -16,7 +16,7 @@ class VolumeTests(common.BaseVolumeTests): - """Functional tests for volume. """ + """Functional tests for volume.""" def test_volume_delete(self): """Test create, delete multiple""" @@ -70,11 +70,7 @@ def test_volume_list(self): cmd_output["size"], ) self.wait_for_status("volume", name2, "available") - raw_output = self.openstack( - 'volume set ' + - '--state error ' + - name2 - ) + raw_output = self.openstack('volume set ' + '--state error ' + name2) self.assertOutput('', raw_output) # Test list --long @@ -103,11 +99,11 @@ def test_volume_set_and_unset(self): name = uuid.uuid4().hex new_name = name + "_" cmd_output = self.openstack( - 'volume create ' + - '--size 1 ' + - '--description aaaa ' + - '--property Alpha=a ' + - name, + 'volume create ' + + '--size 1 ' + + '--description aaaa ' + + '--property Alpha=a ' + + name, parse_output=True, ) self.addCleanup(self.openstack, 'volume delete ' + new_name) @@ -128,24 +124,25 @@ def test_volume_set_and_unset(self): cmd_output["properties"], ) self.assertEqual( - 'false', + False, cmd_output["bootable"], ) self.wait_for_status("volume", name, "available") # Test volume set raw_output = self.openstack( - 'volume set ' + - '--name ' + new_name + - ' --size 2 ' + - '--description bbbb ' + - '--no-property ' + - '--property Beta=b ' + - '--property Gamma=c ' + - '--image-property a=b ' + - '--image-property c=d ' + - '--bootable ' + - name, + 'volume set ' + + '--name ' + + new_name + + ' --size 2 ' + + '--description bbbb ' + + '--no-property ' + + '--property Beta=b ' + + '--property Gamma=c ' + + '--image-property a=b ' + + '--image-property c=d ' + + '--bootable ' + + name, ) self.assertOutput('', raw_output) self.wait_for_status("volume", new_name, "available") @@ -175,16 +172,16 @@ def test_volume_set_and_unset(self): cmd_output["volume_image_metadata"], ) self.assertEqual( - 'true', + True, cmd_output["bootable"], ) # Test volume unset raw_output = self.openstack( - 'volume unset ' + - '--property Beta ' + - '--image-property a ' + - new_name, + 'volume unset ' + + '--property Beta ' + + '--image-property a ' + + new_name, ) self.assertOutput('', raw_output) @@ -217,9 +214,10 @@ def test_volume_snapshot(self): cmd_output["name"], ) cmd_output = self.openstack( - 'volume snapshot create ' + - snapshot_name + - ' --volume ' + volume_name, + 'volume snapshot create ' + + snapshot_name + + ' --volume ' + + volume_name, parse_output=True, ) self.wait_for_status("volume snapshot", snapshot_name, "available") @@ -227,9 +225,7 @@ def test_volume_snapshot(self): name = uuid.uuid4().hex # Create volume from snapshot cmd_output = self.openstack( - 'volume create ' + - '--snapshot ' + snapshot_name + - ' ' + name, + 'volume create ' + '--snapshot ' + snapshot_name + ' ' + name, parse_output=True, 
) self.addCleanup(self.openstack, 'volume delete ' + name) @@ -241,8 +237,7 @@ def test_volume_snapshot(self): self.wait_for_status("volume", name, "available") # Delete snapshot - raw_output = self.openstack( - 'volume snapshot delete ' + snapshot_name) + raw_output = self.openstack('volume snapshot delete ' + snapshot_name) self.assertOutput('', raw_output) # Deleting snapshot may take time. If volume snapshot still exists when # a parent volume delete is requested, the volume deletion will fail. diff --git a/openstackclient/tests/functional/volume/v3/test_volume_snapshot.py b/openstackclient/tests/functional/volume/v3/test_volume_snapshot.py index 7b2d88d095..b84bb0368b 100644 --- a/openstackclient/tests/functional/volume/v3/test_volume_snapshot.py +++ b/openstackclient/tests/functional/volume/v3/test_volume_snapshot.py @@ -16,18 +16,16 @@ class VolumeSnapshotTests(common.BaseVolumeTests): - """Functional tests for volume snapshot. """ + """Functional tests for volume snapshot.""" VOLLY = uuid.uuid4().hex @classmethod def setUpClass(cls): - super(VolumeSnapshotTests, cls).setUpClass() - # create a volume for all tests to create snapshot + super().setUpClass() + # create a test volume used by all snapshot tests cmd_output = cls.openstack( - 'volume create ' + - '--size 1 ' + - cls.VOLLY, + 'volume create ' + '--size 1 ' + cls.VOLLY, parse_output=True, ) cls.wait_for_status('volume', cls.VOLLY, 'available') @@ -37,189 +35,81 @@ def setUpClass(cls): def tearDownClass(cls): try: cls.wait_for_status('volume', cls.VOLLY, 'available') - raw_output = cls.openstack( - 'volume delete --force ' + cls.VOLLY) + raw_output = cls.openstack('volume delete --force ' + cls.VOLLY) cls.assertOutput('', raw_output) finally: - super(VolumeSnapshotTests, cls).tearDownClass() + super().tearDownClass() - def test_volume_snapshot_delete(self): - """Test create, delete multiple""" - name1 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume snapshot create ' + - name1 + - ' --volume ' + self.VOLLY, - parse_output=True, - ) - self.assertEqual( - name1, - cmd_output["name"], - ) + def test_volume_snapshot(self): + # create volume snapshot + name = uuid.uuid4().hex - name2 = uuid.uuid4().hex cmd_output = self.openstack( - 'volume snapshot create ' + - name2 + - ' --volume ' + self.VOLLY, + 'volume snapshot create ' + + '--volume ' + + self.VOLLY + + ' --description aaaa ' + + '--property Alpha=a ' + + name, parse_output=True, ) - self.assertEqual( - name2, - cmd_output["name"], - ) - - self.wait_for_status('volume snapshot', name1, 'available') - self.wait_for_status('volume snapshot', name2, 'available') + snap_id = cmd_output['id'] - del_output = self.openstack( - 'volume snapshot delete ' + name1 + ' ' + name2) - self.assertOutput('', del_output) - self.wait_for_delete('volume snapshot', name1) - self.wait_for_delete('volume snapshot', name2) - - def test_volume_snapshot_list(self): - """Test create, list filter""" - name1 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume snapshot create ' + - name1 + - ' --volume ' + self.VOLLY, - parse_output=True, - ) - self.addCleanup(self.wait_for_delete, 'volume snapshot', name1) - self.addCleanup(self.openstack, 'volume snapshot delete ' + name1) - self.assertEqual( - name1, - cmd_output["name"], + self.addCleanup(self.wait_for_delete, 'volume snapshot', snap_id) + # delete volume snapshot + self.addCleanup( + self.openstack, + 'volume snapshot delete ' + snap_id, ) - self.assertEqual( - self.VOLUME_ID, - cmd_output["volume_id"], - ) - self.assertEqual( - 
1, - cmd_output["size"], - ) - self.wait_for_status('volume snapshot', name1, 'available') + self.wait_for_status('volume snapshot', snap_id, 'available') - name2 = uuid.uuid4().hex - cmd_output = self.openstack( - 'volume snapshot create ' + - name2 + - ' --volume ' + self.VOLLY, + # show volume snapshot + snapshot_info = self.openstack( + 'volume snapshot show ' + name, parse_output=True, ) - self.addCleanup(self.wait_for_delete, 'volume snapshot', name2) - self.addCleanup(self.openstack, 'volume snapshot delete ' + name2) - self.assertEqual( - name2, - cmd_output["name"], - ) - self.assertEqual( - self.VOLUME_ID, - cmd_output["volume_id"], - ) - self.assertEqual( - 1, - cmd_output["size"], - ) - self.wait_for_status('volume snapshot', name2, 'available') - raw_output = self.openstack( - 'volume snapshot set ' + - '--state error ' + - name2 - ) - self.assertOutput('', raw_output) - # Test list --long, --status - cmd_output = self.openstack( - 'volume snapshot list ' + - '--long ' + - '--status error', - parse_output=True, - ) - names = [x["Name"] for x in cmd_output] - self.assertNotIn(name1, names) - self.assertIn(name2, names) + self.assertEqual(name, snapshot_info['name']) + self.assertEqual('aaaa', snapshot_info["description"]) + self.assertEqual({'Alpha': 'a'}, snapshot_info["properties"]) - # Test list --volume + # list volume snapshot --name cmd_output = self.openstack( - 'volume snapshot list ' + - '--volume ' + self.VOLLY, + 'volume snapshot list --name ' + name, parse_output=True, ) - names = [x["Name"] for x in cmd_output] - self.assertIn(name1, names) - self.assertIn(name2, names) + names = [x['Name'] for x in cmd_output] + self.assertIn(name, names) - # Test list --name + # list volume snapshot --volume cmd_output = self.openstack( - 'volume snapshot list ' + - '--name ' + name1, + 'volume snapshot list ' + '--volume ' + self.VOLLY, parse_output=True, ) names = [x["Name"] for x in cmd_output] - self.assertIn(name1, names) - self.assertNotIn(name2, names) + self.assertIn(name, names) - def test_volume_snapshot_set(self): - """Test create, set, unset, show, delete volume snapshot""" - name = uuid.uuid4().hex + # set volume snapshot new_name = name + "_" - cmd_output = self.openstack( - 'volume snapshot create ' + - '--volume ' + self.VOLLY + - ' --description aaaa ' + - '--property Alpha=a ' + - name, - parse_output=True, - ) - self.addCleanup(self.wait_for_delete, 'volume snapshot', new_name) - self.addCleanup(self.openstack, 'volume snapshot delete ' + new_name) - self.assertEqual( - name, - cmd_output["name"], - ) - self.assertEqual( - 1, - cmd_output["size"], - ) - self.assertEqual( - 'aaaa', - cmd_output["description"], - ) - self.assertEqual( - {'Alpha': 'a'}, - cmd_output["properties"], - ) - self.wait_for_status('volume snapshot', name, 'available') - - # Test volume snapshot set raw_output = self.openstack( - 'volume snapshot set ' + - '--name ' + new_name + - ' --description bbbb ' + - '--property Alpha=c ' + - '--property Beta=b ' + - name, + 'volume snapshot set ' + + '--name ' + + new_name + + ' --description bbbb ' + + '--property Alpha=c ' + + '--property Beta=b ' + + snap_id, ) self.assertOutput('', raw_output) - # Show snapshot set result cmd_output = self.openstack( - 'volume snapshot show ' + - new_name, + 'volume snapshot show ' + new_name, parse_output=True, ) self.assertEqual( new_name, cmd_output["name"], ) - self.assertEqual( - 1, - cmd_output["size"], - ) self.assertEqual( 'bbbb', cmd_output["description"], @@ -229,17 +119,14 @@ def 
test_volume_snapshot_set(self): cmd_output["properties"], ) - # Test volume snapshot unset + # unset volume snapshot raw_output = self.openstack( - 'volume snapshot unset ' + - '--property Alpha ' + - new_name, + 'volume snapshot unset ' + '--property Alpha ' + new_name, ) self.assertOutput('', raw_output) cmd_output = self.openstack( - 'volume snapshot show ' + - new_name, + 'volume snapshot show ' + new_name, parse_output=True, ) self.assertEqual( @@ -247,19 +134,25 @@ def test_volume_snapshot_set(self): cmd_output["properties"], ) - # Test volume snapshot set --no-property + # set volume snapshot --no-property, --state error raw_output = self.openstack( - 'volume snapshot set ' + - '--no-property ' + - new_name, + 'volume snapshot set ' + + '--no-property ' + + '--state error ' + + new_name, ) self.assertOutput('', raw_output) + cmd_output = self.openstack( - 'volume snapshot show ' + - new_name, + 'volume snapshot show ' + new_name, parse_output=True, ) - self.assertNotIn( - {'Beta': 'b'}, - cmd_output["properties"], + self.assertEqual({}, cmd_output["properties"]) + + # list volume snapshot --long --status + cmd_output = self.openstack( + 'volume snapshot list ' + '--long ' + '--status error', + parse_output=True, ) + names = [x["Name"] for x in cmd_output] + self.assertIn(new_name, names) diff --git a/openstackclient/tests/functional/volume/v3/test_volume_type.py b/openstackclient/tests/functional/volume/v3/test_volume_type.py index 18e46c5247..421b3224f6 100644 --- a/openstackclient/tests/functional/volume/v3/test_volume_type.py +++ b/openstackclient/tests/functional/volume/v3/test_volume_type.py @@ -17,13 +17,12 @@ class VolumeTypeTests(common.BaseVolumeTests): - """Functional tests for volume type. """ + """Functional tests for volume type.""" def test_volume_type_create_list(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) self.addCleanup( @@ -33,7 +32,7 @@ def test_volume_type_create_list(self): self.assertEqual(name, cmd_output['name']) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual(name, cmd_output['name']) @@ -51,32 +50,26 @@ def test_volume_type_create_list(self): def test_volume_type_set_unset_properties(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) - self.addCleanup( - self.openstack, - 'volume type delete ' + name - ) + self.addCleanup(self.openstack, 'volume type delete ' + name) self.assertEqual(name, cmd_output['name']) raw_output = self.openstack( - 'volume type set --property a=b --property c=d %s' % name + f'volume type set --property a=b --property c=d {name}' ) self.assertEqual("", raw_output) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties']) - raw_output = self.openstack( - 'volume type unset --property a %s' % name - ) + raw_output = self.openstack(f'volume type unset --property a {name}') self.assertEqual("", raw_output) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual({'c': 'd'}, cmd_output['properties']) @@ -84,32 +77,28 @@ def test_volume_type_set_unset_properties(self): def test_volume_type_set_unset_multiple_properties(self): name = 
uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) - self.addCleanup( - self.openstack, - 'volume type delete ' + name - ) + self.addCleanup(self.openstack, 'volume type delete ' + name) self.assertEqual(name, cmd_output['name']) raw_output = self.openstack( - 'volume type set --property a=b --property c=d %s' % name + f'volume type set --property a=b --property c=d {name}' ) self.assertEqual("", raw_output) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties']) raw_output = self.openstack( - 'volume type unset --property a --property c %s' % name + f'volume type unset --property a --property c {name}' ) self.assertEqual("", raw_output) cmd_output = self.openstack( - 'volume type show %s' % name, + f'volume type show {name}', parse_output=True, ) self.assertEqual({}, cmd_output['properties']) @@ -117,34 +106,28 @@ def test_volume_type_set_unset_multiple_properties(self): def test_volume_type_set_unset_project(self): name = uuid.uuid4().hex cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) - self.addCleanup( - self.openstack, - 'volume type delete ' + name - ) + self.addCleanup(self.openstack, 'volume type delete ' + name) self.assertEqual(name, cmd_output['name']) - raw_output = self.openstack( - 'volume type set --project admin %s' % name - ) + raw_output = self.openstack(f'volume type set --project admin {name}') self.assertEqual("", raw_output) raw_output = self.openstack( - 'volume type unset --project admin %s' % name + f'volume type unset --project admin {name}' ) self.assertEqual("", raw_output) def test_multi_delete(self): vol_type1 = uuid.uuid4().hex vol_type2 = uuid.uuid4().hex - self.openstack('volume type create %s' % vol_type1) + self.openstack(f'volume type create {vol_type1}') time.sleep(5) - self.openstack('volume type create %s' % vol_type2) + self.openstack(f'volume type create {vol_type2}') time.sleep(5) - cmd = 'volume type delete %s %s' % (vol_type1, vol_type2) + cmd = f'volume type delete {vol_type1} {vol_type2}' raw_output = self.openstack(cmd) self.assertOutput('', raw_output) @@ -161,14 +144,15 @@ def test_encryption_type(self): '--encryption-provider LuksEncryptor ' '--encryption-cipher aes-xts-plain64 ' '--encryption-key-size 128 ' - '--encryption-control-location front-end ' + - encryption_type, + '--encryption-control-location front-end ' + encryption_type, parse_output=True, ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } for attr, value in expected.items(): self.assertEqual(value, cmd_output['encryption'][attr]) # test show encryption type @@ -176,10 +160,12 @@ def test_encryption_type(self): 'volume type show --encryption-type ' + encryption_type, parse_output=True, ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } for attr, value in expected.items(): self.assertEqual(value, cmd_output['encryption'][attr]) # test list encryption 
type @@ -187,35 +173,39 @@ def test_encryption_type(self): 'volume type list --encryption-type', parse_output=True, ) - encryption_output = [t['Encryption'] for t in cmd_output - if t['Name'] == encryption_type][0] - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} + encryption_output = [ + t['Encryption'] for t in cmd_output if t['Name'] == encryption_type + ][0] + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } for attr, value in expected.items(): self.assertEqual(value, encryption_output[attr]) # test set existing encryption type raw_output = self.openstack( 'volume type set ' '--encryption-key-size 256 ' - '--encryption-control-location back-end ' + - encryption_type) + '--encryption-control-location back-end ' + encryption_type + ) self.assertEqual('', raw_output) cmd_output = self.openstack( 'volume type show --encryption-type ' + encryption_type, parse_output=True, ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 256, - 'control_location': 'back-end'} + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 256, + 'control_location': 'back-end', + } for attr, value in expected.items(): self.assertEqual(value, cmd_output['encryption'][attr]) # test set new encryption type cmd_output = self.openstack( - 'volume type create --private ' + - name, + 'volume type create --private ' + name, parse_output=True, ) self.addCleanup( @@ -229,18 +219,20 @@ def test_encryption_type(self): '--encryption-provider LuksEncryptor ' '--encryption-cipher aes-xts-plain64 ' '--encryption-key-size 128 ' - '--encryption-control-location front-end ' + - name) + '--encryption-control-location front-end ' + name + ) self.assertEqual('', raw_output) cmd_output = self.openstack( 'volume type show --encryption-type ' + name, parse_output=True, ) - expected = {'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end'} + expected = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } for attr, value in expected.items(): self.assertEqual(value, cmd_output['encryption'][attr]) # test unset encryption type diff --git a/openstackclient/tests/unit/api/fakes.py b/openstackclient/tests/unit/api/fakes.py index 26213a2f42..192bd69d5b 100644 --- a/openstackclient/tests/unit/api/fakes.py +++ b/openstackclient/tests/unit/api/fakes.py @@ -47,10 +47,9 @@ class TestSession(utils.TestCase): - BASE_URL = 'https://api.example.com:1234/vX' def setUp(self): - super(TestSession, self).setUp() + super().setUp() self.sess = session.Session() self.requests_mock = self.useFixture(fixture.Fixture()) diff --git a/openstackclient/tests/unit/api/test_api.py b/openstackclient/tests/unit/api/test_api.py index 5f4a0c1afa..f930260b16 100644 --- a/openstackclient/tests/unit/api/test_api.py +++ b/openstackclient/tests/unit/api/test_api.py @@ -20,9 +20,8 @@ class TestKeystoneSession(api_fakes.TestSession): - def setUp(self): - super(TestKeystoneSession, self).setUp() + super().setUp() self.api = api.KeystoneSession( session=self.sess, endpoint=self.BASE_URL, @@ -40,9 +39,8 @@ def test_session_request(self): class TestBaseAPI(api_fakes.TestSession): - def setUp(self): - super(TestBaseAPI, self).setUp() + super().setUp() self.api = api.BaseAPI( session=self.sess, endpoint=self.BASE_URL, @@ -80,7 
+78,6 @@ def test_delete(self): # find tests def test_find_attr_by_id(self): - # All first requests (by name) will fail in this test self.requests_mock.register_uri( 'GET', @@ -172,7 +169,6 @@ def test_find_attr_by_name(self): self.assertEqual(api_fakes.RESP_ITEM_1, ret) def test_find_attr_path_resource(self): - # Test resource different than path self.requests_mock.register_uri( 'GET', diff --git a/openstackclient/tests/unit/api/test_compute_v2.py b/openstackclient/tests/unit/api/test_compute_v2.py index edf5258f6c..a609025b22 100644 --- a/openstackclient/tests/unit/api/test_compute_v2.py +++ b/openstackclient/tests/unit/api/test_compute_v2.py @@ -13,753 +13,536 @@ """Compute v2 API Library Tests""" -from keystoneauth1 import session +import http +from unittest import mock +import uuid + +from openstack.compute.v2 import _proxy from osc_lib import exceptions as osc_lib_exceptions -from requests_mock.contrib import fixture from openstackclient.api import compute_v2 as compute +from openstackclient.tests.unit import fakes from openstackclient.tests.unit import utils -FAKE_PROJECT = 'xyzpdq' -FAKE_URL = 'http://gopher.com/v2' +class TestSecurityGroup(utils.TestCase): + def setUp(self): + super().setUp() + + self.compute_client = mock.Mock(_proxy.Proxy) + + def test_create_security_group(self): + sg_name = 'name-' + uuid.uuid4().hex + sg_description = 'description-' + uuid.uuid4().hex + data = { + 'security_group': { + 'id': uuid.uuid4().hex, + 'name': sg_name, + 'description': sg_description, + 'tenant_id': 'project-id-' + uuid.uuid4().hex, + 'rules': [], + } + } + self.compute_client.post.return_value = fakes.FakeResponse(data=data) + + result = compute.create_security_group( + self.compute_client, sg_name, sg_description + ) + + self.compute_client.post.assert_called_once_with( + '/os-security-groups', + data={'name': sg_name, 'description': sg_description}, + microversion='2.1', + ) + self.assertEqual(data['security_group'], result) + + def test_list_security_groups(self): + data = { + 'security_groups': [ + { + 'id': uuid.uuid4().hex, + 'name': uuid.uuid4().hex, + 'description': 'description-' + uuid.uuid4().hex, + 'tenant_id': 'project-id-' + uuid.uuid4().hex, + 'rules': [], + } + ], + } + self.compute_client.get.return_value = fakes.FakeResponse(data=data) + + result = compute.list_security_groups(self.compute_client) + + self.compute_client.get.assert_called_once_with( + '/os-security-groups', microversion='2.1' + ) + self.assertEqual(data['security_groups'], result) + + def test_find_security_group_by_id(self): + sg_id = uuid.uuid4().hex + sg_name = 'name-' + uuid.uuid4().hex + data = { + 'security_group': { + 'id': sg_id, + 'name': sg_name, + 'description': 'description-' + uuid.uuid4().hex, + 'tenant_id': 'project-id-' + uuid.uuid4().hex, + 'rules': [], + } + } + self.compute_client.get.side_effect = [ + fakes.FakeResponse(data=data), + ] + result = compute.find_security_group(self.compute_client, sg_id) + + self.compute_client.get.assert_has_calls( + [ + mock.call(f'/os-security-groups/{sg_id}', microversion='2.1'), + ] + ) + self.assertEqual(data['security_group'], result) + + def test_find_security_group_by_name(self): + sg_id = uuid.uuid4().hex + sg_name = 'name-' + uuid.uuid4().hex + data = { + 'security_groups': [ + { + 'id': sg_id, + 'name': sg_name, + 'description': 'description-' + uuid.uuid4().hex, + 'tenant_id': 'project-id-' + uuid.uuid4().hex, + 'rules': [], + } + ], + } + self.compute_client.get.side_effect = [ + 
fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] -class TestComputeAPIv2(utils.TestCase): + result = compute.find_security_group(self.compute_client, sg_name) - def setUp(self): - super(TestComputeAPIv2, self).setUp() - sess = session.Session() - self.api = compute.APIv2(session=sess, endpoint=FAKE_URL) - self.requests_mock = self.useFixture(fixture.Fixture()) - - -class TestFloatingIP(TestComputeAPIv2): - - FAKE_FLOATING_IP_RESP = { - 'id': 1, - 'ip': '203.0.113.11', # TEST-NET-3 - 'fixed_ip': '198.51.100.11', # TEST-NET-2 - 'pool': 'nova', - 'instance_id': None, - } - FAKE_FLOATING_IP_RESP_2 = { - 'id': 2, - 'ip': '203.0.113.12', # TEST-NET-3 - 'fixed_ip': '198.51.100.12', # TEST-NET-2 - 'pool': 'nova', - 'instance_id': None, - } - LIST_FLOATING_IP_RESP = [ - FAKE_FLOATING_IP_RESP, - FAKE_FLOATING_IP_RESP_2, - ] - - FAKE_SERVER_RESP_1 = { - 'id': 1, - 'name': 'server1', - } - - def test_floating_ip_add_id(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/servers/1/action', - json={'server': {}}, - status_code=200, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/servers/1', - json={'server': self.FAKE_SERVER_RESP_1}, - status_code=200, - ) - ret = self.api.floating_ip_add('1', '1.0.1.0') - self.assertEqual(200, ret.status_code) - - def test_floating_ip_add_name(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/servers/1/action', - json={'server': {}}, - status_code=200, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/servers/server1', - json={'server': self.FAKE_SERVER_RESP_1}, - status_code=200, - ) - ret = self.api.floating_ip_add('server1', '1.0.1.0') - self.assertEqual(200, ret.status_code) - - def test_floating_ip_create(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/os-floating-ips', - json={'floating_ip': self.FAKE_FLOATING_IP_RESP}, - status_code=200, - ) - ret = self.api.floating_ip_create('nova') - self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret) - - def test_floating_ip_create_not_found(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/os-floating-ips', - status_code=404, + self.compute_client.get.assert_has_calls( + [ + mock.call( + f'/os-security-groups/{sg_name}', microversion='2.1' + ), + mock.call('/os-security-groups', microversion='2.1'), + ] ) + self.assertEqual(data['security_groups'][0], result) + + def test_find_security_group_not_found(self): + data = {'security_groups': []} + self.compute_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] self.assertRaises( osc_lib_exceptions.NotFound, - self.api.floating_ip_create, - 'not-nova', - ) + compute.find_security_group, + self.compute_client, + 'invalid-sg', + ) + + def test_find_security_group_by_name_duplicate(self): + sg_name = 'name-' + uuid.uuid4().hex + data = { + 'security_groups': [ + { + 'id': uuid.uuid4().hex, + 'name': sg_name, + 'description': 'description-' + uuid.uuid4().hex, + 'tenant_id': 'project-id-' + uuid.uuid4().hex, + 'rules': [], + }, + { + 'id': uuid.uuid4().hex, + 'name': sg_name, + 'description': 'description-' + uuid.uuid4().hex, + 'tenant_id': 'project-id-' + uuid.uuid4().hex, + 'rules': [], + }, + ], + } + self.compute_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] - def test_floating_ip_delete(self): - self.requests_mock.register_uri( - 'DELETE', - FAKE_URL + '/os-floating-ips/1', - 
status_code=202, - ) - ret = self.api.floating_ip_delete('1') - self.assertEqual(202, ret.status_code) - self.assertEqual("", ret.text) - - def test_floating_ip_delete_none(self): - ret = self.api.floating_ip_delete() - self.assertIsNone(ret) - - def test_floating_ip_find_id(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-floating-ips/1', - json={'floating_ip': self.FAKE_FLOATING_IP_RESP}, - status_code=200, - ) - ret = self.api.floating_ip_find('1') - self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret) - - def test_floating_ip_find_ip(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-floating-ips/' + self.FAKE_FLOATING_IP_RESP['ip'], - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-floating-ips', - json={'floating_ips': self.LIST_FLOATING_IP_RESP}, - status_code=200, - ) - ret = self.api.floating_ip_find(self.FAKE_FLOATING_IP_RESP['ip']) - self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret) - - def test_floating_ip_find_not_found(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-floating-ips/1.2.3.4', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-floating-ips', - json={'floating_ips': self.LIST_FLOATING_IP_RESP}, - status_code=200, - ) self.assertRaises( osc_lib_exceptions.NotFound, - self.api.floating_ip_find, - '1.2.3.4', - ) + compute.find_security_group, + self.compute_client, + sg_name, + ) + + def test_update_security_group(self): + sg_id = uuid.uuid4().hex + sg_name = 'name-' + uuid.uuid4().hex + sg_description = 'description-' + uuid.uuid4().hex + data = { + 'security_group': { + 'id': sg_id, + 'name': sg_name, + 'description': sg_description, + 'tenant_id': 'project-id-' + uuid.uuid4().hex, + 'rules': [], + } + } + self.compute_client.put.return_value = fakes.FakeResponse(data=data) - def test_floating_ip_list(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-floating-ips', - json={'floating_ips': self.LIST_FLOATING_IP_RESP}, - status_code=200, - ) - ret = self.api.floating_ip_list() - self.assertEqual(self.LIST_FLOATING_IP_RESP, ret) - - def test_floating_ip_remove_id(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/servers/1/action', - status_code=200, + result = compute.update_security_group( + self.compute_client, sg_id, sg_name, sg_description ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/servers/1', - json={'server': self.FAKE_SERVER_RESP_1}, - status_code=200, - ) - ret = self.api.floating_ip_remove('1', '1.0.1.0') - self.assertEqual(200, ret.status_code) - - def test_floating_ip_remove_name(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/servers/1/action', - status_code=200, + + self.compute_client.put.assert_called_once_with( + f'/os-security-groups/{sg_id}', + data={'name': sg_name, 'description': sg_description}, + microversion='2.1', ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/servers/server1', - json={'server': self.FAKE_SERVER_RESP_1}, - status_code=200, + self.assertEqual(data['security_group'], result) + + def test_delete_security_group(self): + sg_id = uuid.uuid4().hex + self.compute_client.delete.return_value = fakes.FakeResponse( + status_code=http.HTTPStatus.NO_CONTENT ) - ret = self.api.floating_ip_remove('server1', '1.0.1.0') - self.assertEqual(200, ret.status_code) + result = compute.delete_security_group(self.compute_client, sg_id) -class TestFloatingIPPool(TestComputeAPIv2): + self.compute_client.delete.assert_called_once_with( + 
f'/os-security-groups/{sg_id}', + microversion='2.1', + ) + self.assertIsNone(result) - LIST_FLOATING_IP_POOL_RESP = [ - {"name": "tide"}, - {"name": "press"}, - ] - def test_floating_ip_pool_list(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-floating-ip-pools', - json={'floating_ip_pools': self.LIST_FLOATING_IP_POOL_RESP}, - status_code=200, - ) - ret = self.api.floating_ip_pool_list() - self.assertEqual(self.LIST_FLOATING_IP_POOL_RESP, ret) - - -class TestHost(TestComputeAPIv2): - - FAKE_HOST_RESP_1 = { - "zone": "internal", - "host_name": "myhost", - "service": "conductor", - } - - FAKE_HOST_RESP_2 = { - "zone": "internal", - "host_name": "myhost", - "service": "scheduler", - } - - FAKE_HOST_RESP_3 = { - "zone": "nova", - "host_name": "myhost", - "service": "compute", - } - - LIST_HOST_RESP = [ - FAKE_HOST_RESP_1, - FAKE_HOST_RESP_2, - FAKE_HOST_RESP_3, - ] - - def test_host_list_no_options(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-hosts', - json={'hosts': self.LIST_HOST_RESP}, - status_code=200, +class TestSecurityGroupRule(utils.TestCase): + def setUp(self): + super().setUp() + + self.compute_client = mock.Mock(_proxy.Proxy) + + def test_create_security_group_rule(self): + sg_id = uuid.uuid4().hex + data = { + 'security_group_rule': { + 'parent_group_id': sg_id, + 'ip_protocol': 'tcp', + 'from_port': 22, + 'to_port': 22, + 'cidr': '10.0.0.0/24', + } + } + self.compute_client.post.return_value = fakes.FakeResponse(data=data) + + result = compute.create_security_group_rule( + self.compute_client, + security_group_id=sg_id, + ip_protocol='tcp', + from_port=22, + to_port=22, + remote_ip='10.0.0.0/24', + remote_group=None, ) - ret = self.api.host_list() - self.assertEqual(self.LIST_HOST_RESP, ret) - - def test_host_list_zone(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-hosts?zone=nova', - json={'hosts': [self.FAKE_HOST_RESP_3]}, - status_code=200, + + self.compute_client.post.assert_called_once_with( + '/os-security-group-rules', + data={ + 'parent_group_id': sg_id, + 'ip_protocol': 'tcp', + 'from_port': 22, + 'to_port': 22, + 'cidr': '10.0.0.0/24', + 'group_id': None, + }, + microversion='2.1', ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-hosts', - json={'hosts': [self.FAKE_HOST_RESP_3]}, - status_code=200, + self.assertEqual(data['security_group_rule'], result) + + def test_delete_security_group_rule(self): + sg_id = uuid.uuid4().hex + self.compute_client.delete.return_value = fakes.FakeResponse( + status_code=http.HTTPStatus.NO_CONTENT ) - ret = self.api.host_list(zone='nova') - self.assertEqual([self.FAKE_HOST_RESP_3], ret) - - def test_host_set_none(self): - ret = self.api.host_set(host='myhost') - self.assertIsNone(ret) - - def test_host_set(self): - self.requests_mock.register_uri( - 'PUT', - FAKE_URL + '/os-hosts/myhost', - json={}, - status_code=200, + + result = compute.delete_security_group_rule(self.compute_client, sg_id) + + self.compute_client.delete.assert_called_once_with( + f'/os-security-group-rules/{sg_id}', + microversion='2.1', ) - ret = self.api.host_set(host='myhost', status='enabled') - self.assertEqual({}, ret) - - def test_host_show(self): - FAKE_RESOURCE_1 = { - "cpu": 2, - "disk_gb": 1028, - "host": "c1a7de0ac9d94e4baceae031d05caae3", - "memory_mb": 8192, - "project": "(total)", + self.assertIsNone(result) + + +class TestNetwork(utils.TestCase): + def setUp(self): + super().setUp() + + self.compute_client = mock.Mock(_proxy.Proxy) + + def test_create_network(self): + 
net_name = 'name-' + uuid.uuid4().hex + net_subnet = '10.0.0.0/24' + data = { + 'network': { + 'id': uuid.uuid4().hex, + 'label': net_name, + 'cidr': net_subnet, + 'share_address': True, + # other fields omitted for brevity + } } - FAKE_RESOURCE_2 = { - "cpu": 0, - "disk_gb": 0, - "host": "c1a7de0ac9d94e4baceae031d05caae3", - "memory_mb": 512, - "project": "(used_now)", + self.compute_client.post.return_value = fakes.FakeResponse(data=data) + + result = compute.create_network( + self.compute_client, + name=net_name, + subnet=net_subnet, + share_subnet=True, + ) + + self.compute_client.post.assert_called_once_with( + '/os-networks', + data={ + 'label': net_name, + 'cidr': net_subnet, + 'share_address': True, + }, + microversion='2.1', + ) + self.assertEqual(data['network'], result) + + def test_list_networks(self): + data = { + 'networks': [ + { + 'id': uuid.uuid4().hex, + 'label': f'name-{uuid.uuid4().hex}', + # other fields omitted for brevity + } + ], } - FAKE_RESOURCE_3 = { - "cpu": 0, - "disk_gb": 0, - "host": "c1a7de0ac9d94e4baceae031d05caae3", - "memory_mb": 0, - "project": "(used_max)", + self.compute_client.get.return_value = fakes.FakeResponse(data=data) + + result = compute.list_networks(self.compute_client) + + self.compute_client.get.assert_called_once_with( + '/os-networks', microversion='2.1' + ) + self.assertEqual(data['networks'], result) + + def test_find_network_by_id(self): + net_id = uuid.uuid4().hex + net_name = 'name-' + uuid.uuid4().hex + data = { + 'network': { + 'id': net_id, + 'label': net_name, + # other fields omitted for brevity + } } - FAKE_HOST_RESP = [ - {'resource': FAKE_RESOURCE_1}, - {'resource': FAKE_RESOURCE_2}, - {'resource': FAKE_RESOURCE_3}, + self.compute_client.get.side_effect = [ + fakes.FakeResponse(data=data), ] - FAKE_HOST_LIST = [ - FAKE_RESOURCE_1, - FAKE_RESOURCE_2, - FAKE_RESOURCE_3, + + result = compute.find_network(self.compute_client, net_id) + + self.compute_client.get.assert_has_calls( + [ + mock.call(f'/os-networks/{net_id}', microversion='2.1'), + ] + ) + self.assertEqual(data['network'], result) + + def test_find_network_by_name(self): + net_id = uuid.uuid4().hex + net_name = 'name-' + uuid.uuid4().hex + data = { + 'networks': [ + { + 'id': net_id, + 'label': net_name, + # other fields omitted for brevity + } + ], + } + self.compute_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), ] - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-hosts/myhost', - json={'host': FAKE_HOST_RESP}, - status_code=200, - ) - ret = self.api.host_show(host='myhost') - self.assertEqual(FAKE_HOST_LIST, ret) - - -class TestNetwork(TestComputeAPIv2): - - FAKE_NETWORK_RESP = { - 'id': '1', - 'label': 'label1', - 'cidr': '1.2.3.0/24', - } - - FAKE_NETWORK_RESP_2 = { - 'id': '2', - 'label': 'label2', - 'cidr': '4.5.6.0/24', - } - - LIST_NETWORK_RESP = [ - FAKE_NETWORK_RESP, - FAKE_NETWORK_RESP_2, - ] - - def test_network_create_default(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/os-networks', - json={'network': self.FAKE_NETWORK_RESP}, - status_code=200, - ) - ret = self.api.network_create('label1') - self.assertEqual(self.FAKE_NETWORK_RESP, ret) - - def test_network_create_options(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/os-networks', - json={'network': self.FAKE_NETWORK_RESP}, - status_code=200, - ) - ret = self.api.network_create( - name='label1', - subnet='1.2.3.0/24', - ) - self.assertEqual(self.FAKE_NETWORK_RESP, ret) - - def 
test_network_delete_id(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks/1', - json={'network': self.FAKE_NETWORK_RESP}, - status_code=200, - ) - self.requests_mock.register_uri( - 'DELETE', - FAKE_URL + '/os-networks/1', - status_code=202, - ) - ret = self.api.network_delete('1') - self.assertEqual(202, ret.status_code) - self.assertEqual("", ret.text) - - def test_network_delete_name(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks/label1', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks', - json={'networks': self.LIST_NETWORK_RESP}, - status_code=200, - ) - self.requests_mock.register_uri( - 'DELETE', - FAKE_URL + '/os-networks/1', - status_code=202, - ) - ret = self.api.network_delete('label1') - self.assertEqual(202, ret.status_code) - self.assertEqual("", ret.text) - - def test_network_delete_not_found(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks/label3', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks', - json={'networks': self.LIST_NETWORK_RESP}, - status_code=200, - ) - self.assertRaises( - osc_lib_exceptions.NotFound, - self.api.network_delete, - 'label3', - ) + result = compute.find_network(self.compute_client, net_name) - def test_network_find_id(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks/1', - json={'network': self.FAKE_NETWORK_RESP}, - status_code=200, - ) - ret = self.api.network_find('1') - self.assertEqual(self.FAKE_NETWORK_RESP, ret) - - def test_network_find_name(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks/label2', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks', - json={'networks': self.LIST_NETWORK_RESP}, - status_code=200, - ) - ret = self.api.network_find('label2') - self.assertEqual(self.FAKE_NETWORK_RESP_2, ret) - - def test_network_find_not_found(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks/label3', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks', - json={'networks': self.LIST_NETWORK_RESP}, - status_code=200, - ) - self.assertRaises( - osc_lib_exceptions.NotFound, - self.api.network_find, - 'label3', + self.compute_client.get.assert_has_calls( + [ + mock.call(f'/os-networks/{net_name}', microversion='2.1'), + mock.call('/os-networks', microversion='2.1'), + ] ) + self.assertEqual(data['networks'][0], result) - def test_network_list_no_options(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-networks', - json={'networks': self.LIST_NETWORK_RESP}, - status_code=200, - ) - ret = self.api.network_list() - self.assertEqual(self.LIST_NETWORK_RESP, ret) - - -class TestSecurityGroup(TestComputeAPIv2): - - FAKE_SECURITY_GROUP_RESP = { - 'id': '1', - 'name': 'sg1', - 'description': 'test security group', - 'tenant_id': '0123456789', - 'rules': [] - } - FAKE_SECURITY_GROUP_RESP_2 = { - 'id': '2', - 'name': 'sg2', - 'description': 'another test security group', - 'tenant_id': '0123456789', - 'rules': [] - } - LIST_SECURITY_GROUP_RESP = [ - FAKE_SECURITY_GROUP_RESP_2, - FAKE_SECURITY_GROUP_RESP, - ] - - def test_security_group_create_default(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/os-security-groups', - json={'security_group': self.FAKE_SECURITY_GROUP_RESP}, - status_code=200, - ) - ret = self.api.security_group_create('sg1') - 
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret) - - def test_security_group_create_options(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/os-security-groups', - json={'security_group': self.FAKE_SECURITY_GROUP_RESP}, - status_code=200, - ) - ret = self.api.security_group_create( - name='sg1', - description='desc', - ) - self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret) - - def test_security_group_delete_id(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups/1', - json={'security_group': self.FAKE_SECURITY_GROUP_RESP}, - status_code=200, - ) - self.requests_mock.register_uri( - 'DELETE', - FAKE_URL + '/os-security-groups/1', - status_code=202, - ) - ret = self.api.security_group_delete('1') - self.assertEqual(202, ret.status_code) - self.assertEqual("", ret.text) - - def test_security_group_delete_name(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups/sg1', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups', - json={'security_groups': self.LIST_SECURITY_GROUP_RESP}, - status_code=200, - ) - self.requests_mock.register_uri( - 'DELETE', - FAKE_URL + '/os-security-groups/1', - status_code=202, - ) - ret = self.api.security_group_delete('sg1') - self.assertEqual(202, ret.status_code) - self.assertEqual("", ret.text) - - def test_security_group_delete_not_found(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups/sg3', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups', - json={'security_groups': self.LIST_SECURITY_GROUP_RESP}, - status_code=200, - ) + def test_find_network_not_found(self): + data = {'networks': []} + self.compute_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] self.assertRaises( osc_lib_exceptions.NotFound, - self.api.security_group_delete, - 'sg3', - ) + compute.find_network, + self.compute_client, + 'invalid-net', + ) + + def test_find_network_by_name_duplicate(self): + net_name = 'name-' + uuid.uuid4().hex + data = { + 'networks': [ + { + 'id': uuid.uuid4().hex, + 'label': net_name, + # other fields omitted for brevity + }, + { + 'id': uuid.uuid4().hex, + 'label': net_name, + # other fields omitted for brevity + }, + ], + } + self.compute_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] - def test_security_group_find_id(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups/1', - json={'security_group': self.FAKE_SECURITY_GROUP_RESP}, - status_code=200, - ) - ret = self.api.security_group_find('1') - self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret) - - def test_security_group_find_name(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups/sg2', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups', - json={'security_groups': self.LIST_SECURITY_GROUP_RESP}, - status_code=200, - ) - ret = self.api.security_group_find('sg2') - self.assertEqual(self.FAKE_SECURITY_GROUP_RESP_2, ret) - - def test_security_group_find_not_found(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups/sg3', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups', - json={'security_groups': self.LIST_SECURITY_GROUP_RESP}, - status_code=200, - 
) self.assertRaises( osc_lib_exceptions.NotFound, - self.api.security_group_find, - 'sg3', + compute.find_network, + self.compute_client, + net_name, ) - def test_security_group_list_no_options(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups', - json={'security_groups': self.LIST_SECURITY_GROUP_RESP}, - status_code=200, + def test_delete_network(self): + net_id = uuid.uuid4().hex + self.compute_client.delete.return_value = fakes.FakeResponse( + status_code=http.HTTPStatus.NO_CONTENT ) - ret = self.api.security_group_list() - self.assertEqual(self.LIST_SECURITY_GROUP_RESP, ret) - - def test_security_group_set_options_id(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups/1', - json={'security_group': self.FAKE_SECURITY_GROUP_RESP}, - status_code=200, - ) - self.requests_mock.register_uri( - 'PUT', - FAKE_URL + '/os-security-groups/1', - json={'security_group': self.FAKE_SECURITY_GROUP_RESP}, - status_code=200, - ) - ret = self.api.security_group_set( - security_group='1', - description='desc2') - self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret) - - def test_security_group_set_options_name(self): - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups/sg2', - status_code=404, - ) - self.requests_mock.register_uri( - 'GET', - FAKE_URL + '/os-security-groups', - json={'security_groups': self.LIST_SECURITY_GROUP_RESP}, - status_code=200, - ) - self.requests_mock.register_uri( - 'PUT', - FAKE_URL + '/os-security-groups/2', - json={'security_group': self.FAKE_SECURITY_GROUP_RESP_2}, - status_code=200, - ) - ret = self.api.security_group_set( - security_group='sg2', - description='desc2') - self.assertEqual(self.FAKE_SECURITY_GROUP_RESP_2, ret) - - -class TestSecurityGroupRule(TestComputeAPIv2): - - FAKE_SECURITY_GROUP_RULE_RESP = { - 'id': '1', - 'name': 'sgr1', - 'tenant_id': 'proj-1', - 'ip_protocol': 'TCP', - 'from_port': 1, - 'to_port': 22, - 'group': {}, - # 'ip_range': , - # 'cidr': , - # 'parent_group_id': , - } - - def test_security_group_create_no_options(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/os-security-group-rules', - json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP}, - status_code=200, + + result = compute.delete_network(self.compute_client, net_id) + + self.compute_client.delete.assert_called_once_with( + f'/os-networks/{net_id}', microversion='2.1' ) - ret = self.api.security_group_rule_create( - security_group_id='1', - ip_protocol='tcp', + self.assertIsNone(result) + + +class TestFloatingIP(utils.TestCase): + def setUp(self): + super().setUp() + + self.compute_client = mock.Mock(_proxy.Proxy) + + def test_create_floating_ip(self): + network = 'network-' + uuid.uuid4().hex + data = { + 'floating_ip': { + 'fixed_ip': None, + 'id': uuid.uuid4().hex, + 'instance_id': None, + 'ip': '172.24.4.17', + 'pool': network, + } + } + self.compute_client.post.return_value = fakes.FakeResponse(data=data) + + result = compute.create_floating_ip( + self.compute_client, network=network ) - self.assertEqual(self.FAKE_SECURITY_GROUP_RULE_RESP, ret) - - def test_security_group_create_options(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/os-security-group-rules', - json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP}, - status_code=200, + + self.compute_client.post.assert_called_once_with( + '/os-floating-ips', data={'pool': network}, microversion='2.1' ) - ret = self.api.security_group_rule_create( - security_group_id='1', - 
ip_protocol='tcp', - from_port=22, - to_port=22, - remote_ip='1.2.3.4/24', + self.assertEqual(data['floating_ip'], result) + + def test_list_floating_ips(self): + data = { + 'floating_ips': [ + { + 'fixed_ip': None, + 'id': uuid.uuid4().hex, + 'instance_id': None, + 'ip': '172.24.4.17', + 'pool': f'network-{uuid.uuid4().hex}', + } + ], + } + self.compute_client.get.return_value = fakes.FakeResponse(data=data) + + result = compute.list_floating_ips(self.compute_client) + + self.compute_client.get.assert_called_once_with( + '/os-floating-ips', microversion='2.1' ) - self.assertEqual(self.FAKE_SECURITY_GROUP_RULE_RESP, ret) - - def test_security_group_create_port_errors(self): - self.requests_mock.register_uri( - 'POST', - FAKE_URL + '/os-security-group-rules', - json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP}, - status_code=200, + self.assertEqual(data['floating_ips'], result) + + def test_get_floating_ip(self): + fip_id = uuid.uuid4().hex + data = { + 'floating_ip': { + 'fixed_ip': None, + 'id': fip_id, + 'instance_id': None, + 'ip': '172.24.4.17', + 'pool': f'network-{uuid.uuid4().hex}', + } + } + self.compute_client.get.side_effect = [ + fakes.FakeResponse(data=data), + ] + + result = compute.get_floating_ip(self.compute_client, fip_id) + + self.compute_client.get.assert_called_once_with( + f'/os-floating-ips/{fip_id}', microversion='2.1' ) - self.assertRaises( - compute.InvalidValue, - self.api.security_group_rule_create, - security_group_id='1', - ip_protocol='tcp', - from_port='', - to_port=22, - remote_ip='1.2.3.4/24', + self.assertEqual(data['floating_ip'], result) + + def test_delete_floating_ip(self): + fip_id = uuid.uuid4().hex + self.compute_client.delete.return_value = fakes.FakeResponse( + status_code=http.HTTPStatus.NO_CONTENT ) - self.assertRaises( - compute.InvalidValue, - self.api.security_group_rule_create, - security_group_id='1', - ip_protocol='tcp', - from_port=0, - to_port=[], - remote_ip='1.2.3.4/24', + + result = compute.delete_floating_ip(self.compute_client, fip_id) + + self.compute_client.delete.assert_called_once_with( + f'/os-floating-ips/{fip_id}', microversion='2.1' ) + self.assertIsNone(result) + + +class TestFloatingIPPool(utils.TestCase): + def setUp(self): + super().setUp() + + self.compute_client = mock.Mock(_proxy.Proxy) + + def test_list_floating_ip_pools(self): + data = { + 'floating_ip_pools': [ + { + 'name': f'pool-{uuid.uuid4().hex}', + } + ], + } + self.compute_client.get.return_value = fakes.FakeResponse(data=data) + + result = compute.list_floating_ip_pools(self.compute_client) - def test_security_group_rule_delete(self): - self.requests_mock.register_uri( - 'DELETE', - FAKE_URL + '/os-security-group-rules/1', - status_code=202, + self.compute_client.get.assert_called_once_with( + '/os-floating-ip-pools', microversion='2.1' ) - ret = self.api.security_group_rule_delete('1') - self.assertEqual(202, ret.status_code) - self.assertEqual("", ret.text) + self.assertEqual(data['floating_ip_pools'], result) diff --git a/openstackclient/tests/unit/api/test_image_v1.py b/openstackclient/tests/unit/api/test_image_v1.py index 6ce3ddeac0..4d0ac53ea9 100644 --- a/openstackclient/tests/unit/api/test_image_v1.py +++ b/openstackclient/tests/unit/api/test_image_v1.py @@ -25,9 +25,8 @@ class TestImageAPIv1(utils.TestCase): - def setUp(self): - super(TestImageAPIv1, self).setUp() + super().setUp() sess = session.Session() self.api = image_v1.APIv1(session=sess, endpoint=FAKE_URL) @@ -35,7 +34,6 @@ def setUp(self): class TestImage(TestImageAPIv1): - 
PUB_PROT = { 'id': '1', 'name': 'pub1', diff --git a/openstackclient/tests/unit/api/test_image_v2.py b/openstackclient/tests/unit/api/test_image_v2.py index 22490e4632..a4cb606d87 100644 --- a/openstackclient/tests/unit/api/test_image_v2.py +++ b/openstackclient/tests/unit/api/test_image_v2.py @@ -25,9 +25,8 @@ class TestImageAPIv2(utils.TestCase): - def setUp(self): - super(TestImageAPIv2, self).setUp() + super().setUp() sess = session.Session() self.api = image_v2.APIv2(session=sess, endpoint=FAKE_URL) @@ -35,7 +34,6 @@ def setUp(self): class TestImage(TestImageAPIv2): - PUB_PROT = { 'id': '1', 'name': 'pub1', diff --git a/openstackclient/tests/unit/api/test_object_store_v1.py b/openstackclient/tests/unit/api/test_object_store_v1.py index b9e0740c88..3ff22f5090 100644 --- a/openstackclient/tests/unit/api/test_object_store_v1.py +++ b/openstackclient/tests/unit/api/test_object_store_v1.py @@ -30,10 +30,18 @@ FAKE_OBJECT = 'spigot' LIST_CONTAINER_RESP = [ - {"name": "qaz", "count": 0, "bytes": 0, - "last_modified": "2020-05-16T05:52:07.377550"}, - {"name": "fred", "count": 0, "bytes": 0, - "last_modified": "2020-05-16T05:55:07.377550"}, + { + "name": "qaz", + "count": 0, + "bytes": 0, + "last_modified": "2020-05-16T05:52:07.377550", + }, + { + "name": "fred", + "count": 0, + "bytes": 0, + "last_modified": "2020-05-16T05:55:07.377550", + }, ] LIST_OBJECT_RESP = [ @@ -43,18 +51,16 @@ class TestObjectAPIv1(utils.TestCase): - def setUp(self): - super(TestObjectAPIv1, self).setUp() + super().setUp() sess = session.Session() self.api = object_store.APIv1(session=sess, endpoint=FAKE_URL) self.requests_mock = self.useFixture(fixture.Fixture()) class TestContainer(TestObjectAPIv1): - def setUp(self): - super(TestContainer, self).setUp() + super().setUp() def test_container_create(self): headers = { @@ -128,15 +134,19 @@ def test_container_list_full_listing(self): ) self.requests_mock.register_uri( 'GET', - FAKE_URL + - '?marker=%s&limit=1&format=json' % LIST_CONTAINER_RESP[0]['name'], + FAKE_URL + + '?marker={}&limit=1&format=json'.format( + LIST_CONTAINER_RESP[0]['name'] + ), json=[LIST_CONTAINER_RESP[1]], status_code=200, ) self.requests_mock.register_uri( 'GET', - FAKE_URL + - '?marker=%s&limit=1&format=json' % LIST_CONTAINER_RESP[1]['name'], + FAKE_URL + + '?marker={}&limit=1&format=json'.format( + LIST_CONTAINER_RESP[1]['name'] + ), json=[], status_code=200, ) @@ -151,7 +161,7 @@ def test_container_show(self): 'X-Container-Meta-Owner': FAKE_ACCOUNT, 'x-container-object-count': '1', 'x-container-bytes-used': '577', - 'x-storage-policy': 'o1--sr-r3' + 'x-storage-policy': 'o1--sr-r3', } resp = { 'account': FAKE_ACCOUNT, @@ -172,11 +182,10 @@ def test_container_show(self): class TestObject(TestObjectAPIv1): - def setUp(self): - super(TestObject, self).setUp() + super().setUp() - @mock.patch('openstackclient.api.object_store_v1.io.open') + @mock.patch('openstackclient.api.object_store_v1.open') def base_object_create(self, file_contents, mock_open): mock_open.read.return_value = file_contents @@ -209,7 +218,7 @@ def base_object_create(self, file_contents, mock_open): def test_object_create(self): self.base_object_create('111\n222\n333\n') - self.base_object_create(bytes([0x31, 0x00, 0x0d, 0x0a, 0x7f, 0xff])) + self.base_object_create(bytes([0x31, 0x00, 0x0D, 0x0A, 0x7F, 0xFF])) def test_object_delete(self): self.requests_mock.register_uri( @@ -274,35 +283,35 @@ def test_object_list_marker_limit_end(self): ) self.assertEqual(LIST_CONTAINER_RESP, ret) -# def test_list_objects_full_listing(self): -# 
sess = self.app.client_manager.session -# -# def side_effect(*args, **kwargs): -# rv = sess.get().json.return_value -# sess.get().json.return_value = [] -# sess.get().json.side_effect = None -# return rv -# -# resp = [{'name': 'is-name'}] -# sess.get().json.return_value = resp -# sess.get().json.side_effect = side_effect -# -# data = lib_object.list_objects( -# sess, -# fake_url, -# fake_container, -# full_listing=True, -# ) -# -# # Check expected values -# sess.get.assert_called_with( -# fake_url + '/' + fake_container, -# params={ -# 'format': 'json', -# 'marker': 'is-name', -# } -# ) -# self.assertEqual(resp, data) + # def test_list_objects_full_listing(self): + # sess = self.app.client_manager.session + # + # def side_effect(*args, **kwargs): + # rv = sess.get().json.return_value + # sess.get().json.return_value = [] + # sess.get().json.side_effect = None + # return rv + # + # resp = [{'name': 'is-name'}] + # sess.get().json.return_value = resp + # sess.get().json.side_effect = side_effect + # + # data = lib_object.list_objects( + # sess, + # fake_url, + # fake_container, + # full_listing=True, + # ) + # + # # Check expected values + # sess.get.assert_called_with( + # fake_url + '/' + fake_container, + # params={ + # 'format': 'json', + # 'marker': 'is-name', + # } + # ) + # self.assertEqual(resp, data) def test_object_show(self): headers = { @@ -323,8 +332,7 @@ def test_object_show(self): 'content-length': '577', 'last-modified': '20130101', 'etag': 'qaz', - 'properties': {'wife': 'Wilma', - 'Husband': 'fred'}, + 'properties': {'wife': 'Wilma', 'Husband': 'fred'}, } self.requests_mock.register_uri( 'HEAD', diff --git a/openstackclient/tests/unit/api/test_volume_v2.py b/openstackclient/tests/unit/api/test_volume_v2.py new file mode 100644 index 0000000000..046d1cb9ba --- /dev/null +++ b/openstackclient/tests/unit/api/test_volume_v2.py @@ -0,0 +1,124 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""Volume v2 API Library Tests""" + +import http +from unittest import mock +import uuid + +from openstack.block_storage.v2 import _proxy +from osc_lib import exceptions as osc_lib_exceptions + +from openstackclient.api import volume_v2 as volume +from openstackclient.tests.unit import fakes +from openstackclient.tests.unit import utils + + +class TestConsistencyGroup(utils.TestCase): + def setUp(self): + super().setUp() + + self.volume_sdk_client = mock.Mock(_proxy.Proxy) + + def test_find_consistency_group_by_id(self): + cg_id = uuid.uuid4().hex + cg_name = 'name-' + uuid.uuid4().hex + data = { + 'consistencygroup': { + 'id': cg_id, + 'name': cg_name, + 'status': 'available', + 'availability_zone': 'az1', + 'created_at': '2015-09-16T09:28:52.000000', + 'description': 'description-' + uuid.uuid4().hex, + 'volume_types': ['123456'], + } + } + self.volume_sdk_client.get.side_effect = [ + fakes.FakeResponse(data=data), + ] + + result = volume.find_consistency_group(self.volume_sdk_client, cg_id) + + self.volume_sdk_client.get.assert_has_calls( + [ + mock.call(f'/consistencygroups/{cg_id}'), + ] + ) + self.assertEqual(data['consistencygroup'], result) + + def test_find_consistency_group_by_name(self): + cg_id = uuid.uuid4().hex + cg_name = 'name-' + uuid.uuid4().hex + data = { + 'consistencygroups': [ + { + 'id': cg_id, + 'name': cg_name, + } + ], + } + self.volume_sdk_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] + + result = volume.find_consistency_group(self.volume_sdk_client, cg_name) + + self.volume_sdk_client.get.assert_has_calls( + [ + mock.call(f'/consistencygroups/{cg_name}'), + mock.call('/consistencygroups'), + ] + ) + self.assertEqual(data['consistencygroups'][0], result) + + def test_find_consistency_group_not_found(self): + data = {'consistencygroups': []} + self.volume_sdk_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] + self.assertRaises( + osc_lib_exceptions.NotFound, + volume.find_consistency_group, + self.volume_sdk_client, + 'invalid-cg', + ) + + def test_find_consistency_group_by_name_duplicate(self): + cg_name = 'name-' + uuid.uuid4().hex + data = { + 'consistencygroups': [ + { + 'id': uuid.uuid4().hex, + 'name': cg_name, + }, + { + 'id': uuid.uuid4().hex, + 'name': cg_name, + }, + ], + } + self.volume_sdk_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] + + self.assertRaises( + osc_lib_exceptions.NotFound, + volume.find_consistency_group, + self.volume_sdk_client, + cg_name, + ) diff --git a/openstackclient/tests/unit/api/test_volume_v3.py b/openstackclient/tests/unit/api/test_volume_v3.py new file mode 100644 index 0000000000..d70f899334 --- /dev/null +++ b/openstackclient/tests/unit/api/test_volume_v3.py @@ -0,0 +1,124 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""Volume v3 API Library Tests""" + +import http +from unittest import mock +import uuid + +from openstack.block_storage.v3 import _proxy +from osc_lib import exceptions as osc_lib_exceptions + +from openstackclient.api import volume_v3 as volume +from openstackclient.tests.unit import fakes +from openstackclient.tests.unit import utils + + +class TestConsistencyGroup(utils.TestCase): + def setUp(self): + super().setUp() + + self.volume_sdk_client = mock.Mock(_proxy.Proxy) + + def test_find_consistency_group_by_id(self): + cg_id = uuid.uuid4().hex + cg_name = 'name-' + uuid.uuid4().hex + data = { + 'consistencygroup': { + 'id': cg_id, + 'name': cg_name, + 'status': 'available', + 'availability_zone': 'az1', + 'created_at': '2015-09-16T09:28:52.000000', + 'description': 'description-' + uuid.uuid4().hex, + 'volume_types': ['123456'], + } + } + self.volume_sdk_client.get.side_effect = [ + fakes.FakeResponse(data=data), + ] + + result = volume.find_consistency_group(self.volume_sdk_client, cg_id) + + self.volume_sdk_client.get.assert_has_calls( + [ + mock.call(f'/consistencygroups/{cg_id}'), + ] + ) + self.assertEqual(data['consistencygroup'], result) + + def test_find_consistency_group_by_name(self): + cg_id = uuid.uuid4().hex + cg_name = 'name-' + uuid.uuid4().hex + data = { + 'consistencygroups': [ + { + 'id': cg_id, + 'name': cg_name, + } + ], + } + self.volume_sdk_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] + + result = volume.find_consistency_group(self.volume_sdk_client, cg_name) + + self.volume_sdk_client.get.assert_has_calls( + [ + mock.call(f'/consistencygroups/{cg_name}'), + mock.call('/consistencygroups'), + ] + ) + self.assertEqual(data['consistencygroups'][0], result) + + def test_find_consistency_group_not_found(self): + data = {'consistencygroups': []} + self.volume_sdk_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] + self.assertRaises( + osc_lib_exceptions.NotFound, + volume.find_consistency_group, + self.volume_sdk_client, + 'invalid-cg', + ) + + def test_find_consistency_group_by_name_duplicate(self): + cg_name = 'name-' + uuid.uuid4().hex + data = { + 'consistencygroups': [ + { + 'id': uuid.uuid4().hex, + 'name': cg_name, + }, + { + 'id': uuid.uuid4().hex, + 'name': cg_name, + }, + ], + } + self.volume_sdk_client.get.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), + fakes.FakeResponse(data=data), + ] + + self.assertRaises( + osc_lib_exceptions.NotFound, + volume.find_consistency_group, + self.volume_sdk_client, + cg_name, + ) diff --git a/openstackclient/tests/unit/common/test_availability_zone.py b/openstackclient/tests/unit/common/test_availability_zone.py index 096038cafe..d1383b5409 100644 --- a/openstackclient/tests/unit/common/test_availability_zone.py +++ b/openstackclient/tests/unit/common/test_availability_zone.py @@ -9,30 +9,26 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-# - -from unittest import mock from openstackclient.common import availability_zone from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.network.v2 import fakes as network_fakes from openstackclient.tests.unit import utils -from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes def _build_compute_az_datalist(compute_az, long_datalist=False): datalist = () if not long_datalist: datalist = ( - compute_az.zoneName, + compute_az.name, 'available', ) else: for host, services in compute_az.hosts.items(): for service, state in services.items(): datalist += ( - compute_az.zoneName, + compute_az.name, 'available', '', host, @@ -46,14 +42,17 @@ def _build_volume_az_datalist(volume_az, long_datalist=False): datalist = () if not long_datalist: datalist = ( - volume_az.zoneName, + volume_az.name, 'available', ) else: datalist = ( - volume_az.zoneName, + volume_az.name, 'available', - '', '', '', '', + '', + '', + '', + '', ) return (datalist,) @@ -70,49 +69,20 @@ def _build_network_az_datalist(network_az, long_datalist=False): network_az.name, network_az.state, network_az.resource, - '', '', '', + '', + '', + '', ) return (datalist,) -class TestAvailabilityZone(utils.TestCommand): - - def setUp(self): - super().setUp() - - compute_client = compute_fakes.FakeComputev2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.compute = compute_client - - self.compute_azs_mock = compute_client.availability_zones - self.compute_azs_mock.reset_mock() - - volume_client = volume_fakes.FakeVolumeClient( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.volume = volume_client - - self.volume_azs_mock = volume_client.availability_zones - self.volume_azs_mock.reset_mock() - - network_client = network_fakes.FakeNetworkV2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.network = network_client - - network_client.availability_zones = mock.Mock() - network_client.find_extension = mock.Mock() - self.network_azs_mock = network_client.availability_zones - - -class TestAvailabilityZoneList(TestAvailabilityZone): - - compute_azs = \ - compute_fakes.FakeAvailabilityZone.create_availability_zones() +class TestAvailabilityZoneList( + network_fakes.FakeClientMixin, + volume_fakes.FakeClientMixin, + compute_fakes.FakeClientMixin, + utils.TestCommand, +): + compute_azs = compute_fakes.create_availability_zones() volume_azs = volume_fakes.create_availability_zones(count=1) network_azs = network_fakes.create_availability_zones() @@ -129,9 +99,11 @@ class TestAvailabilityZoneList(TestAvailabilityZone): def setUp(self): super().setUp() - self.compute_azs_mock.list.return_value = self.compute_azs - self.volume_azs_mock.list.return_value = self.volume_azs - self.network_azs_mock.return_value = self.network_azs + self.compute_client.availability_zones.return_value = self.compute_azs + self.volume_sdk_client.availability_zones.return_value = ( + self.volume_azs + ) + self.network_client.availability_zones.return_value = self.network_azs # Get the command object to test self.cmd = availability_zone.ListAvailabilityZone(self.app, None) @@ -146,9 +118,9 @@ def test_availability_zone_list_no_options(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.compute_azs_mock.list.assert_called_with() - self.volume_azs_mock.list.assert_called_with() - self.network_azs_mock.assert_called_with() + self.compute_client.availability_zones.assert_called_with(details=True) + self.volume_sdk_client.availability_zones.assert_called_with() + self.network_client.availability_zones.assert_called_with() self.assertEqual(self.short_columnslist, columns) datalist = () @@ -174,21 +146,24 @@ def test_availability_zone_list_long(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.compute_azs_mock.list.assert_called_with() - self.volume_azs_mock.list.assert_called_with() - self.network_azs_mock.assert_called_with() + self.compute_client.availability_zones.assert_called_with(details=True) + self.volume_sdk_client.availability_zones.assert_called_with() + self.network_client.availability_zones.assert_called_with() self.assertEqual(self.long_columnslist, columns) datalist = () for compute_az in self.compute_azs: - datalist += _build_compute_az_datalist(compute_az, - long_datalist=True) + datalist += _build_compute_az_datalist( + compute_az, long_datalist=True + ) for volume_az in self.volume_azs: - datalist += _build_volume_az_datalist(volume_az, - long_datalist=True) + datalist += _build_volume_az_datalist( + volume_az, long_datalist=True + ) for network_az in self.network_azs: - datalist += _build_network_az_datalist(network_az, - long_datalist=True) + datalist += _build_network_az_datalist( + network_az, long_datalist=True + ) self.assertEqual(datalist, tuple(data)) def test_availability_zone_list_compute(self): @@ -205,9 +180,9 @@ def test_availability_zone_list_compute(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.compute_azs_mock.list.assert_called_with() - self.volume_azs_mock.list.assert_not_called() - self.network_azs_mock.assert_not_called() + self.compute_client.availability_zones.assert_called_with(details=True) + self.volume_sdk_client.availability_zones.assert_not_called() + self.network_client.availability_zones.assert_not_called() self.assertEqual(self.short_columnslist, columns) datalist = () @@ -229,9 +204,9 @@ def test_availability_zone_list_volume(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.compute_azs_mock.list.assert_not_called() - self.volume_azs_mock.list.assert_called_with() - self.network_azs_mock.assert_not_called() + self.compute_client.availability_zones.assert_not_called() + self.volume_sdk_client.availability_zones.assert_called_with() + self.network_client.availability_zones.assert_not_called() self.assertEqual(self.short_columnslist, columns) datalist = () @@ -253,9 +228,9 @@ def test_availability_zone_list_network(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.compute_azs_mock.list.assert_not_called() - self.volume_azs_mock.list.assert_not_called() - self.network_azs_mock.assert_called_with() + self.compute_client.availability_zones.assert_not_called() + self.volume_sdk_client.availability_zones.assert_not_called() + self.network_client.availability_zones.assert_called_with() self.assertEqual(self.short_columnslist, columns) datalist = () diff --git a/openstackclient/tests/unit/common/test_clientmanager.py b/openstackclient/tests/unit/common/test_clientmanager.py index a83b131800..165dcc12e7 100644 --- a/openstackclient/tests/unit/common/test_clientmanager.py +++ b/openstackclient/tests/unit/common/test_clientmanager.py @@ -23,7 +23,6 @@ class TestClientManager(osc_lib_test_utils.TestClientManager): - def _clientmanager_class(self): """Allow subclasses to override the ClientManager class""" return clientmanager.ClientManager @@ -54,10 +53,12 @@ def test_client_manager_admin_token(self): def test_client_manager_network_endpoint_disabled(self): auth_args = copy.deepcopy(self.default_password_auth) - auth_args.update({ - 'user_domain_name': 'default', - 'project_domain_name': 'default', - }) + auth_args.update( + { + 'user_domain_name': 'default', + 'project_domain_name': 'default', + } + ) # v3 fake doesn't have network endpoint client_manager = self._make_clientmanager( auth_args=auth_args, diff --git a/openstackclient/tests/unit/common/test_command.py b/openstackclient/tests/unit/common/test_command.py index 4fde5301d5..1f1efceadf 100644 --- a/openstackclient/tests/unit/common/test_command.py +++ b/openstackclient/tests/unit/common/test_command.py @@ -14,26 +14,26 @@ from unittest import mock -from osc_lib.command import command from osc_lib import exceptions +from openstackclient import command from openstackclient.tests.unit import fakes as test_fakes from openstackclient.tests.unit import utils as test_utils class FakeCommand(command.Command): - def take_action(self, parsed_args): pass class TestCommand(test_utils.TestCase): - def test_command_has_logger(self): cmd = FakeCommand(mock.Mock(), mock.Mock()) self.assertTrue(hasattr(cmd, 'log')) - self.assertEqual('openstackclient.tests.unit.common.test_command.' - 'FakeCommand', cmd.log.name) + self.assertEqual( + 'openstackclient.tests.unit.common.test_command.FakeCommand', + cmd.log.name, + ) def test_validate_os_beta_command_enabled(self): cmd = FakeCommand(mock.Mock(), mock.Mock()) @@ -45,5 +45,6 @@ def test_validate_os_beta_command_enabled(self): cmd.validate_os_beta_command_enabled() cmd.app.options.os_beta_command = False - self.assertRaises(exceptions.CommandError, - cmd.validate_os_beta_command_enabled) + self.assertRaises( + exceptions.CommandError, cmd.validate_os_beta_command_enabled + ) diff --git a/openstackclient/tests/unit/common/test_configuration.py b/openstackclient/tests/unit/common/test_configuration.py index 148228ec08..6e1145442b 100644 --- a/openstackclient/tests/unit/common/test_configuration.py +++ b/openstackclient/tests/unit/common/test_configuration.py @@ -19,7 +19,6 @@ class TestConfiguration(utils.TestCommand): - columns = ( 'auth.password', 'auth.token', diff --git a/openstackclient/tests/unit/common/test_extension.py b/openstackclient/tests/unit/common/test_extension.py index bd90b32d1b..dd684312c1 100644 --- a/openstackclient/tests/unit/common/test_extension.py +++ b/openstackclient/tests/unit/common/test_extension.py @@ -9,83 +9,60 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. -# -from unittest import mock from openstackclient.common import extension from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes from openstackclient.tests.unit.network.v2 import fakes as network_fakes from openstackclient.tests.unit import utils from openstackclient.tests.unit import utils as tests_utils -from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes - - -class TestExtension(utils.TestCommand): +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes - def setUp(self): - super().setUp() - identity_client = identity_fakes.FakeIdentityv2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.identity = identity_client - self.identity_extensions_mock = identity_client.extensions - self.identity_extensions_mock.reset_mock() - - sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection = sdk_connection - self.compute_extensions_mock = sdk_connection.compute.extensions - self.compute_extensions_mock.reset_mock() - - volume_client = volume_fakes.FakeVolumeClient( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.volume = volume_client - volume_client.list_extensions = mock.Mock() - self.volume_extensions_mock = volume_client.list_extensions - self.volume_extensions_mock.reset_mock() - - network_client = network_fakes.FakeNetworkV2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.network = network_client - network_client.extensions = mock.Mock() - self.network_extensions_mock = network_client.extensions - self.network_extensions_mock.reset_mock() +class TestExtension( + network_fakes.FakeClientMixin, + compute_fakes.FakeClientMixin, + volume_fakes.FakeClientMixin, + identity_fakes.FakeClientMixin, + utils.TestCommand, +): ... 
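The consolidated TestExtension base above leans on the per-service FakeClientMixin helpers instead of hand-building fake clients in setUp(). The tests that follow assume each mixin wires a mocked SDK proxy into the client manager, which is why they can stub attributes such as self.compute_client.extensions or self.volume_sdk_client.extensions directly. A minimal sketch of that assumed pattern, with illustrative names only (not the real fakes modules), assuming it is mixed in alongside a TestCommand-style base that provides self.app:

    # Hypothetical sketch of a FakeClientMixin-style helper; the real
    # openstackclient test fakes may differ in detail.
    from unittest import mock

    from openstack.compute.v2 import _proxy


    class FakeClientMixin:
        def setUp(self):
            super().setUp()
            # Mock the compute SDK proxy and expose it both on the test
            # case and on the fake client manager used by the commands
            # under test, so individual tests can set return values on
            # self.compute_client without any per-test wiring.
            self.compute_client = mock.Mock(_proxy.Proxy)
            self.app.client_manager.compute = self.compute_client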
class TestExtensionList(TestExtension): - columns = ('Name', 'Alias', 'Description') - long_columns = ('Name', 'Alias', 'Description', 'Namespace', 'Updated', - 'Links') + long_columns = ( + 'Name', + 'Alias', + 'Description', + 'Namespace', + 'Updated At', + 'Links', + ) volume_extension = volume_fakes.create_one_extension() identity_extension = identity_fakes.FakeExtension.create_one_extension() - compute_extension = compute_fakes.FakeExtension.create_one_extension() - network_extension = network_fakes.FakeExtension.create_one_extension() + compute_extension = compute_fakes.create_one_extension() + network_extension = network_fakes.create_one_extension() def setUp(self): super().setUp() - self.identity_extensions_mock.list.return_value = [ - self.identity_extension] - self.compute_extensions_mock.return_value = [self.compute_extension] - self.volume_extensions_mock.show_all.return_value = [ - self.volume_extension] - self.network_extensions_mock.return_value = [self.network_extension] + self.identity_client.extensions.list.return_value = [ + self.identity_extension + ] + self.compute_client.extensions.return_value = [self.compute_extension] + self.volume_sdk_client.extensions.return_value = [ + self.volume_extension + ] + self.network_client.extensions.return_value = [self.network_extension] # Get the command object to test self.cmd = extension.ListExtension(self.app, None) - def _test_extension_list_helper(self, arglist, verifylist, - expected_data, long=False): + def _test_extension_list_helper( + self, arglist, verifylist, expected_data, long=False + ): parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() @@ -125,10 +102,10 @@ def test_extension_list_no_options(self): ), ) self._test_extension_list_helper(arglist, verifylist, datalist) - self.identity_extensions_mock.list.assert_called_with() - self.compute_extensions_mock.assert_called_with() - self.volume_extensions_mock.show_all.assert_called_with() - self.network_extensions_mock.assert_called_with() + self.identity_client.extensions.list.assert_called_with() + self.compute_client.extensions.assert_called_with() + self.volume_sdk_client.extensions.assert_called_with() + self.network_client.extensions.assert_called_with() def test_extension_list_long(self): arglist = [ @@ -143,7 +120,7 @@ def test_extension_list_long(self): self.identity_extension.alias, self.identity_extension.description, self.identity_extension.namespace, - self.identity_extension.updated, + '', self.identity_extension.links, ), ( @@ -151,31 +128,31 @@ def test_extension_list_long(self): self.compute_extension.alias, self.compute_extension.description, self.compute_extension.namespace, - self.compute_extension.updated, + self.compute_extension.updated_at, self.compute_extension.links, ), ( self.volume_extension.name, self.volume_extension.alias, self.volume_extension.description, - self.volume_extension.namespace, - self.volume_extension.updated, + '', + self.volume_extension.updated_at, self.volume_extension.links, ), ( self.network_extension.name, self.network_extension.alias, self.network_extension.description, - self.network_extension.namespace, - self.network_extension.updated, + '', + self.network_extension.updated_at, self.network_extension.links, ), ) self._test_extension_list_helper(arglist, verifylist, datalist, True) - self.identity_extensions_mock.list.assert_called_with() - self.compute_extensions_mock.assert_called_with() - 
self.volume_extensions_mock.show_all.assert_called_with() - self.network_extensions_mock.assert_called_with() + self.identity_client.extensions.list.assert_called_with() + self.compute_client.extensions.assert_called_with() + self.volume_sdk_client.extensions.assert_called_with() + self.network_client.extensions.assert_called_with() def test_extension_list_identity(self): arglist = [ @@ -184,13 +161,15 @@ def test_extension_list_identity(self): verifylist = [ ('identity', True), ] - datalist = (( - self.identity_extension.name, - self.identity_extension.alias, - self.identity_extension.description, - ), ) + datalist = ( + ( + self.identity_extension.name, + self.identity_extension.alias, + self.identity_extension.description, + ), + ) self._test_extension_list_helper(arglist, verifylist, datalist) - self.identity_extensions_mock.list.assert_called_with() + self.identity_client.extensions.list.assert_called_with() def test_extension_list_network(self): arglist = [ @@ -207,7 +186,7 @@ def test_extension_list_network(self): ), ) self._test_extension_list_helper(arglist, verifylist, datalist) - self.network_extensions_mock.assert_called_with() + self.network_client.extensions.assert_called_with() def test_extension_list_network_with_long(self): arglist = [ @@ -218,17 +197,20 @@ def test_extension_list_network_with_long(self): ('network', True), ('long', True), ] - datalist = (( - self.network_extension.name, - self.network_extension.alias, - self.network_extension.description, - self.network_extension.namespace, - self.network_extension.updated, - self.network_extension.links, - ), ) - self._test_extension_list_helper(arglist, verifylist, datalist, - long=True) - self.network_extensions_mock.assert_called_with() + datalist = ( + ( + self.network_extension.name, + self.network_extension.alias, + self.network_extension.description, + '', + self.network_extension.updated_at, + self.network_extension.links, + ), + ) + self._test_extension_list_helper( + arglist, verifylist, datalist, long=True + ) + self.network_client.extensions.assert_called_with() def test_extension_list_compute(self): arglist = [ @@ -237,13 +219,15 @@ def test_extension_list_compute(self): verifylist = [ ('compute', True), ] - datalist = (( - self.compute_extension.name, - self.compute_extension.alias, - self.compute_extension.description, - ), ) + datalist = ( + ( + self.compute_extension.name, + self.compute_extension.alias, + self.compute_extension.description, + ), + ) self._test_extension_list_helper(arglist, verifylist, datalist) - self.compute_extensions_mock.assert_called_with() + self.compute_client.extensions.assert_called_with() def test_extension_list_compute_and_network(self): arglist = [ @@ -267,8 +251,8 @@ def test_extension_list_compute_and_network(self): ), ) self._test_extension_list_helper(arglist, verifylist, datalist) - self.compute_extensions_mock.assert_called_with() - self.network_extensions_mock.assert_called_with() + self.compute_client.extensions.assert_called_with() + self.network_client.extensions.assert_called_with() def test_extension_list_volume(self): arglist = [ @@ -277,36 +261,32 @@ def test_extension_list_volume(self): verifylist = [ ('volume', True), ] - datalist = (( - self.volume_extension.name, - self.volume_extension.alias, - self.volume_extension.description, - ), ) + datalist = ( + ( + self.volume_extension.name, + self.volume_extension.alias, + self.volume_extension.description, + ), + ) self._test_extension_list_helper(arglist, verifylist, datalist) - 
self.volume_extensions_mock.show_all.assert_called_with() + self.volume_sdk_client.extensions.assert_called_with() class TestExtensionShow(TestExtension): - extension_details = ( - network_fakes.FakeExtension.create_one_extension() - ) + extension_details = network_fakes.create_one_extension() columns = ( 'alias', 'description', - 'links', 'name', - 'namespace', - 'updated', + 'updated_at', ) data = ( extension_details.alias, extension_details.description, - extension_details.links, extension_details.name, - extension_details.namespace, - extension_details.updated + extension_details.updated_at, ) def setUp(self): @@ -314,15 +294,21 @@ def setUp(self): self.cmd = extension.ShowExtension(self.app, None) - self.app.client_manager.network.find_extension = mock.Mock( - return_value=self.extension_details) + self.app.client_manager.network.find_extension.return_value = ( + self.extension_details + ) def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -337,7 +323,8 @@ def test_show_all_options(self): columns, data = self.cmd.take_action(parsed_args) self.app.client_manager.network.find_extension.assert_called_with( - self.extension_details.alias, ignore_missing=False) + self.extension_details.alias, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/common/test_limits.py b/openstackclient/tests/unit/common/test_limits.py index e3cdcf45a2..a375a2ae5a 100644 --- a/openstackclient/tests/unit/common/test_limits.py +++ b/openstackclient/tests/unit/common/test_limits.py @@ -13,32 +13,62 @@ from openstackclient.common import limits from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes class TestComputeLimits(compute_fakes.TestComputev2): - - absolute_columns = [ - 'Name', - 'Value', - ] - - rate_columns = [ - "Verb", - "URI", - "Value", - "Remain", - "Unit", - "Next Available" - ] + absolute_columns = ['Name', 'Value'] + rate_columns = ["Verb", "URI", "Value", "Remain", "Unit", "Next Available"] def setUp(self): super().setUp() self.app.client_manager.volume_endpoint_enabled = False - self.compute = self.app.client_manager.compute - self.fake_limits = compute_fakes.FakeLimits() - self.compute.limits.get.return_value = self.fake_limits + self.fake_limits = compute_fakes.create_limits() + + self.absolute_data = [ + ('floating_ips', 10), + ('floating_ips_used', 0), + ('image_meta', 128), + ('instances', 10), + ('instances_used', 0), + ('keypairs', 100), + ('max_image_meta', 128), + ('max_security_group_rules', 20), + ('max_security_groups', 10), + ('max_server_group_members', 10), + ('max_server_groups', 10), + ('max_server_meta', 128), + ('max_total_cores', 20), + ('max_total_floating_ips', 10), + ('max_total_instances', 10), + ('max_total_keypairs', 100), + ('max_total_ram_size', 51200), + ('personality', 5), + ('personality_size', 10240), + ('security_group_rules', 20), + ('security_groups', 10), + ('security_groups_used', 0), + ('server_group_members', 10), + ('server_groups', 10), + ('server_groups_used', 0), + ('server_meta', 128), + ('total_cores', 20), + ('total_cores_used', 0), + 
('total_floating_ips_used', 0), + ('total_instances_used', 0), + ('total_ram', 51200), + ('total_ram_used', 0), + ('total_security_groups_used', 0), + ('total_server_groups_used', 0), + ] + self.rate_data = [ + ('POST', '*', 10, 2, 'MINUTE', '2011-12-15T22:42:45Z'), + ('PUT', '*', 10, 2, 'MINUTE', '2011-12-15T22:42:45Z'), + ('DELETE', '*', 100, 100, 'MINUTE', '2011-12-15T22:42:45Z'), + ] + + self.compute_client.get_limits.return_value = self.fake_limits def test_compute_show_absolute(self): arglist = ['--absolute'] @@ -48,12 +78,8 @@ def test_compute_show_absolute(self): columns, data = cmd.take_action(parsed_args) - ret_limits = list(data) - compute_reference_limits = self.fake_limits.absolute_limits() - self.assertEqual(self.absolute_columns, columns) - self.assertEqual(compute_reference_limits, ret_limits) - self.assertEqual(19, len(ret_limits)) + self.assertEqual(self.absolute_data, data) def test_compute_show_rate(self): arglist = ['--rate'] @@ -63,36 +89,39 @@ def test_compute_show_rate(self): columns, data = cmd.take_action(parsed_args) - ret_limits = list(data) - compute_reference_limits = self.fake_limits.rate_limits() - self.assertEqual(self.rate_columns, columns) - self.assertEqual(compute_reference_limits, ret_limits) - self.assertEqual(3, len(ret_limits)) + self.assertEqual(self.rate_data, data) class TestVolumeLimits(volume_fakes.TestVolume): - absolute_columns = [ - 'Name', - 'Value', - ] - - rate_columns = [ - "Verb", - "URI", - "Value", - "Remain", - "Unit", - "Next Available" - ] + absolute_columns = ['Name', 'Value'] + rate_columns = ["Verb", "URI", "Value", "Remain", "Unit", "Next Available"] def setUp(self): super().setUp() self.app.client_manager.compute_endpoint_enabled = False - self.volume = self.app.client_manager.volume - self.fake_limits = volume_fakes.FakeLimits() - self.volume.limits.get.return_value = self.fake_limits + self.fake_limits = volume_fakes.create_limits() + + self.absolute_data = [ + ('max_total_backup_gigabytes', 1000), + ('max_total_backups', 10), + ('max_total_snapshots', 10), + ('max_total_volume_gigabytes', 1000), + ('max_total_volumes', 10), + ('total_backup_gigabytes_used', 0), + ('total_backups_used', 0), + ('total_gigabytes_used', 35), + ('total_snapshots_used', 1), + ('total_volumes_used', 4), + ] + self.rate_data = [ + ('POST', '*', 10, 2, 'MINUTE', '2011-12-15T22:42:45Z'), + ('PUT', '*', 10, 2, 'MINUTE', '2011-12-15T22:42:45Z'), + ('DELETE', '*', 100, 100, 'MINUTE', '2011-12-15T22:42:45Z'), + ] + + self.volume_sdk_client.get_limits.return_value = self.fake_limits def test_volume_show_absolute(self): arglist = ['--absolute'] @@ -102,12 +131,8 @@ def test_volume_show_absolute(self): columns, data = cmd.take_action(parsed_args) - ret_limits = list(data) - compute_reference_limits = self.fake_limits.absolute_limits() - self.assertEqual(self.absolute_columns, columns) - self.assertEqual(compute_reference_limits, ret_limits) - self.assertEqual(10, len(ret_limits)) + self.assertEqual(self.absolute_data, data) def test_volume_show_rate(self): arglist = ['--rate'] @@ -117,9 +142,5 @@ def test_volume_show_rate(self): columns, data = cmd.take_action(parsed_args) - ret_limits = list(data) - compute_reference_limits = self.fake_limits.rate_limits() - self.assertEqual(self.rate_columns, columns) - self.assertEqual(compute_reference_limits, ret_limits) - self.assertEqual(3, len(ret_limits)) + self.assertEqual(self.rate_data, data) diff --git a/openstackclient/tests/unit/common/test_logs.py b/openstackclient/tests/unit/common/test_logs.py deleted file 
mode 100644 index 0e7105619d..0000000000 --- a/openstackclient/tests/unit/common/test_logs.py +++ /dev/null @@ -1,208 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -# NOTE(dtroyer): This file is deprecated in Jun 2016, remove after 4.x release -# or Jun 2017. - -import logging -from unittest import mock - -from osc_lib import logs - -from openstackclient.tests.unit import utils - - -class TestContext(utils.TestCase): - - def test_log_level_from_options(self): - opts = mock.Mock() - opts.verbose_level = 0 - self.assertEqual(logging.ERROR, logs.log_level_from_options(opts)) - opts.verbose_level = 1 - self.assertEqual(logging.WARNING, logs.log_level_from_options(opts)) - opts.verbose_level = 2 - self.assertEqual(logging.INFO, logs.log_level_from_options(opts)) - opts.verbose_level = 3 - self.assertEqual(logging.DEBUG, logs.log_level_from_options(opts)) - - def test_log_level_from_config(self): - cfg = {'verbose_level': 0} - self.assertEqual(logging.ERROR, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 1} - self.assertEqual(logging.WARNING, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 2} - self.assertEqual(logging.INFO, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 3} - self.assertEqual(logging.DEBUG, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 1, 'log_level': 'critical'} - self.assertEqual(logging.CRITICAL, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 1, 'log_level': 'error'} - self.assertEqual(logging.ERROR, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 1, 'log_level': 'warning'} - self.assertEqual(logging.WARNING, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 1, 'log_level': 'info'} - self.assertEqual(logging.INFO, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 1, 'log_level': 'debug'} - self.assertEqual(logging.DEBUG, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 1, 'log_level': 'bogus'} - self.assertEqual(logging.WARNING, logs.log_level_from_config(cfg)) - cfg = {'verbose_level': 1, 'log_level': 'info', 'debug': True} - self.assertEqual(logging.DEBUG, logs.log_level_from_config(cfg)) - - @mock.patch('warnings.simplefilter') - def test_set_warning_filter(self, simplefilter): - logs.set_warning_filter(logging.ERROR) - simplefilter.assert_called_with("ignore") - logs.set_warning_filter(logging.WARNING) - simplefilter.assert_called_with("ignore") - logs.set_warning_filter(logging.INFO) - simplefilter.assert_called_with("once") - - -class TestFileFormatter(utils.TestCase): - - def test_nothing(self): - formatter = logs._FileFormatter() - self.assertEqual(('%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s %(message)s'), formatter.fmt) - - def test_options(self): - class Opts(object): - cloud = 'cloudy' - os_project_name = 'projecty' - username = 'usernamey' - options = Opts() - formatter = logs._FileFormatter(options=options) - self.assertEqual(('%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [cloudy usernamey projecty] %(message)s'), - 
formatter.fmt) - - def test_config(self): - config = mock.Mock() - config.config = {'cloud': 'cloudy'} - config.auth = {'project_name': 'projecty', 'username': 'usernamey'} - formatter = logs._FileFormatter(config=config) - self.assertEqual(('%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [cloudy usernamey projecty] %(message)s'), - formatter.fmt) - - -class TestLogConfigurator(utils.TestCase): - - def setUp(self): - super(TestLogConfigurator, self).setUp() - self.options = mock.Mock() - self.options.verbose_level = 1 - self.options.log_file = None - self.options.debug = False - self.root_logger = mock.Mock() - self.root_logger.setLevel = mock.Mock() - self.root_logger.addHandler = mock.Mock() - self.requests_log = mock.Mock() - self.requests_log.setLevel = mock.Mock() - self.cliff_log = mock.Mock() - self.cliff_log.setLevel = mock.Mock() - self.stevedore_log = mock.Mock() - self.stevedore_log.setLevel = mock.Mock() - self.iso8601_log = mock.Mock() - self.iso8601_log.setLevel = mock.Mock() - self.loggers = [ - self.root_logger, - self.requests_log, - self.cliff_log, - self.stevedore_log, - self.iso8601_log] - - @mock.patch('logging.StreamHandler') - @mock.patch('logging.getLogger') - @mock.patch('osc_lib.logs.set_warning_filter') - def test_init(self, warning_filter, getLogger, handle): - getLogger.side_effect = self.loggers - console_logger = mock.Mock() - console_logger.setFormatter = mock.Mock() - console_logger.setLevel = mock.Mock() - handle.return_value = console_logger - - configurator = logs.LogConfigurator(self.options) - - getLogger.assert_called_with('iso8601') # last call - warning_filter.assert_called_with(logging.WARNING) - self.root_logger.setLevel.assert_called_with(logging.DEBUG) - self.root_logger.addHandler.assert_called_with(console_logger) - self.requests_log.setLevel.assert_called_with(logging.ERROR) - self.cliff_log.setLevel.assert_called_with(logging.ERROR) - self.stevedore_log.setLevel.assert_called_with(logging.ERROR) - self.iso8601_log.setLevel.assert_called_with(logging.ERROR) - self.assertFalse(configurator.dump_trace) - - @mock.patch('logging.getLogger') - @mock.patch('osc_lib.logs.set_warning_filter') - def test_init_no_debug(self, warning_filter, getLogger): - getLogger.side_effect = self.loggers - self.options.debug = True - - configurator = logs.LogConfigurator(self.options) - - warning_filter.assert_called_with(logging.DEBUG) - self.requests_log.setLevel.assert_called_with(logging.DEBUG) - self.assertTrue(configurator.dump_trace) - - @mock.patch('logging.FileHandler') - @mock.patch('logging.getLogger') - @mock.patch('osc_lib.logs.set_warning_filter') - @mock.patch('osc_lib.logs._FileFormatter') - def test_init_log_file(self, formatter, warning_filter, getLogger, handle): - getLogger.side_effect = self.loggers - self.options.log_file = '/tmp/log_file' - file_logger = mock.Mock() - file_logger.setFormatter = mock.Mock() - file_logger.setLevel = mock.Mock() - handle.return_value = file_logger - mock_formatter = mock.Mock() - formatter.return_value = mock_formatter - - logs.LogConfigurator(self.options) - - handle.assert_called_with(filename=self.options.log_file) - self.root_logger.addHandler.assert_called_with(file_logger) - file_logger.setFormatter.assert_called_with(mock_formatter) - file_logger.setLevel.assert_called_with(logging.WARNING) - - @mock.patch('logging.FileHandler') - @mock.patch('logging.getLogger') - @mock.patch('osc_lib.logs.set_warning_filter') - @mock.patch('osc_lib.logs._FileFormatter') - def test_configure(self, 
formatter, warning_filter, getLogger, handle): - getLogger.side_effect = self.loggers - configurator = logs.LogConfigurator(self.options) - cloud_config = mock.Mock() - config_log = '/tmp/config_log' - cloud_config.config = { - 'log_file': config_log, - 'verbose_level': 1, - 'log_level': 'info'} - file_logger = mock.Mock() - file_logger.setFormatter = mock.Mock() - file_logger.setLevel = mock.Mock() - handle.return_value = file_logger - mock_formatter = mock.Mock() - formatter.return_value = mock_formatter - - configurator.configure(cloud_config) - - warning_filter.assert_called_with(logging.INFO) - handle.assert_called_with(filename=config_log) - self.root_logger.addHandler.assert_called_with(file_logger) - file_logger.setFormatter.assert_called_with(mock_formatter) - file_logger.setLevel.assert_called_with(logging.INFO) - self.assertFalse(configurator.dump_trace) diff --git a/openstackclient/tests/unit/common/test_module.py b/openstackclient/tests/unit/common/test_module.py index d2e8293fa3..8396203681 100644 --- a/openstackclient/tests/unit/common/test_module.py +++ b/openstackclient/tests/unit/common/test_module.py @@ -11,17 +11,25 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# """Test module module""" +import sys from unittest import mock from openstackclient.common import module as osc_module -from openstackclient.tests.unit import fakes from openstackclient.tests.unit import utils +class FakeModule: + def __init__(self, name, version): + self.name = name + self.__version__ = version + # Workaround for openstacksdk case + self.version = mock.Mock() + self.version.__version__ = version + + # NOTE(dtroyer): module_1 must match the version list filter (not --all) # currently == '*client*' module_name_1 = 'fakeclient' @@ -43,18 +51,18 @@ module_version_5 = '0.0.1' MODULES = { - module_name_1: fakes.FakeModule(module_name_1, module_version_1), - module_name_2: fakes.FakeModule(module_name_2, module_version_2), - module_name_3: fakes.FakeModule(module_name_3, module_version_3), - module_name_4: fakes.FakeModule(module_name_4, module_version_4), - module_name_5: fakes.FakeModule(module_name_5, module_version_5), + 'sys': sys, + module_name_1: FakeModule(module_name_1, module_version_1), + module_name_2: FakeModule(module_name_2, module_version_2), + module_name_3: FakeModule(module_name_3, module_version_3), + module_name_4: FakeModule(module_name_4, module_version_4), + module_name_5: FakeModule(module_name_5, module_version_5), } class TestCommandList(utils.TestCommand): - def setUp(self): - super(TestCommandList, self).setUp() + super().setUp() self.app.command_manager = mock.Mock() self.app.command_manager.get_command_groups.return_value = [ @@ -81,16 +89,14 @@ def test_command_list_no_options(self): # handling the detection rather than using the hard-code below. 
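The local FakeModule stand-in added above replaces the old shared fakes.FakeModule; it has to satisfy both the plain module.__version__ lookup and the openstacksdk-style module.version.__version__ lookup. A minimal, self-contained sketch of that pattern (illustrative only, not part of the patch):

# Minimal sketch (illustrative, not part of this patch) of the FakeModule
# pattern used above: one object that answers both module.__version__ and
# the openstacksdk-style module.version.__version__ lookup.
from unittest import mock


class FakeModule:
    def __init__(self, name, version):
        self.name = name
        self.__version__ = version
        # openstacksdk keeps its version string on a nested "version" module
        self.version = mock.Mock()
        self.version.__version__ = version


fake = FakeModule('fakeclient', '0.1.0')
assert fake.__version__ == '0.1.0'
assert fake.version.__version__ == '0.1.0'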
collist = ('Command Group', 'Commands') self.assertEqual(collist, columns) - datalist = (( - 'openstack.common', - 'limits show\nextension list' - ),) + datalist = (('openstack.common', 'limits show\nextension list'),) self.assertEqual(datalist, tuple(data)) def test_command_list_with_group_not_found(self): arglist = [ - '--group', 'not_exist', + '--group', + 'not_exist', ] verifylist = [ ('group', 'not_exist'), @@ -105,7 +111,8 @@ def test_command_list_with_group_not_found(self): def test_command_list_with_group(self): arglist = [ - '--group', 'common', + '--group', + 'common', ] verifylist = [ ('group', 'common'), @@ -116,10 +123,7 @@ def test_command_list_with_group(self): collist = ('Command Group', 'Commands') self.assertEqual(collist, columns) - datalist = (( - 'openstack.common', - 'limits show\nextension list' - ),) + datalist = (('openstack.common', 'limits show\nextension list'),) self.assertEqual(datalist, tuple(data)) @@ -130,9 +134,8 @@ def test_command_list_with_group(self): clear=True, ) class TestModuleList(utils.TestCommand): - def setUp(self): - super(TestModuleList, self).setUp() + super().setUp() # Get the command object to test self.cmd = osc_module.ListModule(self.app, None) diff --git a/openstackclient/tests/unit/common/test_parseractions.py b/openstackclient/tests/unit/common/test_parseractions.py deleted file mode 100644 index 736cd0b6d2..0000000000 --- a/openstackclient/tests/unit/common/test_parseractions.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2012-2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -# NOTE(dtroyer): This file is deprecated in Jun 2016, remove after 4.x release -# or Jun 2017. 
- -import argparse - -from osc_lib.cli import parseractions - -from openstackclient.tests.unit import utils - - -class TestKeyValueAction(utils.TestCase): - - def setUp(self): - super(TestKeyValueAction, self).setUp() - - self.parser = argparse.ArgumentParser() - - # Set up our typical usage - self.parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - default={'green': '20%', 'format': '#rgb'}, - help='Property to store for this volume ' - '(repeat option to set multiple properties)', - ) - - def test_good_values(self): - results = self.parser.parse_args([ - '--property', 'red=', - '--property', 'green=100%', - '--property', 'blue=50%', - ]) - - actual = getattr(results, 'property', {}) - # All should pass through unmolested - expect = {'red': '', 'green': '100%', 'blue': '50%', 'format': '#rgb'} - self.assertEqual(expect, actual) - - def test_error_values(self): - self.assertRaises( - argparse.ArgumentTypeError, - self.parser.parse_args, - [ - '--property', 'red', - ] - ) - - -class TestMultiKeyValueAction(utils.TestCase): - - def setUp(self): - super(TestMultiKeyValueAction, self).setUp() - - self.parser = argparse.ArgumentParser() - - # Set up our typical usage - self.parser.add_argument( - '--test', - metavar='req1=xxx,req2=yyy', - action=parseractions.MultiKeyValueAction, - dest='test', - default=None, - required_keys=['req1', 'req2'], - optional_keys=['opt1', 'opt2'], - help='Test' - ) - - def test_good_values(self): - results = self.parser.parse_args([ - '--test', 'req1=aaa,req2=bbb', - '--test', 'req1=,req2=', - ]) - - actual = getattr(results, 'test', []) - expect = [ - {'req1': 'aaa', 'req2': 'bbb'}, - {'req1': '', 'req2': ''}, - ] - self.assertCountEqual(expect, actual) - - def test_empty_required_optional(self): - self.parser.add_argument( - '--test-empty', - metavar='req1=xxx,req2=yyy', - action=parseractions.MultiKeyValueAction, - dest='test_empty', - default=None, - required_keys=[], - optional_keys=[], - help='Test' - ) - - results = self.parser.parse_args([ - '--test-empty', 'req1=aaa,req2=bbb', - '--test-empty', 'req1=,req2=', - ]) - - actual = getattr(results, 'test_empty', []) - expect = [ - {'req1': 'aaa', 'req2': 'bbb'}, - {'req1': '', 'req2': ''}, - ] - self.assertCountEqual(expect, actual) - - def test_error_values_with_comma(self): - self.assertRaises( - argparse.ArgumentTypeError, - self.parser.parse_args, - [ - '--test', 'mmm,nnn=zzz', - ] - ) - - def test_error_values_without_comma(self): - self.assertRaises( - argparse.ArgumentTypeError, - self.parser.parse_args, - [ - '--test', 'mmmnnn', - ] - ) - - def test_missing_key(self): - self.assertRaises( - argparse.ArgumentTypeError, - self.parser.parse_args, - [ - '--test', 'req2=ddd', - ] - ) - - def test_invalid_key(self): - self.assertRaises( - argparse.ArgumentTypeError, - self.parser.parse_args, - [ - '--test', 'req1=aaa,req2=bbb,aaa=req1', - ] - ) - - def test_required_keys_not_list(self): - self.assertRaises( - TypeError, - self.parser.add_argument, - '--test-required-dict', - metavar='req1=xxx,req2=yyy', - action=parseractions.MultiKeyValueAction, - dest='test_required_dict', - default=None, - required_keys={'aaa': 'bbb'}, - optional_keys=['opt1', 'opt2'], - help='Test' - ) - - def test_optional_keys_not_list(self): - self.assertRaises( - TypeError, - self.parser.add_argument, - '--test-optional-dict', - metavar='req1=xxx,req2=yyy', - action=parseractions.MultiKeyValueAction, - dest='test_optional_dict', - default=None, - required_keys=['req1', 'req2'], - 
optional_keys={'aaa': 'bbb'}, - help='Test' - ) - - -class TestNonNegativeAction(utils.TestCase): - - def setUp(self): - super(TestNonNegativeAction, self).setUp() - - self.parser = argparse.ArgumentParser() - - # Set up our typical usage - self.parser.add_argument( - '--foo', - metavar='', - type=int, - action=parseractions.NonNegativeAction, - ) - - def test_negative_values(self): - self.assertRaises( - argparse.ArgumentTypeError, - self.parser.parse_args, - "--foo -1".split() - ) - - def test_zero_values(self): - results = self.parser.parse_args( - '--foo 0'.split() - ) - - actual = getattr(results, 'foo', None) - self.assertEqual(actual, 0) - - def test_positive_values(self): - results = self.parser.parse_args( - '--foo 1'.split() - ) - - actual = getattr(results, 'foo', None) - self.assertEqual(actual, 1) diff --git a/openstackclient/tests/unit/common/test_progressbar.py b/openstackclient/tests/unit/common/test_progressbar.py index a624fc438a..47a866b3ed 100644 --- a/openstackclient/tests/unit/common/test_progressbar.py +++ b/openstackclient/tests/unit/common/test_progressbar.py @@ -19,7 +19,6 @@ class TestProgressBarWrapper(utils.TestCase): - def test_iter_file_display_progress_bar(self): size = 98304 file_obj = io.StringIO('X' * size) @@ -31,10 +30,7 @@ def test_iter_file_display_progress_bar(self): chunk = file_obj.read(chunksize) while chunk: chunk = file_obj.read(chunksize) - self.assertEqual( - '[%s>] 100%%\n' % ('=' * 29), - output.getvalue() - ) + self.assertEqual('[%s>] 100%%\n' % ('=' * 29), output.getvalue()) finally: sys.stdout = saved_stdout diff --git a/openstackclient/tests/unit/common/test_project_cleanup.py b/openstackclient/tests/unit/common/test_project_cleanup.py index 50c434b980..42020646cf 100644 --- a/openstackclient/tests/unit/common/test_project_cleanup.py +++ b/openstackclient/tests/unit/common/test_project_cleanup.py @@ -10,76 +10,84 @@ # License for the specific language governing permissions and limitations # under the License. 
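The removed test_parseractions.py above exercised osc_lib.cli.parseractions rather than anything in openstackclient itself, so those checks belong in osc-lib. For reference, a minimal sketch of the KeyValueAction behaviour the deleted tests covered (assumes osc-lib is installed; the parser setup mirrors the removed test and is not part of the patch):

# Minimal sketch of osc_lib.cli.parseractions.KeyValueAction, mirroring the
# setup from the removed tests (requires osc-lib; illustrative only).
import argparse

from osc_lib.cli import parseractions

parser = argparse.ArgumentParser()
parser.add_argument(
    '--property',
    metavar='<key=value>',
    action=parseractions.KeyValueAction,
    help='Property to set (repeat option to set multiple properties)',
)

args = parser.parse_args(['--property', 'red=', '--property', 'green=100%'])
# Values accumulate into a single dict keyed by the part before '='.
assert args.property == {'red': '', 'green': '100%'}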
-from io import StringIO from unittest import mock from openstackclient.common import project_cleanup from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -from openstackclient.tests.unit import utils as tests_utils +from openstackclient.tests.unit import utils as test_utils -class TestProjectCleanupBase(tests_utils.TestCommand): - - def setUp(self): - super(TestProjectCleanupBase, self).setUp() - - self.app.client_manager.sdk_connection = mock.Mock() - - -class TestProjectCleanup(TestProjectCleanupBase): - +class TestProjectCleanup(test_utils.TestCommand): project = identity_fakes.FakeProject.create_one_project() def setUp(self): - super(TestProjectCleanup, self).setUp() + super().setUp() self.cmd = project_cleanup.ProjectCleanup(self.app, None) self.project_cleanup_mock = mock.Mock() - self.sdk_connect_as_project_mock = \ - mock.Mock(return_value=self.app.client_manager.sdk_connection) - self.app.client_manager.sdk_connection.project_cleanup = \ + self.sdk_connect_as_project_mock = mock.Mock( + return_value=self.app.client_manager.sdk_connection + ) + self.app.client_manager.sdk_connection.project_cleanup = ( self.project_cleanup_mock - self.app.client_manager.sdk_connection.identity.find_project = \ + ) + self.app.client_manager.sdk_connection.identity.find_project = ( mock.Mock(return_value=self.project) - self.app.client_manager.sdk_connection.connect_as_project = \ + ) + self.app.client_manager.sdk_connection.connect_as_project = ( self.sdk_connect_as_project_mock + ) def test_project_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_project_cleanup_with_filters(self): arglist = [ - '--project', self.project.id, - '--created-before', '2200-01-01', - '--updated-before', '2200-01-02' + '--project', + self.project.id, + '--created-before', + '2200-01-01', + '--updated-before', + '2200-01-02', ] verifylist = [ ('dry_run', False), ('auth_project', False), ('project', self.project.id), ('created_before', '2200-01-01'), - ('updated_before', '2200-01-02') + ('updated_before', '2200-01-02'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = None - with mock.patch('sys.stdin', StringIO('y')): + with mock.patch('getpass.getpass', return_value='y'): result = self.cmd.take_action(parsed_args) - self.sdk_connect_as_project_mock.assert_called_with( - self.project) - filters = { - 'created_at': '2200-01-01', - 'updated_at': '2200-01-02' - } + self.sdk_connect_as_project_mock.assert_called_with(self.project) + filters = {'created_at': '2200-01-01', 'updated_at': '2200-01-02'} calls = [ - mock.call(dry_run=True, status_queue=mock.ANY, filters=filters), - mock.call(dry_run=False, status_queue=mock.ANY, filters=filters) + mock.call( + dry_run=True, + status_queue=mock.ANY, + filters=filters, + skip_resources=None, + ), + mock.call( + dry_run=False, + status_queue=mock.ANY, + filters=filters, + skip_resources=None, + ), ] self.project_cleanup_mock.assert_has_calls(calls) @@ -87,7 +95,8 @@ def test_project_cleanup_with_filters(self): def test_project_cleanup_with_auto_approve(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, '--auto-approve', ] verifylist = [ @@ -101,11 +110,20 @@ def test_project_cleanup_with_auto_approve(self): result = self.cmd.take_action(parsed_args) - 
self.sdk_connect_as_project_mock.assert_called_with( - self.project) + self.sdk_connect_as_project_mock.assert_called_with(self.project) calls = [ - mock.call(dry_run=True, status_queue=mock.ANY, filters={}), - mock.call(dry_run=False, status_queue=mock.ANY, filters={}) + mock.call( + dry_run=True, + status_queue=mock.ANY, + filters={}, + skip_resources=None, + ), + mock.call( + dry_run=False, + status_queue=mock.ANY, + filters={}, + skip_resources=None, + ), ] self.project_cleanup_mock.assert_has_calls(calls) @@ -113,7 +131,8 @@ def test_project_cleanup_with_auto_approve(self): def test_project_cleanup_with_project(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, ] verifylist = [ ('dry_run', False), @@ -123,14 +142,23 @@ def test_project_cleanup_with_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = None - with mock.patch('sys.stdin', StringIO('y')): + with mock.patch('getpass.getpass', return_value='y'): result = self.cmd.take_action(parsed_args) - self.sdk_connect_as_project_mock.assert_called_with( - self.project) + self.sdk_connect_as_project_mock.assert_called_with(self.project) calls = [ - mock.call(dry_run=True, status_queue=mock.ANY, filters={}), - mock.call(dry_run=False, status_queue=mock.ANY, filters={}) + mock.call( + dry_run=True, + status_queue=mock.ANY, + filters={}, + skip_resources=None, + ), + mock.call( + dry_run=False, + status_queue=mock.ANY, + filters={}, + skip_resources=None, + ), ] self.project_cleanup_mock.assert_has_calls(calls) @@ -138,7 +166,8 @@ def test_project_cleanup_with_project(self): def test_project_cleanup_with_project_abort(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, ] verifylist = [ ('dry_run', False), @@ -148,13 +177,17 @@ def test_project_cleanup_with_project_abort(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = None - with mock.patch('sys.stdin', StringIO('n')): + with mock.patch('getpass.getpass', return_value='y'): result = self.cmd.take_action(parsed_args) - self.sdk_connect_as_project_mock.assert_called_with( - self.project) + self.sdk_connect_as_project_mock.assert_called_with(self.project) calls = [ - mock.call(dry_run=True, status_queue=mock.ANY, filters={}), + mock.call( + dry_run=True, + status_queue=mock.ANY, + filters={}, + skip_resources=None, + ), ] self.project_cleanup_mock.assert_has_calls(calls) @@ -163,7 +196,8 @@ def test_project_cleanup_with_project_abort(self): def test_project_cleanup_with_dry_run(self): arglist = [ '--dry-run', - '--project', self.project.id, + '--project', + self.project.id, ] verifylist = [ ('dry_run', True), @@ -175,10 +209,13 @@ def test_project_cleanup_with_dry_run(self): result = self.cmd.take_action(parsed_args) - self.sdk_connect_as_project_mock.assert_called_with( - self.project) + self.sdk_connect_as_project_mock.assert_called_with(self.project) self.project_cleanup_mock.assert_called_once_with( - dry_run=True, status_queue=mock.ANY, filters={}) + dry_run=True, + status_queue=mock.ANY, + filters={}, + skip_resources=None, + ) self.assertIsNone(result) @@ -196,13 +233,58 @@ def test_project_cleanup_with_auth_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = None - with mock.patch('sys.stdin', StringIO('y')): + with mock.patch('getpass.getpass', return_value='y'): result = self.cmd.take_action(parsed_args) self.sdk_connect_as_project_mock.assert_not_called() calls = [ - mock.call(dry_run=True, status_queue=mock.ANY, 
filters={}), - mock.call(dry_run=False, status_queue=mock.ANY, filters={}) + mock.call( + dry_run=True, + status_queue=mock.ANY, + filters={}, + skip_resources=None, + ), + mock.call( + dry_run=False, + status_queue=mock.ANY, + filters={}, + skip_resources=None, + ), + ] + self.project_cleanup_mock.assert_has_calls(calls) + + self.assertIsNone(result) + + def test_project_cleanup_with_skip_resource(self): + skip_resource = "block_storage.backup" + arglist = [ + '--project', + self.project.id, + '--skip-resource', + skip_resource, + ] + verifylist = [('skip_resource', [skip_resource])] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = None + + with mock.patch('getpass.getpass', return_value='y'): + result = self.cmd.take_action(parsed_args) + + self.sdk_connect_as_project_mock.assert_called_with(self.project) + + calls = [ + mock.call( + dry_run=True, + status_queue=mock.ANY, + filters={}, + skip_resources=[skip_resource], + ), + mock.call( + dry_run=False, + status_queue=mock.ANY, + filters={}, + skip_resources=[skip_resource], + ), ] self.project_cleanup_mock.assert_has_calls(calls) diff --git a/openstackclient/tests/unit/common/test_project_purge.py b/openstackclient/tests/unit/common/test_project_purge.py deleted file mode 100644 index 26333d7067..0000000000 --- a/openstackclient/tests/unit/common/test_project_purge.py +++ /dev/null @@ -1,320 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
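The project-cleanup tests above now mock getpass.getpass instead of replacing sys.stdin, since the confirmation prompt is read through getpass. A minimal, self-contained sketch of that mocking pattern (confirm() below is an illustrative stand-in, not the real command code in openstackclient.common.project_cleanup):

# Minimal sketch of the getpass-based confirmation mocking used above.
# confirm() is an illustrative stand-in; the real prompt lives in
# openstackclient.common.project_cleanup.
import getpass
from unittest import mock


def confirm(prompt="Delete all resources? (y/n) "):
    return getpass.getpass(prompt).lower().startswith('y')


with mock.patch('getpass.getpass', return_value='y'):
    assert confirm() is True
with mock.patch('getpass.getpass', return_value='n'):
    assert confirm() is False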
- -from unittest import mock - -from osc_lib import exceptions - -from openstackclient.common import project_purge -from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -from openstackclient.tests.unit import fakes -from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -from openstackclient.tests.unit.image.v2 import fakes as image_fakes -from openstackclient.tests.unit import utils as tests_utils -from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes - - -class TestProjectPurgeInit(tests_utils.TestCommand): - - def setUp(self): - super().setUp() - compute_client = compute_fakes.FakeComputev2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.compute = compute_client - self.servers_mock = compute_client.servers - self.servers_mock.reset_mock() - - volume_client = volume_fakes.FakeVolumeClient( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.volume = volume_client - self.volumes_mock = volume_client.volumes - self.volumes_mock.reset_mock() - self.snapshots_mock = volume_client.volume_snapshots - self.snapshots_mock.reset_mock() - self.backups_mock = volume_client.backups - self.backups_mock.reset_mock() - - identity_client = identity_fakes.FakeIdentityv3Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.identity = identity_client - self.domains_mock = identity_client.domains - self.domains_mock.reset_mock() - self.projects_mock = identity_client.projects - self.projects_mock.reset_mock() - - image_client = image_fakes.FakeImagev2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.image = image_client - self.images_mock = image_client.images - self.images_mock.reset_mock() - - -class TestProjectPurge(TestProjectPurgeInit): - - project = identity_fakes.FakeProject.create_one_project() - server = compute_fakes.FakeServer.create_one_server() - image = image_fakes.create_one_image() - volume = volume_fakes.create_one_volume() - backup = volume_fakes.create_one_backup() - snapshot = volume_fakes.create_one_snapshot() - - def setUp(self): - super().setUp() - self.projects_mock.get.return_value = self.project - self.projects_mock.delete.return_value = None - self.images_mock.list.return_value = [self.image] - self.images_mock.delete.return_value = None - self.servers_mock.list.return_value = [self.server] - self.servers_mock.delete.return_value = None - self.volumes_mock.list.return_value = [self.volume] - self.volumes_mock.delete.return_value = None - self.volumes_mock.force_delete.return_value = None - self.snapshots_mock.list.return_value = [self.snapshot] - self.snapshots_mock.delete.return_value = None - self.backups_mock.list.return_value = [self.backup] - self.backups_mock.delete.return_value = None - - self.cmd = project_purge.ProjectPurge(self.app, None) - - def test_project_no_options(self): - arglist = [] - verifylist = [] - - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) - - def test_project_purge_with_project(self): - arglist = [ - '--project', self.project.id, - ] - verifylist = [ - ('dry_run', False), - ('keep_project', False), - ('auth_project', False), - ('project', self.project.id), - ('project_domain', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.projects_mock.get.assert_called_once_with(self.project.id) - 
self.projects_mock.delete.assert_called_once_with(self.project.id) - self.servers_mock.list.assert_called_once_with( - search_opts={'tenant_id': self.project.id, 'all_tenants': True}) - kwargs = {'filters': {'owner': self.project.id}} - self.images_mock.list.assert_called_once_with(**kwargs) - volume_search_opts = {'project_id': self.project.id, - 'all_tenants': True} - self.volumes_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.snapshots_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.backups_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.servers_mock.delete.assert_called_once_with(self.server.id) - self.images_mock.delete.assert_called_once_with(self.image.id) - self.volumes_mock.force_delete.assert_called_once_with(self.volume.id) - self.snapshots_mock.delete.assert_called_once_with(self.snapshot.id) - self.backups_mock.delete.assert_called_once_with(self.backup.id) - self.assertIsNone(result) - - def test_project_purge_with_dry_run(self): - arglist = [ - '--dry-run', - '--project', self.project.id, - ] - verifylist = [ - ('dry_run', True), - ('keep_project', False), - ('auth_project', False), - ('project', self.project.id), - ('project_domain', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.projects_mock.get.assert_called_once_with(self.project.id) - self.projects_mock.delete.assert_not_called() - self.servers_mock.list.assert_called_once_with( - search_opts={'tenant_id': self.project.id, 'all_tenants': True}) - kwargs = {'filters': {'owner': self.project.id}} - self.images_mock.list.assert_called_once_with(**kwargs) - volume_search_opts = {'project_id': self.project.id, - 'all_tenants': True} - self.volumes_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.snapshots_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.backups_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.servers_mock.delete.assert_not_called() - self.images_mock.delete.assert_not_called() - self.volumes_mock.force_delete.assert_not_called() - self.snapshots_mock.delete.assert_not_called() - self.backups_mock.delete.assert_not_called() - self.assertIsNone(result) - - def test_project_purge_with_keep_project(self): - arglist = [ - '--keep-project', - '--project', self.project.id, - ] - verifylist = [ - ('dry_run', False), - ('keep_project', True), - ('auth_project', False), - ('project', self.project.id), - ('project_domain', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.projects_mock.get.assert_called_once_with(self.project.id) - self.projects_mock.delete.assert_not_called() - self.servers_mock.list.assert_called_once_with( - search_opts={'tenant_id': self.project.id, 'all_tenants': True}) - kwargs = {'filters': {'owner': self.project.id}} - self.images_mock.list.assert_called_once_with(**kwargs) - volume_search_opts = {'project_id': self.project.id, - 'all_tenants': True} - self.volumes_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.snapshots_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.backups_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.servers_mock.delete.assert_called_once_with(self.server.id) - self.images_mock.delete.assert_called_once_with(self.image.id) - 
self.volumes_mock.force_delete.assert_called_once_with(self.volume.id) - self.snapshots_mock.delete.assert_called_once_with(self.snapshot.id) - self.backups_mock.delete.assert_called_once_with(self.backup.id) - self.assertIsNone(result) - - def test_project_purge_with_auth_project(self): - self.app.client_manager.auth_ref = mock.Mock() - self.app.client_manager.auth_ref.project_id = self.project.id - arglist = [ - '--auth-project', - ] - verifylist = [ - ('dry_run', False), - ('keep_project', False), - ('auth_project', True), - ('project', None), - ('project_domain', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.projects_mock.get.assert_not_called() - self.projects_mock.delete.assert_called_once_with(self.project.id) - self.servers_mock.list.assert_called_once_with( - search_opts={'tenant_id': self.project.id, 'all_tenants': True}) - kwargs = {'filters': {'owner': self.project.id}} - self.images_mock.list.assert_called_once_with(**kwargs) - volume_search_opts = {'project_id': self.project.id, - 'all_tenants': True} - self.volumes_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.snapshots_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.backups_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.servers_mock.delete.assert_called_once_with(self.server.id) - self.images_mock.delete.assert_called_once_with(self.image.id) - self.volumes_mock.force_delete.assert_called_once_with(self.volume.id) - self.snapshots_mock.delete.assert_called_once_with(self.snapshot.id) - self.backups_mock.delete.assert_called_once_with(self.backup.id) - self.assertIsNone(result) - - @mock.patch.object(project_purge.LOG, 'error') - def test_project_purge_with_exception(self, mock_error): - self.servers_mock.delete.side_effect = exceptions.CommandError() - arglist = [ - '--project', self.project.id, - ] - verifylist = [ - ('dry_run', False), - ('keep_project', False), - ('auth_project', False), - ('project', self.project.id), - ('project_domain', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.projects_mock.get.assert_called_once_with(self.project.id) - self.projects_mock.delete.assert_called_once_with(self.project.id) - self.servers_mock.list.assert_called_once_with( - search_opts={'tenant_id': self.project.id, 'all_tenants': True}) - kwargs = {'filters': {'owner': self.project.id}} - self.images_mock.list.assert_called_once_with(**kwargs) - volume_search_opts = {'project_id': self.project.id, - 'all_tenants': True} - self.volumes_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.snapshots_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.backups_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.servers_mock.delete.assert_called_once_with(self.server.id) - self.images_mock.delete.assert_called_once_with(self.image.id) - self.volumes_mock.force_delete.assert_called_once_with(self.volume.id) - self.snapshots_mock.delete.assert_called_once_with(self.snapshot.id) - self.backups_mock.delete.assert_called_once_with(self.backup.id) - mock_error.assert_called_with("1 of 1 servers failed to delete.") - self.assertIsNone(result) - - def test_project_purge_with_force_delete_backup(self): - self.backups_mock.delete.side_effect = [exceptions.CommandError, None] - arglist = [ - '--project', self.project.id, - ] - 
verifylist = [ - ('dry_run', False), - ('keep_project', False), - ('auth_project', False), - ('project', self.project.id), - ('project_domain', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.projects_mock.get.assert_called_once_with(self.project.id) - self.projects_mock.delete.assert_called_once_with(self.project.id) - self.servers_mock.list.assert_called_once_with( - search_opts={'tenant_id': self.project.id, 'all_tenants': True}) - kwargs = {'filters': {'owner': self.project.id}} - self.images_mock.list.assert_called_once_with(**kwargs) - volume_search_opts = {'project_id': self.project.id, - 'all_tenants': True} - self.volumes_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.snapshots_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.backups_mock.list.assert_called_once_with( - search_opts=volume_search_opts) - self.servers_mock.delete.assert_called_once_with(self.server.id) - self.images_mock.delete.assert_called_once_with(self.image.id) - self.volumes_mock.force_delete.assert_called_once_with(self.volume.id) - self.snapshots_mock.delete.assert_called_once_with(self.snapshot.id) - self.assertEqual(2, self.backups_mock.delete.call_count) - self.backups_mock.delete.assert_called_with(self.backup.id, force=True) - self.assertIsNone(result) diff --git a/openstackclient/tests/unit/common/test_quota.py b/openstackclient/tests/unit/common/test_quota.py index 2470a96f7a..7bfb2e7f2f 100644 --- a/openstackclient/tests/unit/common/test_quota.py +++ b/openstackclient/tests/unit/common/test_quota.py @@ -13,64 +13,36 @@ import copy from unittest import mock -from osc_lib import exceptions +from openstack.block_storage.v3 import quota_set as _volume_quota_set +from openstack.compute.v2 import quota_set as _compute_quota_set +from openstack.identity.v3 import project as _project +from openstack.network.v2 import quota as _network_quota_set +from openstack.test import fakes as sdk_fakes +from openstack import exceptions as sdk_exceptions from openstackclient.common import quota from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -from openstackclient.tests.unit import fakes -from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes -from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3 +from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit.network.v2 import fakes as network_fakes -from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes +from openstackclient.tests.unit import utils +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes -class FakeQuotaResource(fakes.FakeResource): - - _keys = {'property': 'value'} - - def set_keys(self, args): - self._keys.update(args) - - def unset_keys(self, keys): - for key in keys: - self._keys.pop(key, None) - - def get_keys(self): - return self._keys - - -class TestQuota(compute_fakes.TestComputev2): - +class TestQuota( + identity_fakes.FakeClientMixin, + compute_fakes.FakeClientMixin, + network_fakes.FakeClientMixin, + volume_fakes.FakeClientMixin, + utils.TestCommand, +): def setUp(self): - super(TestQuota, self).setUp() - - # Set up common projects - self.projects = identity_fakes_v3.FakeProject.create_projects(count=2) - self.projects_mock = self.app.client_manager.identity.projects - self.projects_mock.reset_mock() - self.projects_mock.get.return_value = self.projects[0] - - 
self.compute_quotas_mock = self.app.client_manager.compute.quotas - self.compute_quotas_mock.reset_mock() - self.compute_quotas_class_mock = \ - self.app.client_manager.compute.quota_classes - self.compute_quotas_class_mock.reset_mock() - - self.volume_quotas_mock = self.app.client_manager.volume.quotas - self.volume_quotas_mock.reset_mock() - self.volume_quotas_class_mock = \ - self.app.client_manager.volume.quota_classes - self.volume_quotas_class_mock.reset_mock() - - self.app.client_manager.network = mock.Mock() - self.network_mock = self.app.client_manager.network + super().setUp() + self.projects = list( + sdk_fakes.generate_fake_resources(_project.Project, count=2) + ) self.app.client_manager.auth_ref = mock.Mock() - self.app.client_manager.auth_ref.service_catalog = mock.Mock() - self.service_catalog_mock = \ - self.app.client_manager.auth_ref.service_catalog - self.service_catalog_mock.reset_mock() - self.app.client_manager.auth_ref.project_id = identity_fakes.project_id + self.app.client_manager.auth_ref.project_id = self.projects[1].id class TestQuotaList(TestQuota): @@ -79,7 +51,6 @@ class TestQuotaList(TestQuota): compute_column_header = ( 'Project ID', 'Cores', - 'Fixed IPs', 'Injected Files', 'Injected File Content Bytes', 'Injected File Path Bytes', @@ -101,7 +72,7 @@ class TestQuotaList(TestQuota): 'Security Groups', 'Security Group Rules', 'Subnets', - 'Subnet Pools' + 'Subnet Pools', ) volume_column_header = ( @@ -115,29 +86,25 @@ class TestQuotaList(TestQuota): ) def setUp(self): - super(TestQuotaList, self).setUp() + super().setUp() - # Work with multiple projects in this class - self.projects_mock.get.side_effect = self.projects - self.projects_mock.list.return_value = self.projects + self.identity_sdk_client.get_project.side_effect = self.projects[0] + self.identity_sdk_client.projects.return_value = self.projects self.compute_quotas = [ - compute_fakes.FakeQuota.create_one_comp_quota(), - compute_fakes.FakeQuota.create_one_comp_quota(), + sdk_fakes.generate_fake_resource(_compute_quota_set.QuotaSet), + sdk_fakes.generate_fake_resource(_compute_quota_set.QuotaSet), ] - self.compute_default_quotas = [ - compute_fakes.FakeQuota.create_one_default_comp_quota(), - compute_fakes.FakeQuota.create_one_default_comp_quota(), - ] - self.compute = self.app.client_manager.compute - self.compute.quotas.defaults = mock.Mock( - side_effect=self.compute_default_quotas, + self.default_compute_quotas = sdk_fakes.generate_fake_resource( + _compute_quota_set.QuotaSet + ) + # the defaults are global hence use of return_value here + self.compute_client.get_quota_set_defaults.return_value = ( + self.default_compute_quotas ) - self.compute_reference_data = ( self.projects[0].id, self.compute_quotas[0].cores, - self.compute_quotas[0].fixed_ips, self.compute_quotas[0].injected_files, self.compute_quotas[0].injected_file_content_bytes, self.compute_quotas[0].injected_file_path_bytes, @@ -150,18 +117,16 @@ def setUp(self): ) self.network_quotas = [ - network_fakes.FakeQuota.create_one_net_quota(), - network_fakes.FakeQuota.create_one_net_quota(), - ] - self.network_default_quotas = [ - network_fakes.FakeQuota.create_one_default_net_quota(), - network_fakes.FakeQuota.create_one_default_net_quota(), + sdk_fakes.generate_fake_resource(_network_quota_set.Quota), + sdk_fakes.generate_fake_resource(_network_quota_set.Quota), ] - self.network = self.app.client_manager.network - self.network.get_quota_default = mock.Mock( - side_effect=self.network_default_quotas, + self.default_network_quotas = 
sdk_fakes.generate_fake_resource( + _network_quota_set.QuotaDefault + ) + # the defaults are global hence use of return_value here + self.network_client.get_quota_default.return_value = ( + self.default_network_quotas ) - self.network_reference_data = ( self.projects[0].id, self.network_quotas[0].floating_ips, @@ -176,18 +141,16 @@ def setUp(self): ) self.volume_quotas = [ - volume_fakes.create_one_vol_quota(), - volume_fakes.create_one_vol_quota(), - ] - self.volume_default_quotas = [ - volume_fakes.create_one_default_vol_quota(), - volume_fakes.create_one_default_vol_quota(), + sdk_fakes.generate_fake_resource(_volume_quota_set.QuotaSet), + sdk_fakes.generate_fake_resource(_volume_quota_set.QuotaSet), ] - self.volume = self.app.client_manager.volume - self.volume.quotas.defaults = mock.Mock( - side_effect=self.volume_default_quotas, + self.default_volume_quotas = sdk_fakes.generate_fake_resource( + _volume_quota_set.QuotaSet + ) + # the defaults are global hence use of return_value here + self.volume_sdk_client.get_quota_set_defaults.return_value = ( + self.default_volume_quotas ) - self.volume_reference_data = ( self.projects[0].id, self.volume_quotas[0].backups, @@ -200,120 +163,9 @@ def setUp(self): self.cmd = quota.ListQuota(self.app, None) - @staticmethod - def _get_detailed_reference_data(quota): - reference_data = [] - for name, values in quota.to_dict().items(): - if type(values) is dict: - if 'used' in values: - # For network quota it's "used" key instead of "in_use" - in_use = values['used'] - else: - in_use = values['in_use'] - resource_values = [ - in_use, - values['reserved'], - values['limit']] - reference_data.append(tuple([name] + resource_values)) - return reference_data - - def test_quota_list_details_compute(self): - detailed_quota = ( - compute_fakes.FakeQuota.create_one_comp_detailed_quota()) - - detailed_column_header = ( - 'Resource', - 'In Use', - 'Reserved', - 'Limit', - ) - detailed_reference_data = ( - self._get_detailed_reference_data(detailed_quota)) - - self.compute.quotas.get = mock.Mock(return_value=detailed_quota) - - arglist = [ - '--detail', '--compute', - ] - verifylist = [ - ('detail', True), - ('compute', True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - ret_quotas = list(data) - - self.assertEqual(detailed_column_header, columns) - self.assertEqual( - sorted(detailed_reference_data), sorted(ret_quotas)) - - def test_quota_list_details_network(self): - detailed_quota = ( - network_fakes.FakeQuota.create_one_net_detailed_quota()) - - detailed_column_header = ( - 'Resource', - 'In Use', - 'Reserved', - 'Limit', - ) - detailed_reference_data = ( - self._get_detailed_reference_data(detailed_quota)) - - self.network.get_quota = mock.Mock(return_value=detailed_quota) - - arglist = [ - '--detail', '--network', - ] - verifylist = [ - ('detail', True), - ('network', True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - ret_quotas = list(data) - - self.assertEqual(detailed_column_header, columns) - self.assertEqual( - sorted(detailed_reference_data), sorted(ret_quotas)) - - def test_quota_list_details_volume(self): - detailed_quota = volume_fakes.create_one_detailed_quota() - - detailed_column_header = ( - 'Resource', - 'In Use', - 'Reserved', - 'Limit', - ) - detailed_reference_data = ( - self._get_detailed_reference_data(detailed_quota)) - - self.volume.quotas.get = 
mock.Mock(return_value=detailed_quota) - - arglist = [ - '--detail', - '--volume', - ] - verifylist = [ - ('detail', True), - ('volume', True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - ret_quotas = list(data) - - self.assertEqual(detailed_column_header, columns) - self.assertEqual(sorted(detailed_reference_data), sorted(ret_quotas)) - def test_quota_list_compute(self): # Two projects with non-default quotas - self.compute.quotas.get = mock.Mock( - side_effect=self.compute_quotas, - ) + self.compute_client.get_quota_set.side_effect = self.compute_quotas arglist = [ '--compute', @@ -332,12 +184,10 @@ def test_quota_list_compute(self): def test_quota_list_compute_default(self): # One of the projects is at defaults - self.compute.quotas.get = mock.Mock( - side_effect=[ - self.compute_quotas[0], - compute_fakes.FakeQuota.create_one_default_comp_quota(), - ], - ) + self.compute_client.get_quota_set.side_effect = [ + self.compute_quotas[0], + self.default_compute_quotas, + ] arglist = [ '--compute', @@ -354,14 +204,12 @@ def test_quota_list_compute_default(self): self.assertEqual(self.compute_reference_data, ret_quotas[0]) self.assertEqual(1, len(ret_quotas)) - def test_quota_list_compute_no_project_not_found(self): + def test_quota_list_compute_project_not_found(self): # Make one of the projects disappear - self.compute.quotas.get = mock.Mock( - side_effect=[ - self.compute_quotas[0], - exceptions.NotFound("NotFound"), - ], - ) + self.compute_client.get_quota_set.side_effect = [ + self.compute_quotas[0], + sdk_exceptions.NotFoundException("NotFound"), + ] arglist = [ '--compute', @@ -378,14 +226,12 @@ def test_quota_list_compute_no_project_not_found(self): self.assertEqual(self.compute_reference_data, ret_quotas[0]) self.assertEqual(1, len(ret_quotas)) - def test_quota_list_compute_no_project_4xx(self): - # Make one of the projects disappear - self.compute.quotas.get = mock.Mock( - side_effect=[ - self.compute_quotas[0], - exceptions.BadRequest("Bad request"), - ], - ) + def test_quota_list_compute_project_inaccessible(self): + # Make one of the projects inaccessible + self.compute_client.get_quota_set.side_effect = [ + self.compute_quotas[0], + sdk_exceptions.ForbiddenException("Forbidden"), + ] arglist = [ '--compute', @@ -402,13 +248,10 @@ def test_quota_list_compute_no_project_4xx(self): self.assertEqual(self.compute_reference_data, ret_quotas[0]) self.assertEqual(1, len(ret_quotas)) - def test_quota_list_compute_no_project_5xx(self): - # Make one of the projects disappear - self.compute.quotas.get = mock.Mock( - side_effect=[ - self.compute_quotas[0], - exceptions.HTTPNotImplemented("Not implemented??"), - ], + def test_quota_list_compute_server_error(self): + # Make the server "break" + self.compute_client.get_quota_set.side_effect = ( + sdk_exceptions.HttpException("Not implemented?") ) arglist = [ @@ -420,39 +263,14 @@ def test_quota_list_compute_no_project_5xx(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.HTTPNotImplemented, + sdk_exceptions.HttpException, self.cmd.take_action, parsed_args, ) - def test_quota_list_compute_by_project(self): - # Two projects with non-default quotas - self.compute.quotas.get = mock.Mock( - side_effect=self.compute_quotas, - ) - - arglist = [ - '--compute', - '--project', self.projects[0].name, - ] - verifylist = [ - ('compute', True), - ('project', self.projects[0].name), - ] - parsed_args = self.check_parser(self.cmd, 
arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - ret_quotas = list(data) - - self.assertEqual(self.compute_column_header, columns) - self.assertEqual(self.compute_reference_data, ret_quotas[0]) - self.assertEqual(1, len(ret_quotas)) - def test_quota_list_network(self): # Two projects with non-default quotas - self.network.get_quota = mock.Mock( - side_effect=self.network_quotas, - ) + self.network_client.get_quota.side_effect = self.network_quotas arglist = [ '--network', @@ -471,12 +289,10 @@ def test_quota_list_network(self): def test_quota_list_network_default(self): # Two projects with non-default quotas - self.network.get_quota = mock.Mock( - side_effect=[ - self.network_quotas[0], - network_fakes.FakeQuota.create_one_default_net_quota(), - ], - ) + self.network_client.get_quota.side_effect = [ + self.network_quotas[0], + self.default_network_quotas, + ] arglist = [ '--network', @@ -495,41 +311,16 @@ def test_quota_list_network_default(self): def test_quota_list_network_no_project(self): # Two projects with non-default quotas - self.network.get_quota = mock.Mock( - side_effect=[ - self.network_quotas[0], - exceptions.NotFound("NotFound"), - ], - ) - - arglist = [ - '--network', + self.network_client.get_quota.side_effect = [ + self.network_quotas[0], + sdk_exceptions.NotFoundException("NotFound"), ] - verifylist = [ - ('network', True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - ret_quotas = list(data) - - self.assertEqual(self.network_column_header, columns) - self.assertEqual(self.network_reference_data, ret_quotas[0]) - self.assertEqual(1, len(ret_quotas)) - - def test_quota_list_network_by_project(self): - # Two projects with non-default quotas - self.network.get_quota = mock.Mock( - side_effect=self.network_quotas, - ) arglist = [ '--network', - '--project', self.projects[0].name, ] verifylist = [ ('network', True), - ('project', self.projects[0].name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -542,9 +333,7 @@ def test_quota_list_network_by_project(self): def test_quota_list_volume(self): # Two projects with non-default quotas - self.volume.quotas.get = mock.Mock( - side_effect=self.volume_quotas, - ) + self.volume_sdk_client.get_quota_set.side_effect = self.volume_quotas arglist = [ '--volume', @@ -563,36 +352,10 @@ def test_quota_list_volume(self): def test_quota_list_volume_default(self): # Two projects with non-default quotas - self.volume.quotas.get = mock.Mock( - side_effect=[ - self.volume_quotas[0], - volume_fakes.create_one_default_vol_quota(), - ], - ) - - arglist = [ - '--volume', - ] - verifylist = [ - ('volume', True), + self.volume_sdk_client.get_quota_set.side_effect = [ + self.volume_quotas[0], + self.default_volume_quotas, ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - ret_quotas = list(data) - - self.assertEqual(self.volume_column_header, columns) - self.assertEqual(self.volume_reference_data, ret_quotas[0]) - self.assertEqual(1, len(ret_quotas)) - - def test_quota_list_volume_no_project(self): - # Two projects with non-default quotas - self.volume.quotas.get = mock.Mock( - side_effect=[ - self.volume_quotas[0], - volume_fakes.create_one_default_vol_quota(), - ], - ) arglist = [ '--volume', @@ -609,95 +372,81 @@ def test_quota_list_volume_no_project(self): self.assertEqual(self.volume_reference_data, ret_quotas[0]) self.assertEqual(1, len(ret_quotas)) - 
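The reworked quota tests build their reference objects with openstacksdk's test helpers instead of hand-written fake classes. A minimal sketch of those helpers as used in setUp() above (assumes a recent openstacksdk; illustrative only):

# Minimal sketch of the openstack.test.fakes helpers used in setUp() above
# (requires a recent openstacksdk; illustrative only).
from openstack.compute.v2 import quota_set as _quota_set
from openstack.test import fakes as sdk_fakes

# One fake resource with every attribute filled with type-appropriate data.
fake_quota = sdk_fakes.generate_fake_resource(_quota_set.QuotaSet)
print(fake_quota.cores, fake_quota.instances)

# A generator of fakes, e.g. one per project under test.
fake_quotas = list(
    sdk_fakes.generate_fake_resources(_quota_set.QuotaSet, count=2)
)
assert len(fake_quotas) == 2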
def test_quota_list_volume_by_project(self): - # Two projects with non-default quotas - self.volume.quotas.get = mock.Mock( - side_effect=self.volume_quotas, - ) - - arglist = [ - '--volume', - '--project', self.projects[0].name, - ] - verifylist = [ - ('volume', True), - ('project', self.projects[0].name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - ret_quotas = list(data) - - self.assertEqual(self.volume_column_header, columns) - self.assertEqual(self.volume_reference_data, ret_quotas[0]) - self.assertEqual(1, len(ret_quotas)) - class TestQuotaSet(TestQuota): - def setUp(self): - super(TestQuotaSet, self).setUp() - - self.compute_quotas_mock.update.return_value = FakeQuotaResource( - None, - copy.deepcopy(compute_fakes.QUOTA), - loaded=True, - ) - self.compute_quotas_class_mock.update.return_value = FakeQuotaResource( - None, - copy.deepcopy(compute_fakes.QUOTA), - loaded=True, - ) - - self.volume_quotas_mock.update.return_value = FakeQuotaResource( - None, - copy.deepcopy(compute_fakes.QUOTA), - loaded=True, - ) - self.volume_quotas_class_mock.update.return_value = FakeQuotaResource( - None, - copy.deepcopy(compute_fakes.QUOTA), - loaded=True, - ) + super().setUp() - self.network_mock.update_quota = mock.Mock() + self.identity_sdk_client.find_project.return_value = self.projects[0] self.cmd = quota.SetQuota(self.app, None) def test_quota_set(self): + floating_ip_num = 100 + fix_ip_num = 100 + injected_file_num = 100 + injected_file_size_num = 10240 + injected_path_size_num = 255 + key_pair_num = 100 + core_num = 20 + ram_num = 51200 + instance_num = 10 + property_num = 128 + secgroup_rule_num = 20 + secgroup_num = 10 + servgroup_num = 10 + servgroup_members_num = 10 + arglist = [ - '--floating-ips', str(compute_fakes.floating_ip_num), - '--fixed-ips', str(compute_fakes.fix_ip_num), - '--injected-files', str(compute_fakes.injected_file_num), - '--injected-file-size', str(compute_fakes.injected_file_size_num), - '--injected-path-size', str(compute_fakes.injected_path_size_num), - '--key-pairs', str(compute_fakes.key_pair_num), - '--cores', str(compute_fakes.core_num), - '--ram', str(compute_fakes.ram_num), - '--instances', str(compute_fakes.instance_num), - '--properties', str(compute_fakes.property_num), - '--secgroup-rules', str(compute_fakes.secgroup_rule_num), - '--secgroups', str(compute_fakes.secgroup_num), - '--server-groups', str(compute_fakes.servgroup_num), - '--server-group-members', str(compute_fakes.servgroup_members_num), + '--floating-ips', + str(floating_ip_num), + '--fixed-ips', + str(fix_ip_num), + '--injected-files', + str(injected_file_num), + '--injected-file-size', + str(injected_file_size_num), + '--injected-path-size', + str(injected_path_size_num), + '--key-pairs', + str(key_pair_num), + '--cores', + str(core_num), + '--ram', + str(ram_num), + '--instances', + str(instance_num), + '--properties', + str(property_num), + '--secgroup-rules', + str(secgroup_rule_num), + '--secgroups', + str(secgroup_num), + '--server-groups', + str(servgroup_num), + '--server-group-members', + str(servgroup_members_num), self.projects[0].name, ] verifylist = [ - ('floating_ips', compute_fakes.floating_ip_num), - ('fixed_ips', compute_fakes.fix_ip_num), - ('injected_files', compute_fakes.injected_file_num), - ('injected_file_content_bytes', - compute_fakes.injected_file_size_num), - ('injected_file_path_bytes', compute_fakes.injected_path_size_num), - ('key_pairs', compute_fakes.key_pair_num), - ('cores', 
compute_fakes.core_num), - ('ram', compute_fakes.ram_num), - ('instances', compute_fakes.instance_num), - ('metadata_items', compute_fakes.property_num), - ('security_group_rules', compute_fakes.secgroup_rule_num), - ('security_groups', compute_fakes.secgroup_num), - ('server_groups', compute_fakes.servgroup_num), - ('server_group_members', compute_fakes.servgroup_members_num), + ('floating_ips', floating_ip_num), + ('fixed_ips', fix_ip_num), + ('injected_files', injected_file_num), + ( + 'injected_file_content_bytes', + injected_file_size_num, + ), + ('injected_file_path_bytes', injected_path_size_num), + ('key_pairs', key_pair_num), + ('cores', core_num), + ('ram', ram_num), + ('instances', instance_num), + ('metadata_items', property_num), + ('security_group_rules', secgroup_rule_num), + ('security_groups', secgroup_num), + ('server_groups', servgroup_num), + ('server_group_members', servgroup_members_num), + ('force', False), ('project', self.projects[0].name), ] self.app.client_manager.network_endpoint_enabled = False @@ -706,48 +455,60 @@ def test_quota_set(self): result = self.cmd.take_action(parsed_args) kwargs = { - 'floating_ips': compute_fakes.floating_ip_num, - 'fixed_ips': compute_fakes.fix_ip_num, - 'injected_files': compute_fakes.injected_file_num, - 'injected_file_content_bytes': - compute_fakes.injected_file_size_num, - 'injected_file_path_bytes': compute_fakes.injected_path_size_num, - 'key_pairs': compute_fakes.key_pair_num, - 'cores': compute_fakes.core_num, - 'ram': compute_fakes.ram_num, - 'instances': compute_fakes.instance_num, - 'metadata_items': compute_fakes.property_num, - 'security_group_rules': compute_fakes.secgroup_rule_num, - 'security_groups': compute_fakes.secgroup_num, - 'server_groups': compute_fakes.servgroup_num, - 'server_group_members': compute_fakes.servgroup_members_num, + 'floating_ips': floating_ip_num, + 'fixed_ips': fix_ip_num, + 'injected_files': injected_file_num, + 'injected_file_content_bytes': injected_file_size_num, # noqa: E501 + 'injected_file_path_bytes': injected_path_size_num, + 'key_pairs': key_pair_num, + 'cores': core_num, + 'ram': ram_num, + 'instances': instance_num, + 'metadata_items': property_num, + 'security_group_rules': secgroup_rule_num, + 'security_groups': secgroup_num, + 'server_groups': servgroup_num, + 'server_group_members': servgroup_members_num, } - self.compute_quotas_mock.update.assert_called_once_with( - self.projects[0].id, - **kwargs + self.compute_client.update_quota_set.assert_called_once_with( + self.projects[0].id, **kwargs ) self.assertIsNone(result) def test_quota_set_volume(self): + gigabytes = 1000 + volumes = 11 + snapshots = 10 + backups = 10 + backup_gigabytes = 1000 + per_volume_gigabytes = -1 + arglist = [ - '--gigabytes', str(volume_fakes.QUOTA['gigabytes']), - '--snapshots', str(volume_fakes.QUOTA['snapshots']), - '--volumes', str(volume_fakes.QUOTA['volumes']), - '--backups', str(volume_fakes.QUOTA['backups']), - '--backup-gigabytes', str(volume_fakes.QUOTA['backup_gigabytes']), + '--gigabytes', + str(gigabytes), + '--snapshots', + str(snapshots), + '--volumes', + str(volumes), + '--backups', + str(backups), + '--backup-gigabytes', + str(backup_gigabytes), '--per-volume-gigabytes', - str(volume_fakes.QUOTA['per_volume_gigabytes']), + str(per_volume_gigabytes), self.projects[0].name, ] verifylist = [ - ('gigabytes', volume_fakes.QUOTA['gigabytes']), - ('snapshots', volume_fakes.QUOTA['snapshots']), - ('volumes', volume_fakes.QUOTA['volumes']), - ('backups', volume_fakes.QUOTA['backups']), - 
('backup_gigabytes', volume_fakes.QUOTA['backup_gigabytes']), - ('per_volume_gigabytes', - volume_fakes.QUOTA['per_volume_gigabytes']), + ('gigabytes', gigabytes), + ('snapshots', snapshots), + ('volumes', volumes), + ('backups', backups), + ('backup_gigabytes', backup_gigabytes), + ( + 'per_volume_gigabytes', + per_volume_gigabytes, + ), ('project', self.projects[0].name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -755,41 +516,55 @@ def test_quota_set_volume(self): result = self.cmd.take_action(parsed_args) kwargs = { - 'gigabytes': volume_fakes.QUOTA['gigabytes'], - 'snapshots': volume_fakes.QUOTA['snapshots'], - 'volumes': volume_fakes.QUOTA['volumes'], - 'backups': volume_fakes.QUOTA['backups'], - 'backup_gigabytes': volume_fakes.QUOTA['backup_gigabytes'], - 'per_volume_gigabytes': volume_fakes.QUOTA['per_volume_gigabytes'] + 'gigabytes': gigabytes, + 'snapshots': snapshots, + 'volumes': volumes, + 'backups': backups, + 'backup_gigabytes': backup_gigabytes, + 'per_volume_gigabytes': per_volume_gigabytes, } - self.volume_quotas_mock.update.assert_called_once_with( - self.projects[0].id, - **kwargs + self.volume_sdk_client.update_quota_set.assert_called_once_with( + self.projects[0].id, **kwargs ) self.assertIsNone(result) def test_quota_set_volume_with_volume_type(self): + gigabytes = 1000 + volumes = 11 + snapshots = 10 + backups = 10 + backup_gigabytes = 1000 + per_volume_gigabytes = -1 + arglist = [ - '--gigabytes', str(volume_fakes.QUOTA['gigabytes']), - '--snapshots', str(volume_fakes.QUOTA['snapshots']), - '--volumes', str(volume_fakes.QUOTA['volumes']), - '--backups', str(volume_fakes.QUOTA['backups']), - '--backup-gigabytes', str(volume_fakes.QUOTA['backup_gigabytes']), + '--gigabytes', + str(gigabytes), + '--snapshots', + str(snapshots), + '--volumes', + str(volumes), + '--backups', + str(backups), + '--backup-gigabytes', + str(backup_gigabytes), '--per-volume-gigabytes', - str(volume_fakes.QUOTA['per_volume_gigabytes']), - '--volume-type', 'volume_type_backend', + str(per_volume_gigabytes), + '--volume-type', + 'volume_type_backend', self.projects[0].name, ] verifylist = [ - ('gigabytes', volume_fakes.QUOTA['gigabytes']), - ('snapshots', volume_fakes.QUOTA['snapshots']), - ('volumes', volume_fakes.QUOTA['volumes']), - ('backups', volume_fakes.QUOTA['backups']), - ('backup_gigabytes', volume_fakes.QUOTA['backup_gigabytes']), - ('per_volume_gigabytes', - volume_fakes.QUOTA['per_volume_gigabytes']), + ('gigabytes', gigabytes), + ('snapshots', snapshots), + ('volumes', volumes), + ('backups', backups), + ('backup_gigabytes', backup_gigabytes), + ( + 'per_volume_gigabytes', + per_volume_gigabytes, + ), ('volume_type', 'volume_type_backend'), ('project', self.projects[0].name), ] @@ -798,103 +573,153 @@ def test_quota_set_volume_with_volume_type(self): result = self.cmd.take_action(parsed_args) kwargs = { - 'gigabytes_volume_type_backend': volume_fakes.QUOTA['gigabytes'], - 'snapshots_volume_type_backend': volume_fakes.QUOTA['snapshots'], - 'volumes_volume_type_backend': volume_fakes.QUOTA['volumes'], - 'backups': volume_fakes.QUOTA['backups'], - 'backup_gigabytes': volume_fakes.QUOTA['backup_gigabytes'], - 'per_volume_gigabytes': volume_fakes.QUOTA['per_volume_gigabytes'] + 'gigabytes_volume_type_backend': gigabytes, + 'snapshots_volume_type_backend': snapshots, + 'volumes_volume_type_backend': volumes, + 'backups': backups, + 'backup_gigabytes': backup_gigabytes, + 'per_volume_gigabytes': per_volume_gigabytes, } - 
self.volume_quotas_mock.update.assert_called_once_with( - self.projects[0].id, - **kwargs + self.volume_sdk_client.update_quota_set.assert_called_once_with( + self.projects[0].id, **kwargs ) self.assertIsNone(result) def test_quota_set_network(self): + subnet = 10 + network = 10 + floatingip = 50 + subnetpool = -1 + security_group_rule = 100 + security_group = 10 + router = 10 + rbac_policy = -1 + port = 50 + arglist = [ - '--subnets', str(network_fakes.QUOTA['subnet']), - '--networks', str(network_fakes.QUOTA['network']), - '--floating-ips', str(network_fakes.QUOTA['floatingip']), - '--subnetpools', str(network_fakes.QUOTA['subnetpool']), + '--subnets', + str(subnet), + '--networks', + str(network), + '--floating-ips', + str(floatingip), + '--subnetpools', + str(subnetpool), '--secgroup-rules', - str(network_fakes.QUOTA['security_group_rule']), - '--secgroups', str(network_fakes.QUOTA['security_group']), - '--routers', str(network_fakes.QUOTA['router']), - '--rbac-policies', str(network_fakes.QUOTA['rbac_policy']), - '--ports', str(network_fakes.QUOTA['port']), + str(security_group_rule), + '--secgroups', + str(security_group), + '--routers', + str(router), + '--rbac-policies', + str(rbac_policy), + '--ports', + str(port), self.projects[0].name, ] verifylist = [ - ('subnet', network_fakes.QUOTA['subnet']), - ('network', network_fakes.QUOTA['network']), - ('floatingip', network_fakes.QUOTA['floatingip']), - ('subnetpool', network_fakes.QUOTA['subnetpool']), - ('security_group_rule', - network_fakes.QUOTA['security_group_rule']), - ('security_group', network_fakes.QUOTA['security_group']), - ('router', network_fakes.QUOTA['router']), - ('rbac_policy', network_fakes.QUOTA['rbac_policy']), - ('port', network_fakes.QUOTA['port']), + ('subnet', subnet), + ('network', network), + ('floatingip', floatingip), + ('subnetpool', subnetpool), + ( + 'security_group_rule', + security_group_rule, + ), + ('security_group', security_group), + ('router', router), + ('rbac_policy', rbac_policy), + ('port', port), + ('force', False), ('project', self.projects[0].name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) kwargs = { - 'subnet': network_fakes.QUOTA['subnet'], - 'network': network_fakes.QUOTA['network'], - 'floatingip': network_fakes.QUOTA['floatingip'], - 'subnetpool': network_fakes.QUOTA['subnetpool'], - 'security_group_rule': - network_fakes.QUOTA['security_group_rule'], - 'security_group': network_fakes.QUOTA['security_group'], - 'router': network_fakes.QUOTA['router'], - 'rbac_policy': network_fakes.QUOTA['rbac_policy'], - 'port': network_fakes.QUOTA['port'], + 'check_limit': True, + 'subnet': subnet, + 'network': network, + 'floatingip': floatingip, + 'subnetpool': subnetpool, + 'security_group_rule': security_group_rule, + 'security_group': security_group, + 'router': router, + 'rbac_policy': rbac_policy, + 'port': port, } - self.network_mock.update_quota.assert_called_once_with( - self.projects[0].id, - **kwargs + self.network_client.update_quota.assert_called_once_with( + self.projects[0].id, **kwargs ) self.assertIsNone(result) def test_quota_set_with_class(self): + floating_ip_num = 100 + fix_ip_num = 100 + injected_file_num = 100 + injected_file_size_num = 10240 + injected_path_size_num = 255 + key_pair_num = 100 + core_num = 20 + ram_num = 51200 + instance_num = 10 + property_num = 128 + servgroup_num = 10 + servgroup_members_num = 10 + volumes = 11 + network = 10 + arglist = [ - '--injected-files', 
str(compute_fakes.injected_file_num), - '--injected-file-size', str(compute_fakes.injected_file_size_num), - '--injected-path-size', str(compute_fakes.injected_path_size_num), - '--key-pairs', str(compute_fakes.key_pair_num), - '--cores', str(compute_fakes.core_num), - '--ram', str(compute_fakes.ram_num), - '--instances', str(compute_fakes.instance_num), - '--properties', str(compute_fakes.property_num), - '--server-groups', str(compute_fakes.servgroup_num), - '--server-group-members', str(compute_fakes.servgroup_members_num), - '--gigabytes', str(compute_fakes.floating_ip_num), - '--snapshots', str(compute_fakes.fix_ip_num), - '--volumes', str(volume_fakes.QUOTA['volumes']), - '--network', str(network_fakes.QUOTA['network']), + '--injected-files', + str(injected_file_num), + '--injected-file-size', + str(injected_file_size_num), + '--injected-path-size', + str(injected_path_size_num), + '--key-pairs', + str(key_pair_num), + '--cores', + str(core_num), + '--ram', + str(ram_num), + '--instances', + str(instance_num), + '--properties', + str(property_num), + '--server-groups', + str(servgroup_num), + '--server-group-members', + str(servgroup_members_num), + '--gigabytes', + str(floating_ip_num), + '--snapshots', + str(fix_ip_num), + '--volumes', + str(volumes), + '--network', + str(network), '--class', self.projects[0].name, ] verifylist = [ - ('injected_files', compute_fakes.injected_file_num), - ('injected_file_content_bytes', - compute_fakes.injected_file_size_num), - ('injected_file_path_bytes', compute_fakes.injected_path_size_num), - ('key_pairs', compute_fakes.key_pair_num), - ('cores', compute_fakes.core_num), - ('ram', compute_fakes.ram_num), - ('instances', compute_fakes.instance_num), - ('metadata_items', compute_fakes.property_num), - ('server_groups', compute_fakes.servgroup_num), - ('server_group_members', compute_fakes.servgroup_members_num), - ('gigabytes', compute_fakes.floating_ip_num), - ('snapshots', compute_fakes.fix_ip_num), - ('volumes', volume_fakes.QUOTA['volumes']), - ('network', network_fakes.QUOTA['network']), + ('injected_files', injected_file_num), + ( + 'injected_file_content_bytes', + injected_file_size_num, + ), + ('injected_file_path_bytes', injected_path_size_num), + ('key_pairs', key_pair_num), + ('cores', core_num), + ('ram', ram_num), + ('instances', instance_num), + ('metadata_items', property_num), + ('server_groups', servgroup_num), + ('server_group_members', servgroup_members_num), + ('gigabytes', floating_ip_num), + ('snapshots', fix_ip_num), + ('volumes', volumes), + ('network', network), ('quota_class', True), ('project', self.projects[0].name), ] @@ -903,51 +728,157 @@ def test_quota_set_with_class(self): result = self.cmd.take_action(parsed_args) kwargs_compute = { - 'injected_files': compute_fakes.injected_file_num, - 'injected_file_content_bytes': - compute_fakes.injected_file_size_num, - 'injected_file_path_bytes': compute_fakes.injected_path_size_num, - 'key_pairs': compute_fakes.key_pair_num, - 'cores': compute_fakes.core_num, - 'ram': compute_fakes.ram_num, - 'instances': compute_fakes.instance_num, - 'metadata_items': compute_fakes.property_num, - 'server_groups': compute_fakes.servgroup_num, - 'server_group_members': compute_fakes.servgroup_members_num, + 'injected_files': injected_file_num, + 'injected_file_content_bytes': injected_file_size_num, # noqa: E501 + 'injected_file_path_bytes': injected_path_size_num, + 'key_pairs': key_pair_num, + 'cores': core_num, + 'ram': ram_num, + 'instances': instance_num, + 'metadata_items': 
property_num, + 'server_groups': servgroup_num, + 'server_group_members': servgroup_members_num, } kwargs_volume = { - 'gigabytes': compute_fakes.floating_ip_num, - 'snapshots': compute_fakes.fix_ip_num, - 'volumes': volume_fakes.QUOTA['volumes'], + 'gigabytes': floating_ip_num, + 'snapshots': fix_ip_num, + 'volumes': volumes, } - self.compute_quotas_class_mock.update.assert_called_with( - self.projects[0].name, - **kwargs_compute + self.compute_client.update_quota_class_set.assert_called_with( + self.projects[0].name, **kwargs_compute ) - self.volume_quotas_class_mock.update.assert_called_with( - self.projects[0].name, - **kwargs_volume + self.volume_sdk_client.update_quota_class_set.assert_called_with( + self.projects[0].name, **kwargs_volume ) - self.assertNotCalled(self.network_mock.update_quota) + self.assertNotCalled(self.network_client.update_quota) + self.assertIsNone(result) + + def test_quota_set_default(self): + floating_ip_num = 100 + fix_ip_num = 100 + injected_file_num = 100 + injected_file_size_num = 10240 + injected_path_size_num = 255 + key_pair_num = 100 + core_num = 20 + ram_num = 51200 + instance_num = 10 + property_num = 128 + servgroup_num = 10 + servgroup_members_num = 10 + volumes = 11 + network = 10 + + arglist = [ + '--injected-files', + str(injected_file_num), + '--injected-file-size', + str(injected_file_size_num), + '--injected-path-size', + str(injected_path_size_num), + '--key-pairs', + str(key_pair_num), + '--cores', + str(core_num), + '--ram', + str(ram_num), + '--instances', + str(instance_num), + '--properties', + str(property_num), + '--server-groups', + str(servgroup_num), + '--server-group-members', + str(servgroup_members_num), + '--gigabytes', + str(floating_ip_num), + '--snapshots', + str(fix_ip_num), + '--volumes', + str(volumes), + '--network', + str(network), + '--default', + ] + verifylist = [ + ('injected_files', injected_file_num), + ( + 'injected_file_content_bytes', + injected_file_size_num, + ), + ('injected_file_path_bytes', injected_path_size_num), + ('key_pairs', key_pair_num), + ('cores', core_num), + ('ram', ram_num), + ('instances', instance_num), + ('metadata_items', property_num), + ('server_groups', servgroup_num), + ('server_group_members', servgroup_members_num), + ('gigabytes', floating_ip_num), + ('snapshots', fix_ip_num), + ('volumes', volumes), + ('network', network), + ('default', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + kwargs_compute = { + 'injected_files': injected_file_num, + 'injected_file_content_bytes': injected_file_size_num, # noqa: E501 + 'injected_file_path_bytes': injected_path_size_num, + 'key_pairs': key_pair_num, + 'cores': core_num, + 'ram': ram_num, + 'instances': instance_num, + 'metadata_items': property_num, + 'server_groups': servgroup_num, + 'server_group_members': servgroup_members_num, + } + kwargs_volume = { + 'gigabytes': floating_ip_num, + 'snapshots': fix_ip_num, + 'volumes': volumes, + } + + self.compute_client.update_quota_class_set.assert_called_with( + 'default', **kwargs_compute + ) + self.volume_sdk_client.update_quota_class_set.assert_called_with( + 'default', **kwargs_volume + ) + self.assertNotCalled(self.network_client.update_quota) self.assertIsNone(result) def test_quota_set_with_force(self): + core_num = 20 + ram_num = 51200 + instance_num = 10 + volumes = 11 + subnet = 10 + arglist = [ - '--cores', str(compute_fakes.core_num), - '--ram', str(compute_fakes.ram_num), - '--instances', 
str(compute_fakes.instance_num), - '--volumes', str(volume_fakes.QUOTA['volumes']), - '--subnets', str(network_fakes.QUOTA['subnet']), + '--cores', + str(core_num), + '--ram', + str(ram_num), + '--instances', + str(instance_num), + '--volumes', + str(volumes), + '--subnets', + str(subnet), '--force', self.projects[0].name, ] verifylist = [ - ('cores', compute_fakes.core_num), - ('ram', compute_fakes.ram_num), - ('instances', compute_fakes.instance_num), - ('volumes', volume_fakes.QUOTA['volumes']), - ('subnet', network_fakes.QUOTA['subnet']), + ('cores', core_num), + ('ram', ram_num), + ('instances', instance_num), + ('volumes', volumes), + ('subnet', subnet), ('force', True), ('project', self.projects[0].name), ] @@ -957,44 +888,44 @@ def test_quota_set_with_force(self): result = self.cmd.take_action(parsed_args) kwargs_compute = { - 'cores': compute_fakes.core_num, - 'ram': compute_fakes.ram_num, - 'instances': compute_fakes.instance_num, + 'cores': core_num, + 'ram': ram_num, + 'instances': instance_num, 'force': True, } kwargs_volume = { - 'volumes': volume_fakes.QUOTA['volumes'], + 'volumes': volumes, } kwargs_network = { - 'subnet': network_fakes.QUOTA['subnet'], + 'subnet': subnet, 'force': True, } - self.compute_quotas_mock.update.assert_called_once_with( - self.projects[0].id, - **kwargs_compute + self.compute_client.update_quota_set.assert_called_once_with( + self.projects[0].id, **kwargs_compute ) - self.volume_quotas_mock.update.assert_called_once_with( - self.projects[0].id, - **kwargs_volume + self.volume_sdk_client.update_quota_set.assert_called_once_with( + self.projects[0].id, **kwargs_volume ) - self.network_mock.update_quota.assert_called_once_with( - self.projects[0].id, - **kwargs_network + self.network_client.update_quota.assert_called_once_with( + self.projects[0].id, **kwargs_network ) self.assertIsNone(result) def test_quota_set_with_no_force(self): arglist = [ - '--subnets', str(network_fakes.QUOTA['subnet']), - '--volumes', str(volume_fakes.QUOTA['volumes']), - '--cores', str(compute_fakes.core_num), + '--subnets', + str(10), + '--volumes', + str(30), + '--cores', + str(20), '--no-force', self.projects[0].name, ] verifylist = [ - ('subnet', network_fakes.QUOTA['subnet']), - ('volumes', volume_fakes.QUOTA['volumes']), - ('cores', compute_fakes.core_num), + ('subnet', 10), + ('volumes', 30), + ('cores', 20), ('force', False), ('project', self.projects[0].name), ] @@ -1003,79 +934,84 @@ def test_quota_set_with_no_force(self): result = self.cmd.take_action(parsed_args) kwargs_compute = { - 'cores': compute_fakes.core_num, - 'force': False, + 'cores': 20, } kwargs_volume = { - 'volumes': volume_fakes.QUOTA['volumes'], + 'volumes': 30, } kwargs_network = { - 'subnet': network_fakes.QUOTA['subnet'], + 'subnet': 10, 'check_limit': True, } - self.compute_quotas_mock.update.assert_called_once_with( - self.projects[0].id, - **kwargs_compute + self.compute_client.update_quota_set.assert_called_once_with( + self.projects[0].id, **kwargs_compute ) - self.volume_quotas_mock.update.assert_called_once_with( - self.projects[0].id, - **kwargs_volume + self.volume_sdk_client.update_quota_set.assert_called_once_with( + self.projects[0].id, **kwargs_volume ) - self.network_mock.update_quota.assert_called_once_with( - self.projects[0].id, - **kwargs_network + self.network_client.update_quota.assert_called_once_with( + self.projects[0].id, **kwargs_network ) self.assertIsNone(result) class TestQuotaShow(TestQuota): + _network_quota_details = { + 'floating_ips': {'limit': 0, 'reserved': 
0, 'used': 0}, + 'health_monitors': {'limit': 0, 'reserved': 0, 'used': 0}, + 'l7_policies': {'limit': 0, 'reserved': 0, 'used': 0}, + 'listeners': {'limit': 0, 'reserved': 0, 'used': 0}, + 'load_balancers': {'limit': 0, 'reserved': 0, 'used': 0}, + 'networks': {'limit': 0, 'reserved': 0, 'used': 0}, + 'pools': {'limit': 0, 'reserved': 0, 'used': 0}, + 'ports': {'limit': 0, 'reserved': 0, 'used': 0}, + 'rbac_policies': {'limit': 0, 'reserved': 0, 'used': 0}, + 'routers': {'limit': 0, 'reserved': 0, 'used': 0}, + 'security_group_rules': {'limit': 0, 'reserved': 0, 'used': 0}, + 'security_groups': {'limit': 0, 'reserved': 0, 'used': 0}, + 'subnet_pools': {'limit': 0, 'reserved': 0, 'used': 0}, + 'subnets': {'limit': 0, 'reserved': 0, 'used': 0}, + } def setUp(self): super().setUp() - self.compute_quota = compute_fakes.FakeQuota.create_one_comp_quota() - self.compute_quotas_mock.get.return_value = self.compute_quota - self.compute_default_quota = \ - compute_fakes.FakeQuota.create_one_default_comp_quota() - self.compute_quotas_mock.defaults.return_value = \ - self.compute_default_quota - self.compute_quotas_class_mock.get.return_value = FakeQuotaResource( - None, - copy.deepcopy(compute_fakes.QUOTA), - loaded=True, - ) - - self.volume_quota = volume_fakes.create_one_vol_quota() - self.volume_quotas_mock.get.return_value = self.volume_quota - self.volume_default_quota = volume_fakes.create_one_default_vol_quota() - self.volume_quotas_mock.defaults.return_value = \ - self.volume_default_quota - self.volume_quotas_class_mock.get.return_value = FakeQuotaResource( - None, - copy.deepcopy(volume_fakes.QUOTA), - loaded=True, - ) - - fake_network_endpoint = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ENDPOINT), - loaded=True, - ) - - self.service_catalog_mock.get_endpoints.return_value = { - 'network': fake_network_endpoint - } + self.identity_sdk_client.find_project.return_value = self.projects[0] + + self.compute_client.get_quota_set.return_value = ( + sdk_fakes.generate_fake_resource(_compute_quota_set.QuotaSet) + ) + self.default_compute_quotas = sdk_fakes.generate_fake_resource( + _compute_quota_set.QuotaSet + ) + self.compute_client.get_quota_set_defaults.return_value = ( + self.default_compute_quotas + ) - self.app.client_manager.network = network_fakes.FakeNetworkV2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, + self.volume_sdk_client.get_quota_set.return_value = ( + sdk_fakes.generate_fake_resource(_volume_quota_set.QuotaSet) + ) + self.default_volume_quotas = sdk_fakes.generate_fake_resource( + _volume_quota_set.QuotaSet + ) + self.volume_sdk_client.get_quota_set_defaults.return_value = ( + self.default_volume_quotas ) - self.network = self.app.client_manager.network - self.network.get_quota = mock.Mock( - return_value=network_fakes.QUOTA, + + def get_network_quota_mock(*args, **kwargs): + if kwargs.get("details"): + return sdk_fakes.generate_fake_resource( + _network_quota_set.QuotaDetails, + **self._network_quota_details, + ) + return sdk_fakes.generate_fake_resource(_network_quota_set.Quota) + + self.network_client.get_quota.side_effect = get_network_quota_mock + self.default_network_quotas = sdk_fakes.generate_fake_resource( + _network_quota_set.QuotaDefault ) - self.network.get_quota_default = mock.Mock( - return_value=network_fakes.QUOTA, + self.network_client.get_quota_default.return_value = ( + self.default_network_quotas ) self.cmd = quota.ShowQuota(self.app, None) @@ -1092,19 +1028,39 @@ def test_quota_show(self): self.cmd.take_action(parsed_args) - 
self.compute_quotas_mock.get.assert_called_once_with( + self.compute_client.get_quota_set.assert_called_once_with( self.projects[0].id, - detail=False, + usage=False, ) - self.volume_quotas_mock.get.assert_called_once_with( + self.volume_sdk_client.get_quota_set.assert_called_once_with( self.projects[0].id, usage=False, ) - self.network.get_quota.assert_called_once_with( + self.network_client.get_quota.assert_called_once_with( self.projects[0].id, details=False, ) - self.assertNotCalled(self.network.get_quota_default) + self.assertNotCalled(self.network_client.get_quota_default) + + def test_quota_show__missing_services(self): + self.app.client_manager.compute_endpoint_enabled = False + self.app.client_manager.volume_endpoint_enabled = False + self.app.client_manager.network_endpoint_enabled = False + + arglist = [ + self.projects[0].name, + ] + verifylist = [ + ('service', 'all'), + ('project', self.projects[0].name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.cmd.take_action(parsed_args) + + self.compute_client.get_quota_set.assert_not_called() + self.volume_sdk_client.get_quota_set.assert_not_called() + self.network_client.get_quota.assert_not_called() def test_quota_show__with_compute(self): arglist = [ @@ -1119,12 +1075,12 @@ def test_quota_show__with_compute(self): self.cmd.take_action(parsed_args) - self.compute_quotas_mock.get.assert_called_once_with( + self.compute_client.get_quota_set.assert_called_once_with( self.projects[0].id, - detail=False, + usage=False, ) - self.volume_quotas_mock.get.assert_not_called() - self.network.get_quota.assert_not_called() + self.volume_sdk_client.get_quota_set.assert_not_called() + self.network_client.get_quota.assert_not_called() def test_quota_show__with_volume(self): arglist = [ @@ -1139,12 +1095,12 @@ def test_quota_show__with_volume(self): self.cmd.take_action(parsed_args) - self.compute_quotas_mock.get.assert_not_called() - self.volume_quotas_mock.get.assert_called_once_with( + self.compute_client.get_quota_set.assert_not_called() + self.volume_sdk_client.get_quota_set.assert_called_once_with( self.projects[0].id, usage=False, ) - self.network.get_quota.assert_not_called() + self.network_client.get_quota.assert_not_called() def test_quota_show__with_network(self): arglist = [ @@ -1159,67 +1115,97 @@ def test_quota_show__with_network(self): self.cmd.take_action(parsed_args) - self.compute_quotas_mock.get.assert_not_called() - self.volume_quotas_mock.get.assert_not_called() - self.network.get_quota.assert_called_once_with( + self.compute_client.get_quota_set.assert_not_called() + self.volume_sdk_client.get_quota_set.assert_not_called() + self.network_client.get_quota.assert_called_once_with( self.projects[0].id, details=False, ) - self.assertNotCalled(self.network.get_quota_default) + self.assertNotCalled(self.network_client.get_quota_default) + + def test_quota_show__with_network_and_usage(self): + # ensure we do not interfere with other tests + self._network_quota_details = copy.deepcopy( + self._network_quota_details + ) + # set a couple of resources + self._network_quota_details["floating_ips"].update( + limit=30, reserved=20, used=7 + ) + self._network_quota_details["security_group_rules"].update( + limit=9, reserved=7, used=5 + ) - def test_quota_show__with_default(self): arglist = [ - '--default', + '--network', + '--usage', self.projects[0].name, ] verifylist = [ - ('default', True), + ('service', 'network'), ('project', self.projects[0].name), ] parsed_args = self.check_parser(self.cmd, arglist, 
verifylist) - self.cmd.take_action(parsed_args) + headers, result_gen = self.cmd.take_action(parsed_args) - self.compute_quotas_mock.defaults.assert_called_once_with( - self.projects[0].id, - ) - self.volume_quotas_mock.defaults.assert_called_once_with( - self.projects[0].id, + self.assertEqual(('Resource', 'Limit', 'In Use', 'Reserved'), headers) + + result = sorted(result_gen) + + self.assertEqual( + [ + ('floating-ips', 30, 7, 20), + ('health_monitors', 0, 0, 0), + ('l7_policies', 0, 0, 0), + ('listeners', 0, 0, 0), + ('load_balancers', 0, 0, 0), + ('networks', 0, 0, 0), + ('pools', 0, 0, 0), + ('ports', 0, 0, 0), + ('rbac_policies', 0, 0, 0), + ('routers', 0, 0, 0), + ('secgroup-rules', 9, 5, 7), + ('secgroups', 0, 0, 0), + ('subnet_pools', 0, 0, 0), + ('subnets', 0, 0, 0), + ], + result, ) - self.network.get_quota_default.assert_called_once_with( + + self.compute_client.get_quota_set.assert_not_called() + self.volume_sdk_client.get_quota_set.assert_not_called() + self.network_client.get_quota.assert_called_once_with( self.projects[0].id, + details=True, ) - self.assertNotCalled(self.network.get_quota) + self.assertNotCalled(self.network_client.get_quota_default) - def test_quota_show__with_class(self): + def test_quota_show__with_default(self): arglist = [ - '--class', - 'default', + '--default', + self.projects[0].name, ] verifylist = [ - ('quota_class', True), - ('project', 'default'), # project is actually a class here + ('default', True), + ('project', self.projects[0].name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.compute_quotas_class_mock.get.assert_called_once_with('default') - self.volume_quotas_class_mock.get.assert_called_once_with('default') - # neutron doesn't have the concept of quota classes - self.assertNotCalled(self.network.get_quota) - self.assertNotCalled(self.network.get_quota_default) + self.compute_client.get_quota_set_defaults.assert_called_once_with( + self.projects[0].id, + ) + self.volume_sdk_client.get_quota_set_defaults.assert_called_once_with( + self.projects[0].id, + ) + self.network_client.get_quota_default.assert_called_once_with( + self.projects[0].id, + ) + self.assertNotCalled(self.network_client.get_quota) def test_quota_show__with_usage(self): - # update mocks to return detailed quota instead - self.compute_quota = \ - compute_fakes.FakeQuota.create_one_comp_detailed_quota() - self.compute_quotas_mock.get.return_value = self.compute_quota - self.volume_quota = volume_fakes.create_one_detailed_quota() - self.volume_quotas_mock.get.return_value = self.volume_quota - self.network.get_quota.return_value = \ - network_fakes.FakeQuota.create_one_net_detailed_quota() - arglist = [ '--usage', self.projects[0].name, @@ -1232,15 +1218,15 @@ def test_quota_show__with_usage(self): self.cmd.take_action(parsed_args) - self.compute_quotas_mock.get.assert_called_once_with( + self.compute_client.get_quota_set.assert_called_once_with( self.projects[0].id, - detail=True, + usage=True, ) - self.volume_quotas_mock.get.assert_called_once_with( + self.volume_sdk_client.get_quota_set.assert_called_once_with( self.projects[0].id, usage=True, ) - self.network.get_quota.assert_called_once_with( + self.network_client.get_quota.assert_called_once_with( self.projects[0].id, details=True, ) @@ -1254,16 +1240,16 @@ def test_quota_show__no_project(self): self.cmd.take_action(parsed_args) - self.compute_quotas_mock.get.assert_called_once_with( - identity_fakes.project_id, detail=False + 
self.compute_client.get_quota_set.assert_called_once_with( + self.projects[1].id, usage=False ) - self.volume_quotas_mock.get.assert_called_once_with( - identity_fakes.project_id, usage=False + self.volume_sdk_client.get_quota_set.assert_called_once_with( + self.projects[1].id, usage=False ) - self.network.get_quota.assert_called_once_with( - identity_fakes.project_id, details=False + self.network_client.get_quota.assert_called_once_with( + self.projects[1].id, details=False ) - self.assertNotCalled(self.network.get_quota_default) + self.assertNotCalled(self.network_client.get_quota_default) class TestQuotaDelete(TestQuota): @@ -1272,7 +1258,11 @@ class TestQuotaDelete(TestQuota): def setUp(self): super().setUp() - self.network_mock.delete_quota = mock.Mock() + self.identity_sdk_client.find_project.return_value = self.projects[0] + + self.compute_client.revert_quota_set.return_value = None + self.volume_sdk_client.revert_quota_set.return_value = None + self.network_client.delete_quota.return_value = None self.cmd = quota.DeleteQuota(self.app, None) @@ -1291,14 +1281,16 @@ def test_delete(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.projects_mock.get.assert_called_once_with(self.projects[0].id) - self.compute_quotas_mock.delete.assert_called_once_with( + self.identity_sdk_client.find_project.assert_called_once_with( + self.projects[0].id, ignore_missing=False + ) + self.compute_client.revert_quota_set.assert_called_once_with( self.projects[0].id, ) - self.volume_quotas_mock.delete.assert_called_once_with( + self.volume_sdk_client.revert_quota_set.assert_called_once_with( self.projects[0].id, ) - self.network_mock.delete_quota.assert_called_once_with( + self.network_client.delete_quota.assert_called_once_with( self.projects[0].id, ) @@ -1318,12 +1310,14 @@ def test_delete__compute(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.projects_mock.get.assert_called_once_with(self.projects[0].id) - self.compute_quotas_mock.delete.assert_called_once_with( + self.identity_sdk_client.find_project.assert_called_once_with( + self.projects[0].id, ignore_missing=False + ) + self.compute_client.revert_quota_set.assert_called_once_with( self.projects[0].id, ) - self.volume_quotas_mock.delete.assert_not_called() - self.network_mock.delete_quota.assert_not_called() + self.volume_sdk_client.revert_quota_set.assert_not_called() + self.network_client.delete_quota.assert_not_called() def test_delete__volume(self): """Delete volume quotas only""" @@ -1341,12 +1335,14 @@ def test_delete__volume(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.projects_mock.get.assert_called_once_with(self.projects[0].id) - self.compute_quotas_mock.delete.assert_not_called() - self.volume_quotas_mock.delete.assert_called_once_with( + self.identity_sdk_client.find_project.assert_called_once_with( + self.projects[0].id, ignore_missing=False + ) + self.compute_client.revert_quota_set.assert_not_called() + self.volume_sdk_client.revert_quota_set.assert_called_once_with( self.projects[0].id, ) - self.network_mock.delete_quota.assert_not_called() + self.network_client.delete_quota.assert_not_called() def test_delete__network(self): """Delete network quotas only""" @@ -1364,9 +1360,11 @@ def test_delete__network(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.projects_mock.get.assert_called_once_with(self.projects[0].id) - self.compute_quotas_mock.delete.assert_not_called() - 
self.volume_quotas_mock.delete.assert_not_called() - self.network_mock.delete_quota.assert_called_once_with( + self.identity_sdk_client.find_project.assert_called_once_with( + self.projects[0].id, ignore_missing=False + ) + self.compute_client.revert_quota_set.assert_not_called() + self.volume_sdk_client.revert_quota_set.assert_not_called() + self.network_client.delete_quota.assert_called_once_with( self.projects[0].id, ) diff --git a/openstackclient/tests/unit/compute/v2/fakes.py b/openstackclient/tests/unit/compute/v2/fakes.py index f7f0750956..37f93b772b 100644 --- a/openstackclient/tests/unit/compute/v2/fakes.py +++ b/openstackclient/tests/unit/compute/v2/fakes.py @@ -13,1326 +13,575 @@ # under the License. # -import copy import random +import re from unittest import mock import uuid -from novaclient import api_versions +from keystoneauth1 import discover +from openstack.compute.v2 import _proxy +from openstack.compute.v2 import availability_zone as _availability_zone +from openstack.compute.v2 import extension as _extension from openstack.compute.v2 import flavor as _flavor -from openstack.compute.v2 import hypervisor as _hypervisor +from openstack.compute.v2 import limits as _limits from openstack.compute.v2 import migration as _migration from openstack.compute.v2 import server as _server -from openstack.compute.v2 import server_group as _server_group +from openstack.compute.v2 import server_action as _server_action from openstack.compute.v2 import server_interface as _server_interface from openstack.compute.v2 import server_migration as _server_migration -from openstack.compute.v2 import service -from openstack.compute.v2 import volume_attachment +from openstack.compute.v2 import volume_attachment as _volume_attachment -from openstackclient.api import compute_v2 -from openstackclient.tests.unit import fakes -from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes +from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit.image.v2 import fakes as image_fakes from openstackclient.tests.unit.network.v2 import fakes as network_fakes from openstackclient.tests.unit import utils -from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes - -floating_ip_num = 100 -fix_ip_num = 100 -injected_file_num = 100 -injected_file_size_num = 10240 -injected_path_size_num = 255 -key_pair_num = 100 -core_num = 20 -ram_num = 51200 -instance_num = 10 -property_num = 128 -secgroup_rule_num = 20 -secgroup_num = 10 -servgroup_num = 10 -servgroup_members_num = 10 -project_name = 'project_test' -QUOTA = { - 'project': project_name, - 'floating-ips': floating_ip_num, - 'fix-ips': fix_ip_num, - 'injected-files': injected_file_num, - 'injected-file-size': injected_file_size_num, - 'injected-path-size': injected_path_size_num, - 'key-pairs': key_pair_num, - 'cores': core_num, - 'ram': ram_num, - 'instances': instance_num, - 'properties': property_num, - 'secgroup_rules': secgroup_rule_num, - 'secgroups': secgroup_num, - 'server-groups': servgroup_num, - 'server-group-members': servgroup_members_num -} - -QUOTA_columns = tuple(sorted(QUOTA)) -QUOTA_data = tuple(QUOTA[x] for x in sorted(QUOTA)) - - -class FakeAggregate(object): - """Fake one aggregate.""" - - @staticmethod - def create_one_aggregate(attrs=None): - """Create a fake aggregate. 
- - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id and other attributes - """ - attrs = attrs or {} - - # Set default attribute - aggregate_info = { - "name": "aggregate-name-" + uuid.uuid4().hex, - "availability_zone": "ag_zone", - "hosts": [], - "id": "aggregate-id-" + uuid.uuid4().hex, - "metadata": { - "availability_zone": "ag_zone", - "key1": "value1", - } - } - - # Overwrite default attributes. - aggregate_info.update(attrs) - - aggregate = fakes.FakeResource( - info=copy.deepcopy(aggregate_info), - loaded=True) - return aggregate - - @staticmethod - def create_aggregates(attrs=None, count=2): - """Create multiple fake aggregates. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of aggregates to fake - :return: - A list of FakeResource objects faking the aggregates - """ - aggregates = [] - for i in range(0, count): - aggregates.append(FakeAggregate.create_one_aggregate(attrs)) - - return aggregates - - @staticmethod - def get_aggregates(aggregates=None, count=2): - """Get an iterable MagicMock object with a list of faked aggregates. - - If aggregates list is provided, then initialize the Mock object - with the list. Otherwise create one. - - :param List aggregates: - A list of FakeResource objects faking aggregates - :param int count: - The number of aggregates to fake - :return: - An iterable Mock object with side_effect set to a list of faked - aggregates - """ - if aggregates is None: - aggregates = FakeAggregate.create_aggregates(count) - return mock.Mock(side_effect=aggregates) +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes -class FakeComputev2Client(object): +class FakeClientMixin: + def setUp(self): + super().setUp() - def __init__(self, **kwargs): - self.agents = mock.Mock() - self.agents.resource_class = fakes.FakeResource(None, {}) + self.app.client_manager.compute = mock.Mock(_proxy.Proxy) + self.compute_client = self.app.client_manager.compute + self.set_compute_api_version() # default to the lowest - self.aggregates = mock.Mock() - self.aggregates.resource_class = fakes.FakeResource(None, {}) + def set_compute_api_version(self, version: str = '2.1'): + """Set a fake server version. - self.availability_zones = mock.Mock() - self.availability_zones.resource_class = fakes.FakeResource(None, {}) + :param version: The fake microversion to "support". This should be a + string of format '2.xx'. + :returns: None + """ + assert re.match(r'2.\d+', version) - self.images = mock.Mock() - self.images.resource_class = fakes.FakeResource(None, {}) + self.compute_client.default_microversion = version + self.compute_client.get_endpoint_data.return_value = ( + discover.EndpointData( + min_microversion='2.1', # nova has not bumped this yet + max_microversion=version, + ) + ) - self.limits = mock.Mock() - self.limits.resource_class = fakes.FakeResource(None, {}) - self.servers = mock.Mock() - self.servers.resource_class = fakes.FakeResource(None, {}) +class TestComputev2( + identity_fakes.FakeClientMixin, + network_fakes.FakeClientMixin, + image_fakes.FakeClientMixin, + volume_fakes.FakeClientMixin, + FakeClientMixin, + utils.TestCommand, +): ... - self.services = mock.Mock() - self.services.resource_class = fakes.FakeResource(None, {}) - self.extensions = mock.Mock() - self.extensions.resource_class = fakes.FakeResource(None, {}) +def create_one_agent(attrs=None): + """Create a fake agent. 
- self.flavors = mock.Mock() + :param dict attrs: A dictionary with all attributes + :return: A dicionarty faking the agent + """ - self.flavor_access = mock.Mock() - self.flavor_access.resource_class = fakes.FakeResource(None, {}) + attrs = attrs or {} - self.quotas = mock.Mock() - self.quotas.resource_class = fakes.FakeResource(None, {}) + # set default attributes. + agent_attrs = { + 'agent_id': 'agent-id-' + uuid.uuid4().hex, + 'os': 'agent-os-' + uuid.uuid4().hex, + 'architecture': 'agent-architecture', + 'version': '8.0', + 'url': 'http://127.0.0.1', + 'md5hash': 'agent-md5hash', + 'hypervisor': 'hypervisor', + } - self.quota_classes = mock.Mock() - self.quota_classes.resource_class = fakes.FakeResource(None, {}) + assert not set(attrs) - set(agent_attrs), 'unknown keys' - self.usage = mock.Mock() - self.usage.resource_class = fakes.FakeResource(None, {}) + # Overwrite default attributes. + agent_attrs.update(attrs) - self.volumes = mock.Mock() - self.volumes.resource_class = fakes.FakeResource(None, {}) + return agent_attrs - self.hypervisors = mock.Mock() - self.hypervisors.resource_class = fakes.FakeResource(None, {}) - self.hypervisors_stats = mock.Mock() - self.hypervisors_stats.resource_class = fakes.FakeResource(None, {}) +def create_agents(attrs=None, count=2): + """Create multiple fake agents. - self.keypairs = mock.Mock() - self.keypairs.resource_class = fakes.FakeResource(None, {}) + :param dict attrs: A dictionary with all attributes + :param int count: The number of agents to fake + :return: A list of dictionaries faking the agents + """ + agents = [] + for i in range(0, count): + agents.append(create_one_agent(attrs)) - self.hosts = mock.Mock() - self.hosts.resource_class = fakes.FakeResource(None, {}) + return agents - self.server_groups = mock.Mock() - self.server_groups.resource_class = fakes.FakeResource(None, {}) - self.server_migrations = mock.Mock() - self.server_migrations.resource_class = fakes.FakeResource(None, {}) +def create_one_extension(attrs=None): + """Create a fake extension. - self.instance_action = mock.Mock() - self.instance_action.resource_class = fakes.FakeResource(None, {}) + :param dict attrs: A dictionary with all attributes + :return: A fake :class:`~openstack.compute.v2.extension.Extension` object + """ + attrs = attrs or {} - self.migrations = mock.Mock() - self.migrations.resource_class = fakes.FakeResource(None, {}) + # Set default attributes. + extension_info = { + 'alias': 'NMN', + 'description': 'description-' + uuid.uuid4().hex, + 'links': [ + { + "href": "https://github.com/openstack/compute-api", + "type": "text/html", + "rel": "describedby", + } + ], + 'name': 'name-' + uuid.uuid4().hex, + 'namespace': ( + 'http://docs.openstack.org/compute/ext/multinic/api/v1.1' + ), + 'updated_at': '2014-01-07T12:00:0-00:00', + } - self.auth_token = kwargs['token'] + # Overwrite default attributes. + extension_info.update(attrs) - self.management_url = kwargs['endpoint'] + extension = _extension.Extension(**extension_info) + return extension - self.api_version = api_versions.APIVersion('2.1') +def create_one_security_group(attrs=None): + """Create a fake security group. -class TestComputev2(utils.TestCommand): + :param dict attrs: A dictionary with all attributes + :return: A dictionary faking the security group + """ + attrs = attrs or {} - def setUp(self): - super(TestComputev2, self).setUp() + # Set default attributes. 
+ security_group_attrs = { + 'id': 'security-group-id-' + uuid.uuid4().hex, + 'name': 'security-group-name-' + uuid.uuid4().hex, + 'description': 'security-group-description-' + uuid.uuid4().hex, + 'tenant_id': 'project-id-' + uuid.uuid4().hex, + 'rules': [], + } - self.app.client_manager.compute = FakeComputev2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) + assert not set(attrs) - set(security_group_attrs), 'unknown keys' - self.app.client_manager.compute.api = compute_v2.APIv2( - session=self.app.client_manager.session, - endpoint=fakes.AUTH_URL, - ) + # Overwrite default attributes. + security_group_attrs.update(attrs) + return security_group_attrs - self.app.client_manager.identity = identity_fakes.FakeIdentityv2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.image = image_fakes.FakeImagev2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) +def create_security_groups(attrs=None, count=2): + """Create multiple fake security groups. - self.app.client_manager.network = network_fakes.FakeNetworkV2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) + :param dict attrs: A dictionary with all attributes + :param int count: The number of security groups to fake + :return: A list of dictionaries faking the security groups + """ + security_groups = [] + for i in range(0, count): + security_groups.append(create_one_security_group(attrs)) - self.app.client_manager.volume = volume_fakes.FakeVolumeClient( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) + return security_groups -class FakeAgent(object): - """Fake one or more agent.""" +def create_one_security_group_rule(attrs=None): + """Create a fake security group rule. - @staticmethod - def create_one_agent(attrs=None): - """Create a fake agent. + :param dict attrs: A dictionary with all attributes + :return: A dictionary faking the security group rule + """ + attrs = attrs or {} - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with agent_id, os, and so on - """ + # Set default attributes. + security_group_rule_attrs = { + 'from_port': 0, + 'group': {}, + 'id': 'security-group-rule-id-' + uuid.uuid4().hex, + 'ip_protocol': 'tcp', + 'ip_range': {'cidr': '0.0.0.0/0'}, + 'parent_group_id': 'security-group-id-' + uuid.uuid4().hex, + 'to_port': 0, + } - attrs = attrs or {} - - # set default attributes. - agent_info = { - 'agent_id': 'agent-id-' + uuid.uuid4().hex, - 'os': 'agent-os-' + uuid.uuid4().hex, - 'architecture': 'agent-architecture', - 'version': '8.0', - 'url': 'http://127.0.0.1', - 'md5hash': 'agent-md5hash', - 'hypervisor': 'hypervisor', - } - - # Overwrite default attributes. - agent_info.update(attrs) - - agent = fakes.FakeResource(info=copy.deepcopy(agent_info), - loaded=True) - return agent - - @staticmethod - def create_agents(attrs=None, count=2): - """Create multiple fake agents. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of agents to fake - :return: - A list of FakeResource objects faking the agents - """ - agents = [] - for i in range(0, count): - agents.append(FakeAgent.create_one_agent(attrs)) + assert not set(attrs) - set(security_group_rule_attrs), 'unknown keys' - return agents + # Overwrite default attributes. + security_group_rule_attrs.update(attrs) + return security_group_rule_attrs -class FakeExtension(object): - """Fake one or more extension.""" - @staticmethod - def create_one_extension(attrs=None): - """Create a fake extension. 
+def create_security_group_rules(attrs=None, count=2): + """Create multiple fake security group rules. - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object with name, namespace, etc. - """ - attrs = attrs or {} - - # Set default attributes. - extension_info = { - 'name': 'name-' + uuid.uuid4().hex, - 'namespace': ( - 'http://docs.openstack.org/compute/ext/multinic/api/v1.1'), - 'description': 'description-' + uuid.uuid4().hex, - 'updated': '2014-01-07T12:00:0-00:00', - 'alias': 'NMN', - 'links': ('[{"href":' - '"https://github.com/openstack/compute-api", "type":' - ' "text/html", "rel": "describedby"}]') - } - - # Overwrite default attributes. - extension_info.update(attrs) - - extension = fakes.FakeResource( - info=copy.deepcopy(extension_info), - loaded=True) - return extension - - -class FakeSecurityGroup(object): - """Fake one or more security groups.""" - - @staticmethod - def create_one_security_group(attrs=None): - """Create a fake security group. - - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id, name, etc. - """ - attrs = attrs or {} - - # Set default attributes. - security_group_attrs = { - 'id': 'security-group-id-' + uuid.uuid4().hex, - 'name': 'security-group-name-' + uuid.uuid4().hex, - 'description': 'security-group-description-' + uuid.uuid4().hex, - 'tenant_id': 'project-id-' + uuid.uuid4().hex, - 'rules': [], - } - - # Overwrite default attributes. - security_group_attrs.update(attrs) - return security_group_attrs - - @staticmethod - def create_security_groups(attrs=None, count=2): - """Create multiple fake security groups. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of security groups to fake - :return: - A list of FakeResource objects faking the security groups - """ - security_groups = [] - for i in range(0, count): - security_groups.append( - FakeSecurityGroup.create_one_security_group(attrs)) - - return security_groups - - @staticmethod - def get_security_groups(security_groups=None, count=2): - """Get an iterable MagicMock with a list of faked security groups. - - If security groups list is provided, then initialize the Mock object - with the list. Otherwise create one. - - :param List security_groups: - A list of FakeResource objects faking security groups - :param int count: - The number of security groups to fake - :return: - An iterable Mock object with side_effect set to a list of faked - security groups - """ - if security_groups is None: - security_groups = FakeSecurityGroup.create_security_groups(count) - return mock.Mock(side_effect=security_groups) + :param dict attrs: A dictionary with all attributes + :param int count: The number of security group rules to fake + :return: A list of dictionaries faking the security group rules + """ + security_group_rules = [] + for i in range(0, count): + security_group_rules.append(create_one_security_group_rule(attrs)) + return security_group_rules -class FakeSecurityGroupRule(object): - """Fake one or more security group rules.""" - @staticmethod - def create_one_security_group_rule(attrs=None): - """Create a fake security group rule. +def create_one_server(attrs=None): + """Create a fake server - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id, etc. - """ - attrs = attrs or {} - - # Set default attributes. 
- security_group_rule_attrs = { - 'from_port': 0, - 'group': {}, - 'id': 'security-group-rule-id-' + uuid.uuid4().hex, - 'ip_protocol': 'tcp', - 'ip_range': {'cidr': '0.0.0.0/0'}, - 'parent_group_id': 'security-group-id-' + uuid.uuid4().hex, - 'to_port': 0, - } - - # Overwrite default attributes. - security_group_rule_attrs.update(attrs) - - return security_group_rule_attrs - - @staticmethod - def create_security_group_rules(attrs=None, count=2): - """Create multiple fake security group rules. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of security group rules to fake - :return: - A list of FakeResource objects faking the security group rules - """ - security_group_rules = [] - for i in range(0, count): - security_group_rules.append( - FakeSecurityGroupRule.create_one_security_group_rule(attrs)) + :param dict attrs: A dictionary with all attributes + :return: A fake :class:`~openstack.compute.v2.server.Server` object, + """ + attrs = attrs or {} - return security_group_rules + # Set default attributes. + server_info = { + 'id': 'server-id-' + uuid.uuid4().hex, + 'name': 'server-name-' + uuid.uuid4().hex, + 'metadata': {}, + 'image': { + 'id': 'image-id-' + uuid.uuid4().hex, + }, + 'flavor': { + 'id': 'flavor-id-' + uuid.uuid4().hex, + }, + 'OS-EXT-STS:power_state': 1, + } + # Overwrite default attributes. + server_info.update(attrs) + server = _server.Server(**server_info) -class FakeServer(object): - """Fake one or more compute servers.""" + # Override methods + server.trigger_crash_dump = mock.MagicMock() - @staticmethod - def create_one_server(attrs=None, methods=None): - """Create a fake server. + return server - :param dict attrs: - A dictionary with all attributes - :param dict methods: - A dictionary with all methods - :return: - A FakeResource object, with id, name, metadata, and so on - """ - attrs = attrs or {} - methods = methods or {} - - # Set default attributes. - server_info = { - 'id': 'server-id-' + uuid.uuid4().hex, - 'name': 'server-name-' + uuid.uuid4().hex, - 'metadata': {}, - 'image': { - 'id': 'image-id-' + uuid.uuid4().hex, - }, - 'flavor': { - 'id': 'flavor-id-' + uuid.uuid4().hex, - }, - 'OS-EXT-STS:power_state': 1, - } - - # Overwrite default attributes. - server_info.update(attrs) - - server = fakes.FakeResource(info=copy.deepcopy(server_info), - methods=methods, - loaded=True) - return server - - @staticmethod - def create_servers(attrs=None, methods=None, count=2): - """Create multiple fake servers. - - :param dict attrs: - A dictionary with all attributes - :param dict methods: - A dictionary with all methods - :param int count: - The number of servers to fake - :return: - A list of FakeResource objects faking the servers - """ - servers = [] - for i in range(0, count): - servers.append(FakeServer.create_one_server(attrs, methods)) - - return servers - - @staticmethod - def create_one_sdk_server(attrs=None, methods=None): - """Create a fake server for testing migration to sdk - - :param dict attrs: - A dictionary with all attributes - :param dict methods: - A dictionary with all methods - :return: - A openstack.compute.v2.server.Server object, - with id, name, metadata, and so on - """ - attrs = attrs or {} - methods = methods or {} - - # Set default attributes. 
- server_info = { - 'id': 'server-id-' + uuid.uuid4().hex, - 'name': 'server-name-' + uuid.uuid4().hex, - 'metadata': {}, - 'image': { - 'id': 'image-id-' + uuid.uuid4().hex, - }, - 'flavor': { - 'id': 'flavor-id-' + uuid.uuid4().hex, - }, - 'OS-EXT-STS:power_state': 1, - } - - # Overwrite default attributes. - server_info.update(attrs) - server = _server.Server(**server_info) - - # Override methods - server.trigger_crash_dump = mock.MagicMock() - - return server - - @staticmethod - def create_sdk_servers(attrs=None, methods=None, count=2): - """Create multiple fake servers for testing migration to sdk - - :param dict attrs: - A dictionary with all attributes - :param dict methods: - A dictionary with all methods - :param int count: - The number of servers to fake - :return: - A list of openstack.compute.v2.server.Server objects - faking the servers - """ - servers = [] - for i in range(0, count): - servers.append(FakeServer.create_one_sdk_server(attrs, methods)) - - return servers - - @staticmethod - def get_servers(servers=None, count=2): - """Get an iterable MagicMock object with a list of faked servers. - - If servers list is provided, then initialize the Mock object with the - list. Otherwise create one. - - :param List servers: - A list of FakeResource objects faking servers - :param int count: - The number of servers to fake - :return: - An iterable Mock object with side_effect set to a list of faked - servers - """ - if servers is None: - servers = FakeServer.create_servers(count) - return mock.Mock(side_effect=servers) +def create_servers(attrs=None, count=2): + """Create multiple fake servers -class FakeServerEvent(object): - """Fake one or more server event.""" + :param dict attrs: A dictionary with all attributes + :param int count: The number of servers to fake + :return: A list of fake :class:`openstack.compute.v2.server.Server` objects + """ + servers = [] + for i in range(0, count): + servers.append(create_one_server(attrs)) - @staticmethod - def create_one_server_event(attrs=None): - """Create a fake server event. + return servers - :param attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id and other attributes - """ - attrs = attrs or {} - - # Set default attributes - server_event_info = { - "instance_uuid": "server-event-" + uuid.uuid4().hex, - "user_id": "user-id-" + uuid.uuid4().hex, - "start_time": "2017-02-27T07:47:13.000000", - "request_id": "req-" + uuid.uuid4().hex, - "action": "create", - "message": None, - "project_id": "project-id-" + uuid.uuid4().hex, - "events": [{ + +def create_one_server_action(attrs=None): + """Create a fake server action. 
+ + :param attrs: A dictionary with all attributes + :return: A fake :class:`~openstack.compute.v2.server_action.ServerAction` + object + """ + attrs = attrs or {} + + # Set default attributes + server_action_info = { + "server_id": "server-event-" + uuid.uuid4().hex, + "user_id": "user-id-" + uuid.uuid4().hex, + "start_time": "2017-02-27T07:47:13.000000", + "request_id": "req-" + uuid.uuid4().hex, + "action": "create", + "message": None, + "project_id": "project-id-" + uuid.uuid4().hex, + "events": [ + { "finish_time": "2017-02-27T07:47:25.000000", "start_time": "2017-02-27T07:47:15.000000", "traceback": None, "event": "compute__do_build_and_run_instance", - "result": "Success" - }] - } - # Overwrite default attributes - server_event_info.update(attrs) - - server_event = fakes.FakeResource( - info=copy.deepcopy(server_event_info), - loaded=True, - ) - return server_event + "result": "Success", + } + ], + } + # Overwrite default attributes + server_action_info.update(attrs) + # We handle events separately since they're nested resources + events = [ + _server_action.ServerActionEvent(**event) + for event in server_action_info.pop('events') + ] -class FakeService(object): - """Fake one or more services.""" + server_action = _server_action.ServerAction( + **server_action_info, + events=events, + ) + return server_action - @staticmethod - def create_one_service(attrs=None): - """Create a fake service. - :param dict attrs: - A dictionary with all attributes - :return: - A fake Service object, with id, host, binary, and so on - """ - attrs = attrs or {} - - # Set default attributes. - service_info = { - 'id': 'id-' + uuid.uuid4().hex, - 'host': 'host-' + uuid.uuid4().hex, - 'binary': 'binary-' + uuid.uuid4().hex, - 'status': 'enabled', - 'availability_zone': 'zone-' + uuid.uuid4().hex, - 'state': 'state-' + uuid.uuid4().hex, - 'updated_at': 'time-' + uuid.uuid4().hex, - 'disabled_reason': 'earthquake', - # Introduced in API microversion 2.11 - 'is_forced_down': False, - } - - # Overwrite default attributes. - service_info.update(attrs) - - return service.Service(**service_info) - - @staticmethod - def create_services(attrs=None, count=2): - """Create multiple fake services. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of services to fake - :return: - A list of FakeResource objects faking the services - """ - services = [] - for i in range(0, count): - services.append(FakeService.create_one_service(attrs)) +def create_one_flavor(attrs=None): + """Create a fake flavor. + + :param dict attrs: A dictionary with all attributes + :return: A fake :class:`~openstack.compute.v2.flavor.Flavor` object + """ + attrs = attrs or {} + + # Set default attributes. + flavor_info = { + 'id': 'flavor-id-' + uuid.uuid4().hex, + 'name': 'flavor-name-' + uuid.uuid4().hex, + 'ram': 8192, + 'vcpus': 4, + 'disk': 128, + 'swap': 0, + 'rxtx_factor': 1.0, + 'OS-FLV-DISABLED:disabled': False, + 'os-flavor-access:is_public': True, + 'description': 'description', + 'OS-FLV-EXT-DATA:ephemeral': 0, + 'extra_specs': {'property': 'value'}, + } - return services + # Overwrite default attributes. + flavor_info.update(attrs) + flavor = _flavor.Flavor(**flavor_info) -class FakeFlavor(object): - """Fake one or more flavors.""" + return flavor - @staticmethod - def create_one_flavor(attrs=None): - """Create a fake flavor. 
- :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id, name, ram, vcpus, and so on - """ - attrs = attrs or {} +def create_flavors(attrs=None, count=2): + """Create multiple fake flavors. - # Set default attributes. - flavor_info = { - 'id': 'flavor-id-' + uuid.uuid4().hex, - 'name': 'flavor-name-' + uuid.uuid4().hex, - 'ram': 8192, - 'vcpus': 4, - 'disk': 128, - 'swap': 0, - 'rxtx_factor': 1.0, - 'OS-FLV-DISABLED:disabled': False, - 'os-flavor-access:is_public': True, - 'description': 'description', - 'OS-FLV-EXT-DATA:ephemeral': 0, - 'extra_specs': {'property': 'value'}, - } - - # Overwrite default attributes. - flavor_info.update(attrs) - - flavor = _flavor.Flavor(**flavor_info) - - return flavor - - @staticmethod - def create_flavors(attrs=None, count=2): - """Create multiple fake flavors. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of flavors to fake - :return: - A list of FakeResource objects faking the flavors - """ - flavors = [] - for i in range(0, count): - flavors.append(FakeFlavor.create_one_flavor(attrs)) - - return flavors - - @staticmethod - def get_flavors(flavors=None, count=2): - """Get an iterable MagicMock object with a list of faked flavors. - - If flavors list is provided, then initialize the Mock object with the - list. Otherwise create one. - - :param List flavors: - A list of FakeResource objects faking flavors - :param int count: - The number of flavors to fake - :return: - An iterable Mock object with side_effect set to a list of faked - flavors - """ - if flavors is None: - flavors = FakeFlavor.create_flavors(count) - return mock.Mock(side_effect=flavors) + :param dict attrs: A dictionary with all attributes + :param int count: The number of flavors to fake + :return: A list of fake :class:`openstack.compute.v2.flavor.Flavor` objects + """ + flavors = [] + for i in range(0, count): + flavors.append(create_one_flavor(attrs)) + return flavors -class FakeFlavorAccess(object): - """Fake one or more flavor accesses.""" - @staticmethod - def create_one_flavor_access(attrs=None): - """Create a fake flavor access. +def create_one_flavor_access(attrs=None): + """Create a fake flavor access. - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with flavor_id, tenat_id - """ - attrs = attrs or {} + :param dict attrs: A dictionary with all attributes + :return: A dictionary faking the flavor access + """ + attrs = attrs or {} - # Set default attributes. - flavor_access_info = { - 'flavor_id': 'flavor-id-' + uuid.uuid4().hex, - 'tenant_id': 'tenant-id-' + uuid.uuid4().hex, - } + # Set default attributes. + flavor_access_info = { + 'flavor_id': 'flavor-id-' + uuid.uuid4().hex, + 'tenant_id': 'tenant-id-' + uuid.uuid4().hex, + } - # Overwrite default attributes. - flavor_access_info.update(attrs) + assert not set(attrs) - set(flavor_access_info), 'unknown keys' - flavor_access = fakes.FakeResource( - info=copy.deepcopy(flavor_access_info), loaded=True) + # Overwrite default attributes. + flavor_access_info.update(attrs) - return flavor_access + return flavor_access_info -class FakeKeypair(object): - """Fake one or more keypairs.""" +def create_one_availability_zone(attrs=None): + """Create a fake AZ. 
- @staticmethod - def create_one_keypair(attrs=None, no_pri=False): - """Create a fake keypair + :param dict attrs: A dictionary with all attributes + :return: A fake + :class:`~openstack.compute.v2.availability_zone.AvailabilityZone` object + """ + attrs = attrs or {} - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, name, fingerprint, and so on - """ - attrs = attrs or {} - - # Set default attributes. - keypair_info = { - 'name': 'keypair-name-' + uuid.uuid4().hex, - 'type': 'ssh', - 'fingerprint': 'dummy', - 'public_key': 'dummy', - 'user_id': 'user' - } - if not no_pri: - keypair_info['private_key'] = 'private_key' - - # Overwrite default attributes. - keypair_info.update(attrs) - - keypair = fakes.FakeResource(info=copy.deepcopy(keypair_info), - loaded=True) - - return keypair - - @staticmethod - def create_keypairs(attrs=None, count=2): - """Create multiple fake keypairs. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of keypairs to fake - :return: - A list of FakeResource objects faking the keypairs - """ + # Set default attributes. + host_name = uuid.uuid4().hex + service_name = uuid.uuid4().hex + availability_zone_info = { + 'name': uuid.uuid4().hex, + 'state': {'available': True}, + 'hosts': { + host_name: { + service_name: { + 'available': True, + 'active': True, + 'updated_at': '2023-01-01T00:00:00.000000', + } + } + }, + } - keypairs = [] - for i in range(0, count): - keypairs.append(FakeKeypair.create_one_keypair(attrs)) + # Overwrite default attributes. + availability_zone_info.update(attrs) - return keypairs + availability_zone = _availability_zone.AvailabilityZone( + **availability_zone_info + ) + return availability_zone - @staticmethod - def get_keypairs(keypairs=None, count=2): - """Get an iterable MagicMock object with a list of faked keypairs. - If keypairs list is provided, then initialize the Mock object with the - list. Otherwise create one. +def create_availability_zones(attrs=None, count=2): + """Create multiple fake AZs. - :param List keypairs: - A list of FakeResource objects faking keypairs - :param int count: - The number of keypairs to fake - :return: - An iterable Mock object with side_effect set to a list of faked - keypairs - """ - if keypairs is None: - keypairs = FakeKeypair.create_keypairs(count) - return mock.Mock(side_effect=keypairs) + :param dict attrs: A dictionary with all attributes + :param int count: The number of availability zones to fake + :return: A list of fake + openstack.compute.v2.availability_zone.AvailabilityZone objects + """ + availability_zones = [] + for i in range(0, count): + availability_zone = create_one_availability_zone(attrs) + availability_zones.append(availability_zone) + return availability_zones -class FakeAvailabilityZone(object): - """Fake one or more compute availability zones (AZs).""" - @staticmethod - def create_one_availability_zone(attrs=None): - """Create a fake AZ. +def create_one_floating_ip(attrs=None): + """Create a fake floating IP. - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object with zoneName, zoneState, etc. - """ - attrs = attrs or {} - - # Set default attributes. 
- host_name = uuid.uuid4().hex - service_name = uuid.uuid4().hex - service_updated_at = uuid.uuid4().hex - availability_zone = { - 'zoneName': uuid.uuid4().hex, - 'zoneState': {'available': True}, - 'hosts': {host_name: {service_name: { - 'available': True, - 'active': True, - 'updated_at': service_updated_at, - }}}, - } - - # Overwrite default attributes. - availability_zone.update(attrs) - - availability_zone = fakes.FakeResource( - info=copy.deepcopy(availability_zone), - loaded=True) - return availability_zone - - @staticmethod - def create_availability_zones(attrs=None, count=2): - """Create multiple fake AZs. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of AZs to fake - :return: - A list of FakeResource objects faking the AZs - """ - availability_zones = [] - for i in range(0, count): - availability_zone = \ - FakeAvailabilityZone.create_one_availability_zone(attrs) - availability_zones.append(availability_zone) + :param dict attrs: A dictionary with all attributes + :return: A dictionary faking the floating IP + """ + attrs = attrs or {} - return availability_zones + # Set default attributes. + floating_ip_attrs = { + 'id': 'floating-ip-id-' + uuid.uuid4().hex, + 'ip': '1.0.9.0', + 'fixed_ip': '2.0.9.0', + 'instance_id': 'server-id-' + uuid.uuid4().hex, + 'pool': 'public', + } + assert not set(attrs) - set(floating_ip_attrs), 'unknown keys' -class FakeFloatingIP(object): - """Fake one or more floating ip.""" + # Overwrite default attributes. + floating_ip_attrs.update(attrs) - @staticmethod - def create_one_floating_ip(attrs=None): - """Create a fake floating ip. + return floating_ip_attrs - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id, ip, and so on - """ - attrs = attrs or {} - - # Set default attributes. - floating_ip_attrs = { - 'id': 'floating-ip-id-' + uuid.uuid4().hex, - 'ip': '1.0.9.0', - 'fixed_ip': '2.0.9.0', - 'instance_id': 'server-id-' + uuid.uuid4().hex, - 'pool': 'public', - } - - # Overwrite default attributes. - floating_ip_attrs.update(attrs) - - return floating_ip_attrs - - @staticmethod - def create_floating_ips(attrs=None, count=2): - """Create multiple fake floating ips. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of floating ips to fake - :return: - A list of FakeResource objects faking the floating ips - """ - floating_ips = [] - for i in range(0, count): - floating_ips.append(FakeFloatingIP.create_one_floating_ip(attrs)) - return floating_ips - - @staticmethod - def get_floating_ips(floating_ips=None, count=2): - """Get an iterable MagicMock object with a list of faked floating ips. - - If floating_ips list is provided, then initialize the Mock object - with the list. Otherwise create one. - - :param List floating_ips: - A list of FakeResource objects faking floating ips - :param int count: - The number of floating ips to fake - :return: - An iterable Mock object with side_effect set to a list of faked - floating ips - """ - if floating_ips is None: - floating_ips = FakeFloatingIP.create_floating_ips(count) - return mock.Mock(side_effect=floating_ips) +def create_floating_ips(attrs=None, count=2): + """Create multiple fake floating IPs. 
-class FakeFloatingIPPool(object): - """Fake one or more floating ip pools.""" + :param dict attrs: A dictionary with all attributes + :param int count: The number of floating IPs to fake + :return: A list of dictionaries faking the floating IPs + """ + floating_ips = [] + for i in range(0, count): + floating_ips.append(create_one_floating_ip(attrs)) + return floating_ips - @staticmethod - def create_one_floating_ip_pool(attrs=None): - """Create a fake floating ip pool. - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with name, etc - """ - if attrs is None: - attrs = {} +def create_one_floating_ip_pool(attrs=None): + """Create a fake floating IP pool. + + :param dict attrs: A dictionary with all attributes + :return: A dictionary faking the floating IP pool + """ + if attrs is None: + attrs = {} + + # Set default attributes. + floating_ip_pool_attrs = { + 'name': 'floating-ip-pool-name-' + uuid.uuid4().hex, + } - # Set default attributes. - floating_ip_pool_attrs = { - 'name': 'floating-ip-pool-name-' + uuid.uuid4().hex, - } + assert not set(attrs) - set(floating_ip_pool_attrs), 'unknown keys' - # Overwrite default attributes. - floating_ip_pool_attrs.update(attrs) + # Overwrite default attributes. + floating_ip_pool_attrs.update(attrs) - return floating_ip_pool_attrs + return floating_ip_pool_attrs - @staticmethod - def create_floating_ip_pools(attrs=None, count=2): - """Create multiple fake floating ip pools. - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of floating ip pools to fake - :return: - A list of FakeResource objects faking the floating ip pools - """ - floating_ip_pools = [] - for i in range(0, count): - floating_ip_pools.append( - FakeFloatingIPPool.create_one_floating_ip_pool(attrs) - ) - return floating_ip_pools +def create_floating_ip_pools(attrs=None, count=2): + """Create multiple fake floating IP pools. + :param dict attrs: A dictionary with all attributes + :param int count: The number of floating IP pools to fake + :return: A list of dictionaries faking the floating IP pools + """ + floating_ip_pools = [] + for i in range(0, count): + floating_ip_pools.append(create_one_floating_ip_pool(attrs)) + return floating_ip_pools -class FakeNetwork(object): - """Fake one or more networks.""" - @staticmethod - def create_one_network(attrs=None): - """Create a fake network. +def create_one_network(attrs=None): + """Create a fake network. - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id, label, cidr and so on - """ - attrs = attrs or {} - - # Set default attributes. - network_attrs = { - 'bridge': 'br100', - 'bridge_interface': None, - 'broadcast': '10.0.0.255', - 'cidr': '10.0.0.0/24', - 'cidr_v6': None, - 'created_at': '2016-02-11T11:17:37.000000', - 'deleted': False, - 'deleted_at': None, - 'dhcp_server': '10.0.0.1', - 'dhcp_start': '10.0.0.2', - 'dns1': '8.8.4.4', - 'dns2': None, - 'enable_dhcp': True, - 'gateway': '10.0.0.1', - 'gateway_v6': None, - 'host': None, - 'id': 'network-id-' + uuid.uuid4().hex, - 'injected': False, - 'label': 'network-label-' + uuid.uuid4().hex, - 'mtu': None, - 'multi_host': False, - 'netmask': '255.255.255.0', - 'netmask_v6': None, - 'priority': None, - 'project_id': 'project-id-' + uuid.uuid4().hex, - 'rxtx_base': None, - 'share_address': False, - 'updated_at': None, - 'vlan': None, - 'vpn_private_address': None, - 'vpn_public_address': None, - 'vpn_public_port': None, - } - - # Overwrite default attributes. 
- network_attrs.update(attrs) - - return network_attrs - - @staticmethod - def create_networks(attrs=None, count=2): - """Create multiple fake networks. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of networks to fake - :return: - A list of FakeResource objects faking the networks - """ - networks = [] - for i in range(0, count): - networks.append(FakeNetwork.create_one_network(attrs)) - - return networks - - @staticmethod - def get_networks(networks=None, count=2): - """Get an iterable MagicMock object with a list of faked networks. - - If networks list is provided, then initialize the Mock object with the - list. Otherwise create one. - - :param List networks: - A list of FakeResource objects faking networks - :param int count: - The number of networks to fake - :return: - An iterable Mock object with side_effect set to a list of faked - networks - """ - if networks is None: - networks = FakeNetwork.create_networks(count=count) - return mock.Mock(side_effect=networks) + :param dict attrs: A dictionary with all attributes + :return: A dictionary faking the network + """ + attrs = attrs or {} + # Set default attributes. + network_attrs = { + 'bridge': 'br100', + 'bridge_interface': None, + 'broadcast': '10.0.0.255', + 'cidr': '10.0.0.0/24', + 'cidr_v6': None, + 'created_at': '2016-02-11T11:17:37.000000', + 'deleted': False, + 'deleted_at': None, + 'dhcp_server': '10.0.0.1', + 'dhcp_start': '10.0.0.2', + 'dns1': '8.8.4.4', + 'dns2': None, + 'enable_dhcp': True, + 'gateway': '10.0.0.1', + 'gateway_v6': None, + 'host': None, + 'id': 'network-id-' + uuid.uuid4().hex, + 'injected': False, + 'label': 'network-label-' + uuid.uuid4().hex, + 'mtu': None, + 'multi_host': False, + 'netmask': '255.255.255.0', + 'netmask_v6': None, + 'priority': None, + 'project_id': 'project-id-' + uuid.uuid4().hex, + 'rxtx_base': None, + 'share_address': False, + 'updated_at': None, + 'vlan': None, + 'vpn_private_address': None, + 'vpn_public_address': None, + 'vpn_public_port': None, + } -class FakeHost(object): - """Fake one host.""" + assert not set(attrs) - set(network_attrs), 'unknown keys' - @staticmethod - def create_one_host(attrs=None): - """Create a fake host. + # Overwrite default attributes. + network_attrs.update(attrs) - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with uuid and other attributes - """ - attrs = attrs or {} - - # Set default attributes. - host_info = { - "service_id": 1, - "host": "host1", - "uuid": 'host-id-' + uuid.uuid4().hex, - "vcpus": 10, - "memory_mb": 100, - "local_gb": 100, - "vcpus_used": 5, - "memory_mb_used": 50, - "local_gb_used": 10, - "hypervisor_type": "xen", - "hypervisor_version": 1, - "hypervisor_hostname": "devstack1", - "free_ram_mb": 50, - "free_disk_gb": 50, - "current_workload": 10, - "running_vms": 1, - "cpu_info": "", - "disk_available_least": 1, - "host_ip": "10.10.10.10", - "supported_instances": "", - "metrics": "", - "pci_stats": "", - "extra_resources": "", - "stats": "", - "numa_topology": "", - "ram_allocation_ratio": 1.0, - "cpu_allocation_ratio": 1.0, - "zone": 'zone-' + uuid.uuid4().hex, - "host_name": 'name-' + uuid.uuid4().hex, - "service": 'service-' + uuid.uuid4().hex, - "cpu": 4, - "disk_gb": 100, - 'project': 'project-' + uuid.uuid4().hex, - } - host_info.update(attrs) - return host_info - - -class FakeUsage(object): - """Fake one or more usage.""" - - @staticmethod - def create_one_usage(attrs=None): - """Create a fake usage. 
- - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with tenant_id and other attributes - """ - if attrs is None: - attrs = {} - - # Set default attributes. - usage_info = { - 'tenant_id': 'usage-tenant-id-' + uuid.uuid4().hex, - 'total_memory_mb_usage': 512.0, - 'total_vcpus_usage': 1.0, - 'total_local_gb_usage': 1.0, - 'server_usages': [ - { - 'ended_at': None, - 'flavor': 'usage-flavor-' + uuid.uuid4().hex, - 'hours': 1.0, - 'local_gb': 1, - 'memory_mb': 512, - 'name': 'usage-name-' + uuid.uuid4().hex, - 'instance_id': uuid.uuid4().hex, - 'state': 'active', - 'uptime': 3600, - 'vcpus': 1 - } - ] - } + return network_attrs - # Overwrite default attributes. - usage_info.update(attrs) - usage = fakes.FakeResource(info=copy.deepcopy(usage_info), - loaded=True) +def create_networks(attrs=None, count=2): + """Create multiple fake networks. - return usage + :param dict attrs: A dictionary with all attributes + :param int count: The number of networks to fake + :return: A list of dictionaries faking the networks + """ + networks = [] + for i in range(0, count): + networks.append(create_one_network(attrs)) - @staticmethod - def create_usages(attrs=None, count=2): - """Create multiple fake services. + return networks - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of services to fake - :return: - A list of FakeResource objects faking the services - """ - usages = [] - for i in range(0, count): - usages.append(FakeUsage.create_one_usage(attrs)) - - return usages - - -class FakeQuota(object): - """Fake quota""" - - @staticmethod - def create_one_comp_quota(attrs=None): - """Create one quota""" - - attrs = attrs or {} - - quota_attrs = { - 'id': 'project-id-' + uuid.uuid4().hex, - 'cores': 20, - 'fixed_ips': 30, - 'injected_files': 100, - 'injected_file_content_bytes': 10240, - 'injected_file_path_bytes': 255, - 'instances': 50, - 'key_pairs': 20, - 'metadata_items': 10, - 'ram': 51200, - 'server_groups': 10, - 'server_group_members': 10 - } - - quota_attrs.update(attrs) - quota = fakes.FakeResource( - info=copy.deepcopy(quota_attrs), - loaded=True) - - quota.project_id = quota_attrs['id'] - - return quota - - @staticmethod - def create_one_default_comp_quota(attrs=None): - """Create one quota""" - - attrs = attrs or {} - - quota_attrs = { - 'id': 'project-id-' + uuid.uuid4().hex, - 'cores': 10, - 'fixed_ips': 10, - 'injected_files': 100, - 'injected_file_content_bytes': 10240, - 'injected_file_path_bytes': 255, - 'instances': 20, - 'key_pairs': 20, - 'metadata_items': 10, - 'ram': 51200, - 'server_groups': 10, - 'server_group_members': 10 - } - - quota_attrs.update(attrs) - quota = fakes.FakeResource( - info=copy.deepcopy(quota_attrs), - loaded=True) - - quota.project_id = quota_attrs['id'] - - return quota - - @staticmethod - def create_one_comp_detailed_quota(attrs=None): - """Create one quota""" - - attrs = attrs or {} - - quota_attrs = { - 'id': 'project-id-' + uuid.uuid4().hex, - 'cores': {'reserved': 0, 'in_use': 0, 'limit': 20}, - 'fixed_ips': {'reserved': 0, 'in_use': 0, 'limit': 30}, - 'injected_files': {'reserved': 0, 'in_use': 0, 'limit': 100}, - 'injected_file_content_bytes': { - 'reserved': 0, 'in_use': 0, 'limit': 10240}, - 'injected_file_path_bytes': { - 'reserved': 0, 'in_use': 0, 'limit': 255}, - 'instances': {'reserved': 0, 'in_use': 0, 'limit': 50}, - 'key_pairs': {'reserved': 0, 'in_use': 0, 'limit': 20}, - 'metadata_items': {'reserved': 0, 'in_use': 0, 'limit': 10}, - 'ram': {'reserved': 0, 
'in_use': 0, 'limit': 51200}, - 'server_groups': {'reserved': 0, 'in_use': 0, 'limit': 10}, - 'server_group_members': {'reserved': 0, 'in_use': 0, 'limit': 10} - } - - quota_attrs.update(attrs) - quota = fakes.FakeResource( - info=copy.deepcopy(quota_attrs), - loaded=True) - - quota.project_id = quota_attrs['id'] - - return quota - - -class FakeLimits(object): - """Fake limits""" - - def __init__(self, absolute_attrs=None, rate_attrs=None): - self.absolute_limits_attrs = { + +def create_limits(attrs=None): + """Create a fake limits object.""" + attrs = attrs or {} + + limits_attrs = { + 'absolute': { 'maxServerMeta': 128, 'maxTotalInstances': 10, 'maxPersonality': 5, @@ -1352,94 +601,47 @@ def __init__(self, absolute_attrs=None, rate_attrs=None): 'maxTotalFloatingIps': 10, 'totalSecurityGroupsUsed': 0, 'maxTotalCores': 20, - } - absolute_attrs = absolute_attrs or {} - self.absolute_limits_attrs.update(absolute_attrs) - - self.rate_limits_attrs = [{ - "uri": "*", - "limit": [ - { - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z" - }, - { - "value": 10, - "verb": "PUT", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z" - }, - { - "value": 100, - "verb": "DELETE", - "remaining": 100, - "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z" - } - ] - }] - - @property - def absolute(self): - for (name, value) in self.absolute_limits_attrs.items(): - yield FakeAbsoluteLimit(name, value) - - def absolute_limits(self): - reference_data = [] - for (name, value) in self.absolute_limits_attrs.items(): - reference_data.append((name, value)) - return reference_data - - @property - def rate(self): - for group in self.rate_limits_attrs: - uri = group['uri'] - for rate in group['limit']: - yield FakeRateLimit(rate['verb'], uri, rate['value'], - rate['remaining'], rate['unit'], - rate['next-available']) - - def rate_limits(self): - reference_data = [] - for group in self.rate_limits_attrs: - uri = group['uri'] - for rate in group['limit']: - reference_data.append((rate['verb'], uri, rate['value'], - rate['remaining'], rate['unit'], - rate['next-available'])) - return reference_data - - -class FakeAbsoluteLimit(object): - """Data model that represents an absolute limit""" - - def __init__(self, name, value): - self.name = name - self.value = value - - -class FakeRateLimit(object): - """Data model that represents a flattened view of a single rate limit""" - - def __init__(self, verb, uri, value, remain, - unit, next_available): - self.verb = verb - self.uri = uri - self.value = value - self.remain = remain - self.unit = unit - self.next_available = next_available + }, + 'rate': [ + { + "uri": "*", + "limit": [ + { + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z", + }, + { + "value": 10, + "verb": "PUT", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z", + }, + { + "value": 100, + "verb": "DELETE", + "remaining": 100, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z", + }, + ], + } + ], + } + limits_attrs.update(attrs) + + limits = _limits.Limits(**limits_attrs) + return limits def create_one_migration(attrs=None): """Create a fake migration. 
:param dict attrs: A dictionary with all attributes - :return: A fake openstack.compute.v2.migration.Migration object + :return: A fake :class:`~openstack.compute.v2.migration.Migration` object """ attrs = attrs or {} @@ -1475,7 +677,7 @@ def create_migrations(attrs=None, count=2): :param dict attrs: A dictionary with all attributes :param int count: The number of migrations to fake - :return: A list of fake openstack.compute.v2.migration.Migration objects + :return: A list of fake :class:`openstack.compute.v2.migration.Migration` objects """ migrations = [] for i in range(0, count): @@ -1488,7 +690,8 @@ def create_one_server_migration(attrs=None): """Create a fake server migration. :param dict attrs: A dictionary with all attributes - :return A fake openstack.compute.v2.server_migration.ServerMigration object + :return: A fake + :class:`~openstack.compute.v2.server_migration.ServerMigration` object """ attrs = attrs or {} @@ -1535,8 +738,7 @@ def create_server_migrations(attrs=None, methods=None, count=2): """ migrations = [] for i in range(0, count): - migrations.append( - create_one_server_migration(attrs, methods)) + migrations.append(create_one_server_migration(attrs, methods)) return migrations @@ -1545,8 +747,8 @@ def create_one_volume_attachment(attrs=None): """Create a fake volume attachment. :param dict attrs: A dictionary with all attributes - :return: A fake openstack.compute.v2.volume_attachment.VolumeAttachment - object + :return: A fake + :class:`~openstack.compute.v2.volume_attachment.VolumeAttachment` object """ attrs = attrs or {} @@ -1568,7 +770,7 @@ def create_one_volume_attachment(attrs=None): # Overwrite default attributes. volume_attachment_info.update(attrs) - return volume_attachment.VolumeAttachment(**volume_attachment_info) + return _volume_attachment.VolumeAttachment(**volume_attachment_info) def create_volume_attachments(attrs=None, count=2): @@ -1586,104 +788,13 @@ def create_volume_attachments(attrs=None, count=2): return volume_attachments -def create_one_hypervisor(attrs=None): - """Create a fake hypervisor. - - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id, hypervisor_hostname, and so on - """ - attrs = attrs or {} - - # Set default attributes. - hypervisor_info = { - 'id': 'hypervisor-id-' + uuid.uuid4().hex, - 'hypervisor_hostname': 'hypervisor-hostname-' + uuid.uuid4().hex, - 'status': 'enabled', - 'host_ip': '192.168.0.10', - 'cpu_info': { - 'aaa': 'aaa', - }, - 'free_disk_gb': 50, - 'hypervisor_version': 2004001, - 'disk_available_least': 50, - 'local_gb': 50, - 'free_ram_mb': 1024, - 'service': { - 'host': 'aaa', - 'disabled_reason': None, - 'id': 1, - }, - 'vcpus_used': 0, - 'hypervisor_type': 'QEMU', - 'local_gb_used': 0, - 'vcpus': 4, - 'memory_mb_used': 512, - 'memory_mb': 1024, - 'current_workload': 0, - 'state': 'up', - 'running_vms': 0, - } - - # Overwrite default attributes. - hypervisor_info.update(attrs) - - hypervisor = _hypervisor.Hypervisor(**hypervisor_info, loaded=True) - return hypervisor - - -def create_hypervisors(attrs=None, count=2): - """Create multiple fake hypervisors. 
- - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of hypervisors to fake - :return: - A list of FakeResource objects faking the hypervisors - """ - hypervisors = [] - for i in range(0, count): - hypervisors.append(create_one_hypervisor(attrs)) - - return hypervisors - - -def create_one_server_group(attrs=None): - """Create a fake server group - - :param dict attrs: - A dictionary with all attributes - :return: - A fake ServerGroup object, with id and other attributes - """ - if attrs is None: - attrs = {} - - # Set default attributes. - server_group_info = { - 'id': 'server-group-id-' + uuid.uuid4().hex, - 'member_ids': '', - 'metadata': {}, - 'name': 'server-group-name-' + uuid.uuid4().hex, - 'project_id': 'server-group-project-id-' + uuid.uuid4().hex, - 'user_id': 'server-group-user-id-' + uuid.uuid4().hex, - } - - # Overwrite default attributes. - server_group_info.update(attrs) - - server_group = _server_group.ServerGroup(**server_group_info) - return server_group - - def create_one_server_interface(attrs=None): - """Create a fake SDK ServerInterface. + """Create a fake ServerInterface. :param dict attrs: A dictionary with all attributes :param dict methods: A dictionary with all methods - :return: A fake ServerInterface object with various attributes set + :return: A fake + :class:`~openstack.compute.v2.server_interface.ServerInterface` object """ attrs = attrs or {} diff --git a/openstackclient/tests/unit/compute/v2/test_agent.py b/openstackclient/tests/unit/compute/v2/test_agent.py index c6d4f2b655..f04ba1066e 100644 --- a/openstackclient/tests/unit/compute/v2/test_agent.py +++ b/openstackclient/tests/unit/compute/v2/test_agent.py @@ -11,214 +11,223 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-# +import http +import random from unittest import mock -from unittest.mock import call +import uuid from osc_lib import exceptions from openstackclient.compute.v2 import agent from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes +from openstackclient.tests.unit import fakes from openstackclient.tests.unit import utils as tests_utils -class TestAgent(compute_fakes.TestComputev2): - - attr = {} - attr['agent_id'] = 1 - fake_agent = compute_fakes.FakeAgent.create_one_agent(attr) - - columns = ( - 'agent_id', - 'architecture', - 'hypervisor', - 'md5hash', - 'os', - 'url', - 'version', - ) - - data = ( - fake_agent.agent_id, - fake_agent.architecture, - fake_agent.hypervisor, - fake_agent.md5hash, - fake_agent.os, - fake_agent.url, - fake_agent.version, - ) - - def setUp(self): - super(TestAgent, self).setUp() - - self.agents_mock = self.app.client_manager.compute.agents - self.agents_mock.reset_mock() - +def _generate_fake_agent(): + return { + 'agent_id': random.randint(1, 1000), + 'os': 'agent-os-' + uuid.uuid4().hex, + 'architecture': 'agent-architecture', + 'version': '8.0', + 'url': 'http://127.0.0.1', + 'md5hash': 'agent-md5hash', + 'hypervisor': 'hypervisor', + } -class TestAgentCreate(TestAgent): +class TestAgentCreate(compute_fakes.TestComputev2): def setUp(self): - super(TestAgentCreate, self).setUp() + super().setUp() - self.agents_mock.create.return_value = self.fake_agent + self._agent = _generate_fake_agent() + self.columns = ( + 'agent_id', + 'architecture', + 'hypervisor', + 'md5hash', + 'os', + 'url', + 'version', + ) + self.data = ( + self._agent['agent_id'], + self._agent['architecture'], + self._agent['hypervisor'], + self._agent['md5hash'], + self._agent['os'], + self._agent['url'], + self._agent['version'], + ) + + self.compute_client.post.return_value = fakes.FakeResponse( + data={'agent': self._agent} + ) self.cmd = agent.CreateAgent(self.app, None) def test_agent_create(self): arglist = [ - self.fake_agent.os, - self.fake_agent.architecture, - self.fake_agent.version, - self.fake_agent.url, - self.fake_agent.md5hash, - self.fake_agent.hypervisor, + self._agent['os'], + self._agent['architecture'], + self._agent['version'], + self._agent['url'], + self._agent['md5hash'], + self._agent['hypervisor'], ] - verifylist = [ - ('os', self.fake_agent.os), - ('architecture', self.fake_agent.architecture), - ('version', self.fake_agent.version), - ('url', self.fake_agent.url), - ('md5hash', self.fake_agent.md5hash), - ('hypervisor', self.fake_agent.hypervisor), + ('os', self._agent['os']), + ('architecture', self._agent['architecture']), + ('version', self._agent['version']), + ('url', self._agent['url']), + ('md5hash', self._agent['md5hash']), + ('hypervisor', self._agent['hypervisor']), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.agents_mock.create.assert_called_with(parsed_args.os, - parsed_args.architecture, - parsed_args.version, - parsed_args.url, - parsed_args.md5hash, - parsed_args.hypervisor) + + self.compute_client.post.assert_called_with( + '/os-agents', + json={ + 'agent': { + 'hypervisor': parsed_args.hypervisor, + 'os': parsed_args.os, + 'architecture': parsed_args.architecture, + 'version': parsed_args.version, + 'url': parsed_args.url, + 'md5hash': parsed_args.md5hash, + }, + }, + microversion='2.1', + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) -class TestAgentDelete(TestAgent): - - fake_agents = 
compute_fakes.FakeAgent.create_agents(count=2) - +class TestAgentDelete(compute_fakes.TestComputev2): def setUp(self): - super(TestAgentDelete, self).setUp() + super().setUp() + + self.compute_client.delete.return_value = fakes.FakeResponse( + status_code=http.HTTPStatus.NO_CONTENT + ) - self.agents_mock.get.return_value = self.fake_agents self.cmd = agent.DeleteAgent(self.app, None) def test_delete_one_agent(self): - arglist = [ - self.fake_agents[0].agent_id - ] - + arglist = ['123'] verifylist = [ - ('id', [self.fake_agents[0].agent_id]), + ('id', ['123']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.agents_mock.delete.assert_called_with( - self.fake_agents[0].agent_id) + + self.compute_client.delete.assert_called_once_with( + '/os-agents/123', + microversion='2.1', + ) self.assertIsNone(result) def test_delete_multiple_agents(self): - arglist = [] - for n in self.fake_agents: - arglist.append(n.agent_id) + arglist = ['1', '2', '3'] verifylist = [ - ('id', arglist), + ('id', ['1', '2', '3']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - calls = [] - for n in self.fake_agents: - calls.append(call(n.agent_id)) - self.agents_mock.delete.assert_has_calls(calls) + calls = [ + mock.call(f'/os-agents/{x}', microversion='2.1') for x in arglist + ] + self.compute_client.delete.assert_has_calls(calls) self.assertIsNone(result) def test_delete_multiple_agents_exception(self): - arglist = [ - self.fake_agents[0].agent_id, - self.fake_agents[1].agent_id, - 'x-y-z', - ] + arglist = ['1', '2', '999'] verifylist = [ - ('id', arglist), + ('id', ['1', '2', '999']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - ret_delete = [ - None, - None, - exceptions.NotFound('404') + self.compute_client.delete.side_effect = [ + fakes.FakeResponse(status_code=http.HTTPStatus.NO_CONTENT), + fakes.FakeResponse(status_code=http.HTTPStatus.NO_CONTENT), + fakes.FakeResponse(status_code=http.HTTPStatus.NOT_FOUND), ] - self.agents_mock.delete = mock.Mock(side_effect=ret_delete) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) calls = [ - call(self.fake_agents[0].agent_id), - call(self.fake_agents[1].agent_id), + mock.call(f'/os-agents/{x}', microversion='2.1') for x in arglist ] - self.agents_mock.delete.assert_has_calls(calls) + self.compute_client.delete.assert_has_calls(calls) def test_agent_delete_no_input(self): arglist = [] verifylist = None - self.assertRaises(tests_utils.ParserException, - self.check_parser, - self.cmd, - arglist, - verifylist) - - -class TestAgentList(TestAgent): - - agents = compute_fakes.FakeAgent.create_agents(count=3) - list_columns = ( - "Agent ID", - "Hypervisor", - "OS", - "Architecture", - "Version", - "Md5Hash", - "URL", - ) - - list_data = [] - for _agent in agents: - list_data.append(( - _agent.agent_id, - _agent.hypervisor, - _agent.os, - _agent.architecture, - _agent.version, - _agent.md5hash, - _agent.url, - )) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) - def setUp(self): - super(TestAgentList, self).setUp() +class TestAgentList(compute_fakes.TestComputev2): + def setUp(self): + super().setUp() + + _agents = [_generate_fake_agent() for _ in range(3)] + + self.columns = ( + "Agent ID", + "Hypervisor", + "OS", + "Architecture", + "Version", + 
"Md5Hash", + "URL", + ) + self.data = [ + ( + _agent['agent_id'], + _agent['hypervisor'], + _agent['os'], + _agent['architecture'], + _agent['version'], + _agent['md5hash'], + _agent['url'], + ) + for _agent in _agents + ] - self.agents_mock.list.return_value = self.agents + self.compute_client.get.return_value = fakes.FakeResponse( + data={'agents': _agents}, + ) self.cmd = agent.ListAgent(self.app, None) def test_agent_list(self): - arglist = [] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.assertEqual(self.list_columns, columns) - self.assertEqual(self.list_data, list(data)) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, list(data)) + self.compute_client.get.assert_called_once_with( + '/os-agents', + microversion='2.1', + ) def test_agent_list_with_hypervisor(self): - arglist = [ '--hypervisor', 'hypervisor', @@ -230,91 +239,129 @@ def test_agent_list_with_hypervisor(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.assertEqual(self.list_columns, columns) - self.assertEqual(self.list_data, list(data)) - + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, list(data)) + self.compute_client.get.assert_called_once_with( + '/os-agents?hypervisor=hypervisor', + microversion='2.1', + ) -class TestAgentSet(TestAgent): +class TestAgentSet(compute_fakes.TestComputev2): def setUp(self): - super(TestAgentSet, self).setUp() + super().setUp() + + self.agent = _generate_fake_agent() + self.compute_client.get.return_value = fakes.FakeResponse( + data={'agents': [self.agent]}, + ) + self.compute_client.put.return_value = fakes.FakeResponse() - self.agents_mock.update.return_value = self.fake_agent - self.agents_mock.list.return_value = [self.fake_agent] self.cmd = agent.SetAgent(self.app, None) def test_agent_set_nothing(self): arglist = [ - '1', + str(self.agent['agent_id']), ] verifylist = [ - ('id', '1'), + ('id', self.agent['agent_id']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.agents_mock.update.assert_called_with(parsed_args.id, - self.fake_agent.version, - self.fake_agent.url, - self.fake_agent.md5hash) + self.compute_client.put.assert_called_once_with( + f'/os-agents/{self.agent["agent_id"]}', + json={ + 'para': { + 'version': self.agent['version'], + 'url': self.agent['url'], + 'md5hash': self.agent['md5hash'], + }, + }, + microversion='2.1', + ) self.assertIsNone(result) def test_agent_set_version(self): arglist = [ - '1', - '--agent-version', 'new-version', + str(self.agent['agent_id']), + '--agent-version', + 'new-version', ] verifylist = [ - ('id', '1'), + ('id', self.agent['agent_id']), ('version', 'new-version'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.agents_mock.update.assert_called_with(parsed_args.id, - parsed_args.version, - self.fake_agent.url, - self.fake_agent.md5hash) + self.compute_client.put.assert_called_once_with( + f'/os-agents/{self.agent["agent_id"]}', + json={ + 'para': { + 'version': parsed_args.version, + 'url': self.agent['url'], + 'md5hash': self.agent['md5hash'], + }, + }, + microversion='2.1', + ) self.assertIsNone(result) def test_agent_set_url(self): arglist = [ - '1', - '--url', 'new-url', + str(self.agent['agent_id']), + '--url', + 'new-url', ] verifylist = [ - ('id', '1'), + ('id', self.agent['agent_id']), 
('url', 'new-url'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.agents_mock.update.assert_called_with(parsed_args.id, - self.fake_agent.version, - parsed_args.url, - self.fake_agent.md5hash) + self.compute_client.put.assert_called_once_with( + f'/os-agents/{self.agent["agent_id"]}', + json={ + 'para': { + 'version': self.agent['version'], + 'url': parsed_args.url, + 'md5hash': self.agent['md5hash'], + }, + }, + microversion='2.1', + ) self.assertIsNone(result) def test_agent_set_md5hash(self): arglist = [ - '1', - '--md5hash', 'new-md5hash', + str(self.agent['agent_id']), + '--md5hash', + 'new-md5hash', ] verifylist = [ - ('id', '1'), + ('id', self.agent['agent_id']), ('md5hash', 'new-md5hash'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.agents_mock.update.assert_called_with(parsed_args.id, - self.fake_agent.version, - self.fake_agent.url, - parsed_args.md5hash) + self.compute_client.put.assert_called_once_with( + f'/os-agents/{self.agent["agent_id"]}', + json={ + 'para': { + 'version': self.agent['version'], + 'url': self.agent['url'], + 'md5hash': parsed_args.md5hash, + }, + }, + microversion='2.1', + ) self.assertIsNone(result) diff --git a/openstackclient/tests/unit/compute/v2/test_aggregate.py b/openstackclient/tests/unit/compute/v2/test_aggregate.py index 3a7a81cb1a..b68e76edc5 100644 --- a/openstackclient/tests/unit/compute/v2/test_aggregate.py +++ b/openstackclient/tests/unit/compute/v2/test_aggregate.py @@ -16,8 +16,9 @@ from unittest import mock from unittest.mock import call +from openstack.compute.v2 import aggregate as _aggregate from openstack import exceptions as sdk_exceptions -from openstack import utils as sdk_utils +from openstack.test import fakes as sdk_fakes from osc_lib.cli import format_columns from osc_lib import exceptions @@ -27,49 +28,46 @@ class TestAggregate(compute_fakes.TestComputev2): - - fake_ag = compute_fakes.FakeAggregate.create_one_aggregate() - columns = ( 'availability_zone', + 'created_at', + 'deleted_at', 'hosts', 'id', + 'is_deleted', 'name', 'properties', - ) - - data = ( - fake_ag.availability_zone, - format_columns.ListColumn(fake_ag.hosts), - fake_ag.id, - fake_ag.name, - format_columns.DictColumn(fake_ag.metadata), + 'updated_at', + 'uuid', ) def setUp(self): - super(TestAggregate, self).setUp() - - # Get a shortcut to the AggregateManager Mock - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - self.sdk_client.aggregates = mock.Mock() - self.sdk_client.find_aggregate = mock.Mock() - self.sdk_client.create_aggregate = mock.Mock() - self.sdk_client.update_aggregate = mock.Mock() - self.sdk_client.update_aggregate = mock.Mock() - self.sdk_client.set_aggregate_metadata = mock.Mock() - self.sdk_client.add_host_to_aggregate = mock.Mock() - self.sdk_client.remove_host_from_aggregate = mock.Mock() + super().setUp() + self.fake_ag = sdk_fakes.generate_fake_resource( + _aggregate.Aggregate, + metadata={'availability_zone': 'ag_zone', 'key1': 'value1'}, + ) + self.data = ( + self.fake_ag.availability_zone, + self.fake_ag.created_at, + self.fake_ag.deleted_at, + format_columns.ListColumn(self.fake_ag.hosts), + self.fake_ag.id, + self.fake_ag.is_deleted, + self.fake_ag.name, + format_columns.DictColumn(self.fake_ag.metadata), + self.fake_ag.updated_at, + self.fake_ag.uuid, + ) 
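[Editorial aside: the aggregate tests now build fixtures with openstacksdk's fake-resource helpers instead of the hand-rolled `FakeAggregate` class. A minimal sketch of the helpers as used in these hunks; fields other than the ones passed in are populated with generated placeholder data, so tests should only assert on attributes they set or explicitly read back.]

    from openstack.compute.v2 import aggregate as _aggregate
    from openstack.test import fakes as sdk_fakes

    # One fake Aggregate with fixed metadata; every other attribute is
    # filled in with generated fake data.
    fake_ag = sdk_fakes.generate_fake_resource(
        _aggregate.Aggregate,
        metadata={'availability_zone': 'ag_zone', 'key1': 'value1'},
    )

    # A batch of two fakes, as consumed by the delete tests below.
    fake_ags = list(
        sdk_fakes.generate_fake_resources(_aggregate.Aggregate, 2)
    )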
-class TestAggregateAddHost(TestAggregate): +class TestAggregateAddHost(TestAggregate): def setUp(self): - super(TestAggregateAddHost, self).setUp() + super().setUp() - self.sdk_client.find_aggregate.return_value = self.fake_ag - self.sdk_client.add_host_to_aggregate.return_value = self.fake_ag + self.compute_client.find_aggregate.return_value = self.fake_ag + self.compute_client.add_host_to_aggregate.return_value = self.fake_ag self.cmd = aggregate.AddAggregateHost(self.app, None) def test_aggregate_add_host(self): @@ -83,21 +81,22 @@ def test_aggregate_add_host(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.sdk_client.add_host_to_aggregate.assert_called_once_with( - self.fake_ag.id, parsed_args.host) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.compute_client.add_host_to_aggregate.assert_called_once_with( + self.fake_ag.id, parsed_args.host + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) class TestAggregateCreate(TestAggregate): - def setUp(self): - super(TestAggregateCreate, self).setUp() + super().setUp() - self.sdk_client.create_aggregate.return_value = self.fake_ag - self.sdk_client.set_aggregate_metadata.return_value = self.fake_ag + self.compute_client.create_aggregate.return_value = self.fake_ag + self.compute_client.set_aggregate_metadata.return_value = self.fake_ag self.cmd = aggregate.CreateAggregate(self.app, None) def test_aggregate_create(self): @@ -109,14 +108,16 @@ def test_aggregate_create(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_aggregate.assert_called_once_with( - name=parsed_args.name) + self.compute_client.create_aggregate.assert_called_once_with( + name=parsed_args.name + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_aggregate_create_with_zone(self): arglist = [ - '--zone', 'zone1', + '--zone', + 'zone1', 'ag1', ] verifylist = [ @@ -126,15 +127,18 @@ def test_aggregate_create_with_zone(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_aggregate.assert_called_once_with( - name=parsed_args.name, availability_zone=parsed_args.zone) + self.compute_client.create_aggregate.assert_called_once_with( + name=parsed_args.name, availability_zone=parsed_args.zone + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_aggregate_create_with_property(self): arglist = [ - '--property', 'key1=value1', - '--property', 'key2=value2', + '--property', + 'key1=value1', + '--property', + 'key2=value2', 'ag1', ] verifylist = [ @@ -143,38 +147,44 @@ def test_aggregate_create_with_property(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_aggregate.assert_called_once_with( - name=parsed_args.name) - self.sdk_client.set_aggregate_metadata.assert_called_once_with( - self.fake_ag.id, parsed_args.properties) + self.compute_client.create_aggregate.assert_called_once_with( + name=parsed_args.name + ) + self.compute_client.set_aggregate_metadata.assert_called_once_with( + self.fake_ag.id, parsed_args.properties + ) 
self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) class TestAggregateDelete(TestAggregate): + def setUp(self): + super().setUp() - fake_ags = compute_fakes.FakeAggregate.create_aggregates(count=2) + self.fake_ags = list( + sdk_fakes.generate_fake_resources(_aggregate.Aggregate, 2) + ) - def setUp(self): - super(TestAggregateDelete, self).setUp() + self.compute_client.find_aggregate.side_effect = [ + self.fake_ags[0], + self.fake_ags[1], + ] - self.sdk_client.find_aggregate = ( - compute_fakes.FakeAggregate.get_aggregates(self.fake_ags)) self.cmd = aggregate.DeleteAggregate(self.app, None) def test_aggregate_delete(self): - arglist = [ - self.fake_ags[0].id - ] + arglist = [self.fake_ags[0].id] verifylist = [ ('aggregate', [self.fake_ags[0].id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - self.fake_ags[0].id, ignore_missing=False) - self.sdk_client.delete_aggregate.assert_called_once_with( - self.fake_ags[0].id, ignore_missing=False) + self.compute_client.find_aggregate.assert_called_once_with( + self.fake_ags[0].id, ignore_missing=False + ) + self.compute_client.delete_aggregate.assert_called_once_with( + self.fake_ags[0].id, ignore_missing=False + ) def test_delete_multiple_aggregates(self): arglist = [] @@ -190,8 +200,8 @@ def test_delete_multiple_aggregates(self): calls = [] for a in self.fake_ags: calls.append(call(a.id, ignore_missing=False)) - self.sdk_client.find_aggregate.assert_has_calls(calls) - self.sdk_client.delete_aggregate.assert_has_calls(calls) + self.compute_client.find_aggregate.assert_has_calls(calls) + self.compute_client.delete_aggregate.assert_has_calls(calls) def test_delete_multiple_agggregates_with_exception(self): arglist = [ @@ -204,72 +214,81 @@ def test_delete_multiple_agggregates_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.sdk_client.find_aggregate.side_effect = [ - self.fake_ags[0], sdk_exceptions.NotFoundException] + self.compute_client.find_aggregate.side_effect = [ + self.fake_ags[0], + sdk_exceptions.NotFoundException, + ] try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 aggregates failed to delete.', - str(e)) + self.assertEqual('1 of 2 aggregates failed to delete.', str(e)) calls = [] for a in arglist: calls.append(call(a, ignore_missing=False)) - self.sdk_client.find_aggregate.assert_has_calls(calls) - self.sdk_client.delete_aggregate.assert_called_with( - self.fake_ags[0].id, ignore_missing=False) + self.compute_client.find_aggregate.assert_has_calls(calls) + self.compute_client.delete_aggregate.assert_called_with( + self.fake_ags[0].id, ignore_missing=False + ) class TestAggregateList(TestAggregate): + def setUp(self): + super().setUp() - list_columns = ( - "ID", - "Name", - "Availability Zone", - ) + self.compute_client.aggregates.return_value = [self.fake_ag] + self.cmd = aggregate.ListAggregate(self.app, None) - list_columns_long = ( - "ID", - "Name", - "Availability Zone", - "Properties", - "Hosts", - ) + def test_aggregate_list(self): + self.set_compute_api_version('2.41') - list_data = (( - TestAggregate.fake_ag.id, - TestAggregate.fake_ag.name, - TestAggregate.fake_ag.availability_zone, - ), ) - - list_data_long = (( - TestAggregate.fake_ag.id, - TestAggregate.fake_ag.name, - TestAggregate.fake_ag.availability_zone, - format_columns.DictColumn({ - key: 
value - for key, value in TestAggregate.fake_ag.metadata.items() - if key != 'availability_zone' - }), - format_columns.ListColumn(TestAggregate.fake_ag.hosts), - ), ) + parsed_args = self.check_parser(self.cmd, [], []) + columns, data = self.cmd.take_action(parsed_args) - def setUp(self): - super(TestAggregateList, self).setUp() + expected_columns = ( + "ID", + "UUID", + "Name", + "Availability Zone", + ) + expected_data = ( + ( + self.fake_ag.id, + self.fake_ag.uuid, + self.fake_ag.name, + self.fake_ag.availability_zone, + ), + ) - self.sdk_client.aggregates.return_value = [self.fake_ag] - self.cmd = aggregate.ListAggregate(self.app, None) + self.assertEqual(expected_columns, columns) + self.assertCountEqual(expected_data, tuple(data)) - def test_aggregate_list(self): + def test_aggregate_list_pre_v241(self): + self.set_compute_api_version('2.40') parsed_args = self.check_parser(self.cmd, [], []) columns, data = self.cmd.take_action(parsed_args) - self.assertEqual(self.list_columns, columns) - self.assertCountEqual(self.list_data, tuple(data)) + expected_columns = ( + "ID", + "Name", + "Availability Zone", + ) + expected_data = ( + ( + self.fake_ag.id, + self.fake_ag.name, + self.fake_ag.availability_zone, + ), + ) + + self.assertEqual(expected_columns, columns) + self.assertCountEqual(expected_data, tuple(data)) def test_aggregate_list_with_long(self): + self.set_compute_api_version('2.41') + arglist = [ '--long', ] @@ -279,17 +298,43 @@ def test_aggregate_list_with_long(self): parsed_args = self.check_parser(self.cmd, arglist, vertifylist) columns, data = self.cmd.take_action(parsed_args) - self.assertEqual(self.list_columns_long, columns) - self.assertCountEqual(self.list_data_long, tuple(data)) + expected_columns = ( + "ID", + "UUID", + "Name", + "Availability Zone", + "Properties", + "Hosts", + ) + expected_data = ( + ( + self.fake_ag.id, + self.fake_ag.uuid, + self.fake_ag.name, + self.fake_ag.availability_zone, + format_columns.DictColumn( + { + key: value + for key, value in self.fake_ag.metadata.items() + if key != 'availability_zone' + } + ), + format_columns.ListColumn(self.fake_ag.hosts), + ), + ) + self.assertEqual(expected_columns, columns) + self.assertCountEqual(expected_data, tuple(data)) -class TestAggregateRemoveHost(TestAggregate): +class TestAggregateRemoveHost(TestAggregate): def setUp(self): - super(TestAggregateRemoveHost, self).setUp() + super().setUp() - self.sdk_client.find_aggregate.return_value = self.fake_ag - self.sdk_client.remove_host_from_aggregate.return_value = self.fake_ag + self.compute_client.find_aggregate.return_value = self.fake_ag + self.compute_client.remove_host_from_aggregate.return_value = ( + self.fake_ag + ) self.cmd = aggregate.RemoveAggregateHost(self.app, None) def test_aggregate_remove_host(self): @@ -303,20 +348,21 @@ def test_aggregate_remove_host(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.sdk_client.remove_host_from_aggregate.assert_called_once_with( - self.fake_ag.id, parsed_args.host) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.compute_client.remove_host_from_aggregate.assert_called_once_with( + self.fake_ag.id, parsed_args.host + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) class TestAggregateSet(TestAggregate): - def setUp(self): - 
super(TestAggregateSet, self).setUp() + super().setUp() - self.sdk_client.find_aggregate.return_value = self.fake_ag + self.compute_client.find_aggregate.return_value = self.fake_ag self.cmd = aggregate.SetAggregate(self.app, None) def test_aggregate_set_no_option(self): @@ -329,15 +375,17 @@ def test_aggregate_set_no_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.assertNotCalled(self.sdk_client.update_aggregate) - self.assertNotCalled(self.sdk_client.set_aggregate_metadata) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.assertNotCalled(self.compute_client.update_aggregate) + self.assertNotCalled(self.compute_client.set_aggregate_metadata) self.assertIsNone(result) def test_aggregate_set_with_name(self): arglist = [ - '--name', 'new_name', + '--name', + 'new_name', 'ag1', ] verifylist = [ @@ -347,16 +395,19 @@ def test_aggregate_set_with_name(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.sdk_client.update_aggregate.assert_called_once_with( - self.fake_ag.id, name=parsed_args.name) - self.assertNotCalled(self.sdk_client.set_aggregate_metadata) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.compute_client.update_aggregate.assert_called_once_with( + self.fake_ag.id, name=parsed_args.name + ) + self.assertNotCalled(self.compute_client.set_aggregate_metadata) self.assertIsNone(result) def test_aggregate_set_with_zone(self): arglist = [ - '--zone', 'new_zone', + '--zone', + 'new_zone', 'ag1', ] verifylist = [ @@ -366,17 +417,21 @@ def test_aggregate_set_with_zone(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.sdk_client.update_aggregate.assert_called_once_with( - self.fake_ag.id, availability_zone=parsed_args.zone) - self.assertNotCalled(self.sdk_client.set_aggregate_metadata) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.compute_client.update_aggregate.assert_called_once_with( + self.fake_ag.id, availability_zone=parsed_args.zone + ) + self.assertNotCalled(self.compute_client.set_aggregate_metadata) self.assertIsNone(result) def test_aggregate_set_with_property(self): arglist = [ - '--property', 'key1=value1', - '--property', 'key2=value2', + '--property', + 'key1=value1', + '--property', + 'key2=value2', 'ag1', ] verifylist = [ @@ -386,17 +441,20 @@ def test_aggregate_set_with_property(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.assertNotCalled(self.sdk_client.update_aggregate) - self.sdk_client.set_aggregate_metadata.assert_called_once_with( - self.fake_ag.id, parsed_args.properties) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.assertNotCalled(self.compute_client.update_aggregate) + 
self.compute_client.set_aggregate_metadata.assert_called_once_with( + self.fake_ag.id, parsed_args.properties + ) self.assertIsNone(result) def test_aggregate_set_with_no_property_and_property(self): arglist = [ '--no-property', - '--property', 'key2=value2', + '--property', + 'key2=value2', 'ag1', ] verifylist = [ @@ -406,11 +464,13 @@ def test_aggregate_set_with_no_property_and_property(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.assertNotCalled(self.sdk_client.update_aggregate) - self.sdk_client.set_aggregate_metadata.assert_called_once_with( - self.fake_ag.id, {'key1': None, 'key2': 'value2'}) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.assertNotCalled(self.compute_client.update_aggregate) + self.compute_client.set_aggregate_metadata.assert_called_once_with( + self.fake_ag.id, {'key1': None, 'key2': 'value2'} + ) self.assertIsNone(result) def test_aggregate_set_with_no_property(self): @@ -424,16 +484,19 @@ def test_aggregate_set_with_no_property(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.assertNotCalled(self.sdk_client.update_aggregate) - self.sdk_client.set_aggregate_metadata.assert_called_once_with( - self.fake_ag.id, {'key1': None}) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.assertNotCalled(self.compute_client.update_aggregate) + self.compute_client.set_aggregate_metadata.assert_called_once_with( + self.fake_ag.id, {'key1': None} + ) self.assertIsNone(result) def test_aggregate_set_with_zone_and_no_property(self): arglist = [ - '--zone', 'new_zone', + '--zone', + 'new_zone', '--no-property', 'ag1', ] @@ -444,43 +507,51 @@ def test_aggregate_set_with_zone_and_no_property(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.sdk_client.update_aggregate.assert_called_once_with( - self.fake_ag.id, availability_zone=parsed_args.zone) - self.sdk_client.set_aggregate_metadata.assert_called_once_with( - self.fake_ag.id, {'key1': None}) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.compute_client.update_aggregate.assert_called_once_with( + self.fake_ag.id, availability_zone=parsed_args.zone + ) + self.compute_client.set_aggregate_metadata.assert_called_once_with( + self.fake_ag.id, {'key1': None} + ) self.assertIsNone(result) class TestAggregateShow(TestAggregate): - columns = ( 'availability_zone', + 'created_at', + 'deleted_at', 'hosts', 'id', + 'is_deleted', 'name', 'properties', - ) - - data = ( - TestAggregate.fake_ag.availability_zone, - format_columns.ListColumn(TestAggregate.fake_ag.hosts), - TestAggregate.fake_ag.id, - TestAggregate.fake_ag.name, - format_columns.DictColumn({ - key: value - for key, value in TestAggregate.fake_ag.metadata.items() - if key != 'availability_zone' - }), + 'updated_at', + 'uuid', ) def setUp(self): - super(TestAggregateShow, self).setUp() + super().setUp() - self.sdk_client.find_aggregate.return_value = 
self.fake_ag + self.compute_client.find_aggregate.return_value = self.fake_ag self.cmd = aggregate.ShowAggregate(self.app, None) + self.data = ( + self.fake_ag.availability_zone, + self.fake_ag.created_at, + self.fake_ag.deleted_at, + format_columns.ListColumn(self.fake_ag.hosts), + self.fake_ag.id, + self.fake_ag.is_deleted, + self.fake_ag.name, + format_columns.DictColumn(self.fake_ag.metadata), + self.fake_ag.updated_at, + self.fake_ag.uuid, + ) + def test_aggregate_show(self): arglist = [ 'ag1', @@ -490,24 +561,25 @@ def test_aggregate_show(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, tuple(data)) class TestAggregateUnset(TestAggregate): - def setUp(self): - super(TestAggregateUnset, self).setUp() + super().setUp() - self.sdk_client.find_aggregate.return_value = self.fake_ag + self.compute_client.find_aggregate.return_value = self.fake_ag self.cmd = aggregate.UnsetAggregate(self.app, None) def test_aggregate_unset(self): arglist = [ - '--property', 'unset_key', + '--property', + 'unset_key', 'ag1', ] verifylist = [ @@ -517,14 +589,17 @@ def test_aggregate_unset(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.set_aggregate_metadata.assert_called_once_with( - self.fake_ag.id, {'unset_key': None}) + self.compute_client.set_aggregate_metadata.assert_called_once_with( + self.fake_ag.id, {'unset_key': None} + ) self.assertIsNone(result) def test_aggregate_unset_multiple_properties(self): arglist = [ - '--property', 'unset_key1', - '--property', 'unset_key2', + '--property', + 'unset_key1', + '--property', + 'unset_key2', 'ag1', ] verifylist = [ @@ -534,8 +609,9 @@ def test_aggregate_unset_multiple_properties(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.set_aggregate_metadata.assert_called_once_with( - self.fake_ag.id, {'unset_key1': None, 'unset_key2': None}) + self.compute_client.set_aggregate_metadata.assert_called_once_with( + self.fake_ag.id, {'unset_key1': None, 'unset_key2': None} + ) self.assertIsNone(result) def test_aggregate_unset_no_option(self): @@ -548,60 +624,57 @@ def test_aggregate_unset_no_option(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertNotCalled(self.sdk_client.set_aggregate_metadata) + self.assertNotCalled(self.compute_client.set_aggregate_metadata) self.assertIsNone(result) class TestAggregateCacheImage(TestAggregate): - images = image_fakes.create_images(count=2) def setUp(self): - super(TestAggregateCacheImage, self).setUp() + super().setUp() - self.sdk_client.find_aggregate.return_value = self.fake_ag + self.compute_client.find_aggregate.return_value = self.fake_ag self.find_image_mock = mock.Mock(side_effect=self.images) - self.app.client_manager.sdk_connection.image.find_image = \ + self.app.client_manager.sdk_connection.image.find_image = ( self.find_image_mock + ) self.cmd = aggregate.CacheImageForAggregate(self.app, None) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_aggregate_not_supported(self, sm_mock): - 
arglist = [ - 'ag1', - 'im1' - ] + def test_aggregate_cache_pre_v281(self): + self.set_compute_api_version('2.80') + + arglist = ['ag1', 'im1'] verifylist = [ ('aggregate', 'ag1'), ('image', ['im1']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args + exceptions.CommandError, self.cmd.take_action, parsed_args ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_aggregate_add_single_image(self, sm_mock): - arglist = [ - 'ag1', - 'im1' - ] + def test_aggregate_cache_add_single_image(self): + self.set_compute_api_version('2.81') + + arglist = ['ag1', 'im1'] verifylist = [ ('aggregate', 'ag1'), ('image', ['im1']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.sdk_client.aggregate_precache_images.assert_called_once_with( - self.fake_ag.id, [self.images[0].id]) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.compute_client.aggregate_precache_images.assert_called_once_with( + self.fake_ag.id, [self.images[0].id] + ) + + def test_aggregate_cache_add_multiple_images(self): + self.set_compute_api_version('2.81') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_aggregate_add_multiple_images(self, sm_mock): arglist = [ 'ag1', 'im1', @@ -613,7 +686,9 @@ def test_aggregate_add_multiple_images(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.sdk_client.find_aggregate.assert_called_once_with( - parsed_args.aggregate, ignore_missing=False) - self.sdk_client.aggregate_precache_images.assert_called_once_with( - self.fake_ag.id, [self.images[0].id, self.images[1].id]) + self.compute_client.find_aggregate.assert_called_once_with( + parsed_args.aggregate, ignore_missing=False + ) + self.compute_client.aggregate_precache_images.assert_called_once_with( + self.fake_ag.id, [self.images[0].id, self.images[1].id] + ) diff --git a/openstackclient/tests/unit/compute/v2/test_console.py b/openstackclient/tests/unit/compute/v2/test_console.py index db9603c9b9..8d9d36ccd2 100644 --- a/openstackclient/tests/unit/compute/v2/test_console.py +++ b/openstackclient/tests/unit/compute/v2/test_console.py @@ -13,107 +13,83 @@ # under the License. 
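Aside (illustrative only, not part of the patch): the cache-image tests above replace the mock.patch.object(sdk_utils, 'supports_microversion', ...) decorators with a set_compute_api_version() helper. A rough sketch of one such gated test under the new pattern; it assumes the openstackclient unit-test environment, where TestComputev2 is taken to provide self.compute_client (the mocked SDK compute proxy) and the set_compute_api_version() helper used throughout this patch:

from unittest import mock

from osc_lib import exceptions

from openstackclient.compute.v2 import aggregate
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes


class TestCacheImageGate(compute_fakes.TestComputev2):
    def setUp(self):
        super().setUp()
        # Mirror the wiring done in TestAggregateCacheImage.setUp() above.
        self.compute_client.find_aggregate.return_value = mock.Mock(id='ag1')
        self.cmd = aggregate.CacheImageForAggregate(self.app, None)

    def test_rejected_before_v281(self):
        # Pin the negotiated compute API version below the one that added
        # image pre-caching; the command is then expected to refuse to run.
        self.set_compute_api_version('2.80')
        parsed_args = self.check_parser(self.cmd, ['ag1', 'im1'], [])
        self.assertRaises(
            exceptions.CommandError, self.cmd.take_action, parsed_args
        )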
# -from unittest import mock + +from openstack.compute.v2 import server as _server +from openstack.test import fakes as sdk_fakes from openstackclient.compute.v2 import console from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes from openstackclient.tests.unit import utils -class TestConsole(compute_fakes.TestComputev2): - - def setUp(self): - super(TestConsole, self).setUp() - - # SDK mock - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - self.sdk_client.find_server = mock.Mock() - self.sdk_client.get_server_console_output = mock.Mock() - - -class TestConsoleLog(TestConsole): - _server = compute_fakes.FakeServer.create_one_server() - +class TestConsoleLog(compute_fakes.TestComputev2): def setUp(self): - super(TestConsoleLog, self).setUp() + super().setUp() - self.sdk_client.find_server.return_value = self._server + self._server = sdk_fakes.generate_fake_resource(_server.Server) + self.compute_client.find_server.return_value = self._server self.cmd = console.ShowConsoleLog(self.app, None) def test_show_no_args(self): - arglist = [ - ] - verifylist = [ - ] - self.assertRaises(utils.ParserException, - self.check_parser, - self.cmd, - arglist, - verifylist) + arglist = [] + verifylist = [] + self.assertRaises( + utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show(self): - arglist = [ - 'fake_server' - ] - verifylist = [ - ('server', 'fake_server') - ] + arglist = ['fake_server'] + verifylist = [('server', 'fake_server')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - output = { - 'output': '1st line\n2nd line\n' - } - self.sdk_client.get_server_console_output.return_value = output + output = {'output': '1st line\n2nd line\n'} + self.compute_client.get_server_console_output.return_value = output self.cmd.take_action(parsed_args) - self.sdk_client.find_server.assert_called_with( - name_or_id='fake_server', ignore_missing=False) - self.sdk_client.get_server_console_output.assert_called_with( - self._server.id, - length=None + self.compute_client.find_server.assert_called_with( + name_or_id='fake_server', ignore_missing=False + ) + self.compute_client.get_server_console_output.assert_called_with( + self._server.id, length=None ) stdout = self.app.stdout.content self.assertEqual(stdout[0], output['output']) def test_show_lines(self): - arglist = [ - 'fake_server', - '--lines', '15' - ] - verifylist = [ - ('server', 'fake_server'), - ('lines', 15) - ] + arglist = ['fake_server', '--lines', '15'] + verifylist = [('server', 'fake_server'), ('lines', 15)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - output = { - 'output': '1st line\n2nd line' - } - self.sdk_client.get_server_console_output.return_value = output + output = {'output': '1st line\n2nd line'} + self.compute_client.get_server_console_output.return_value = output self.cmd.take_action(parsed_args) - self.sdk_client.find_server.assert_called_with( - name_or_id='fake_server', ignore_missing=False) - self.sdk_client.get_server_console_output.assert_called_with( - self._server.id, - length=15 + self.compute_client.find_server.assert_called_with( + name_or_id='fake_server', ignore_missing=False + ) + self.compute_client.get_server_console_output.assert_called_with( + self._server.id, length=15 ) -class TestConsoleUrlShow(TestConsole): - _server = compute_fakes.FakeServer.create_one_server() - +class 
TestConsoleUrlShow(compute_fakes.TestComputev2): def setUp(self): - super(TestConsoleUrlShow, self).setUp() - self.sdk_client.find_server.return_value = self._server - fake_console_data = {'url': 'http://localhost', - 'protocol': 'fake_protocol', - 'type': 'fake_type'} - self.sdk_client.create_console = mock.Mock( - return_value=fake_console_data) + super().setUp() + + self._server = sdk_fakes.generate_fake_resource(_server.Server) + self.compute_client.find_server.return_value = self._server + + fake_console_data = { + 'url': 'http://localhost', + 'protocol': 'fake_protocol', + 'type': 'fake_type', + } + self.compute_client.create_console.return_value = fake_console_data self.columns = ( 'protocol', @@ -123,7 +99,7 @@ def setUp(self): self.data = ( fake_console_data['protocol'], fake_console_data['type'], - fake_console_data['url'] + fake_console_data['url'], ) self.cmd = console.ShowConsoleURL(self.app, None) @@ -138,9 +114,9 @@ def test_console_url_show_by_default(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_console.assert_called_once_with( - self._server.id, - console_type='novnc') + self.compute_client.create_console.assert_called_once_with( + self._server.id, console_type='novnc' + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -155,9 +131,9 @@ def test_console_url_show_with_novnc(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_console.assert_called_once_with( - self._server.id, - console_type='novnc') + self.compute_client.create_console.assert_called_once_with( + self._server.id, console_type='novnc' + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -172,13 +148,13 @@ def test_console_url_show_with_xvpvnc(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_console.assert_called_once_with( - self._server.id, - console_type='xvpvnc') + self.compute_client.create_console.assert_called_once_with( + self._server.id, console_type='xvpvnc' + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) - def test_console_url_show_with_spice(self): + def test_console_url_show_with_spice_html5(self): arglist = [ '--spice', 'foo_vm', @@ -189,9 +165,26 @@ def test_console_url_show_with_spice(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_console.assert_called_once_with( - self._server.id, - console_type='spice-html5') + self.compute_client.create_console.assert_called_once_with( + self._server.id, console_type='spice-html5' + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + def test_console_url_show_with_spice_direct(self): + arglist = [ + '--spice-direct', + 'foo_vm', + ] + verifylist = [ + ('url_type', 'spice-direct'), + ('server', 'foo_vm'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.compute_client.create_console.assert_called_once_with( + self._server.id, console_type='spice-direct' + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -206,9 +199,9 @@ def test_console_url_show_with_rdp(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = 
self.cmd.take_action(parsed_args) - self.sdk_client.create_console.assert_called_once_with( - self._server.id, - console_type='rdp-html5') + self.compute_client.create_console.assert_called_once_with( + self._server.id, console_type='rdp-html5' + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -223,9 +216,9 @@ def test_console_url_show_with_serial(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_console.assert_called_once_with( - self._server.id, - console_type='serial') + self.compute_client.create_console.assert_called_once_with( + self._server.id, console_type='serial' + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -240,8 +233,8 @@ def test_console_url_show_with_mks(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_console.assert_called_once_with( - self._server.id, - console_type='webmks') + self.compute_client.create_console.assert_called_once_with( + self._server.id, console_type='webmks' + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/compute/v2/test_console_connection.py b/openstackclient/tests/unit/compute/v2/test_console_connection.py new file mode 100644 index 0000000000..ab9cb0c05c --- /dev/null +++ b/openstackclient/tests/unit/compute/v2/test_console_connection.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.compute.v2 import console_auth_token as _console_auth_token +from openstack.test import fakes as sdk_fakes + +from openstackclient.compute.v2 import console_connection +from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes + + +class TestConsoleTokens(compute_fakes.TestComputev2): + def setUp(self): + super().setUp() + + self._console_auth_token = sdk_fakes.generate_fake_resource( + _console_auth_token.ConsoleAuthToken, + host='127.0.0.1', + instance_uuid=uuid.uuid4().hex, + internal_access_path=None, + port=5900, + tls_port=5901, + ) + self.compute_client.validate_console_auth_token.return_value = ( + self._console_auth_token + ) + + self.columns = ( + 'host', + 'instance_uuid', + 'internal_access_path', + 'port', + 'tls_port', + ) + self.data = ( + self._console_auth_token.host, + self._console_auth_token.instance_uuid, + self._console_auth_token.internal_access_path, + self._console_auth_token.port, + self._console_auth_token.tls_port, + ) + + self.cmd = console_connection.ShowConsoleConnectionInformation( + self.app, None + ) + + def test_console_connection_show(self): + arglist = [ + 'token', + ] + verifylist = [ + ('token', 'token'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.compute_client.validate_console_auth_token.assert_called_once_with( + 'token' + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/compute/v2/test_flavor.py b/openstackclient/tests/unit/compute/v2/test_flavor.py index 33ebf54622..25bc8eaa76 100644 --- a/openstackclient/tests/unit/compute/v2/test_flavor.py +++ b/openstackclient/tests/unit/compute/v2/test_flavor.py @@ -16,102 +16,83 @@ from openstack.compute.v2 import flavor as _flavor from openstack import exceptions as sdk_exceptions -from openstack import utils as sdk_utils +from openstack.identity.v3 import project as _project +from openstack.test import fakes as sdk_fakes from osc_lib.cli import format_columns from osc_lib import exceptions from openstackclient.compute.v2 import flavor from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit import utils as tests_utils class TestFlavor(compute_fakes.TestComputev2): - def setUp(self): - super(TestFlavor, self).setUp() - - # SDK mock - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - self.sdk_client.flavors = mock.Mock() - self.sdk_client.find_flavor = mock.Mock() - self.sdk_client.delete_flavor = mock.Mock() - self.sdk_client.update_flavor = mock.Mock() - self.sdk_client.flavor_add_tenant_access = mock.Mock() - self.sdk_client.flavor_remove_tenant_access = mock.Mock() - self.sdk_client.create_flavor_extra_specs = mock.Mock() - self.sdk_client.update_flavor_extra_specs_property = mock.Mock() - self.sdk_client.delete_flavor_extra_specs_property = mock.Mock() - - self.projects_mock = self.app.client_manager.identity.projects + super().setUp() + + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() class TestFlavorCreate(TestFlavor): - - flavor = compute_fakes.FakeFlavor.create_one_flavor( - attrs={'links': 'flavor-links'}) - project = identity_fakes.FakeProject.create_one_project() - - columns = ( - 
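Aside (illustrative only, not part of the patch): the console test modules above switch from the local FakeServer factory to openstacksdk's fake-resource helper. A small sketch of how that helper is used, based on the calls visible in this patch:

from openstack.compute.v2 import server as _server
from openstack.test import fakes as sdk_fakes

# Every attribute of the resource gets a generated placeholder value ...
fake_server = sdk_fakes.generate_fake_resource(_server.Server)

# ... unless it is pinned explicitly, the way the console-connection test
# pins host, port and tls_port on ConsoleAuthToken above.
pinned_server = sdk_fakes.generate_fake_resource(
    _server.Server, name='test-server', status='ACTIVE'
)

# generate_fake_resources() (plural) yields several fakes at once, as the
# flavor delete tests below use for a pair of flavors.
two_servers = list(sdk_fakes.generate_fake_resources(_server.Server, 2))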
'OS-FLV-DISABLED:disabled', - 'OS-FLV-EXT-DATA:ephemeral', - 'description', - 'disk', - 'id', - 'name', - 'os-flavor-access:is_public', - 'properties', - 'ram', - 'rxtx_factor', - 'swap', - 'vcpus' - ) - - data = ( - flavor.is_disabled, - flavor.ephemeral, - flavor.description, - flavor.disk, - flavor.id, - flavor.name, - flavor.is_public, - format_columns.DictColumn(flavor.extra_specs), - flavor.ram, - flavor.rxtx_factor, - flavor.swap, - flavor.vcpus, - ) - data_private = ( - flavor.is_disabled, - flavor.ephemeral, - flavor.description, - flavor.disk, - flavor.id, - flavor.name, - False, - format_columns.DictColumn(flavor.extra_specs), - flavor.ram, - flavor.rxtx_factor, - flavor.swap, - flavor.vcpus, - ) - def setUp(self): - super(TestFlavorCreate, self).setUp() + super().setUp() + + self.flavor = sdk_fakes.generate_fake_resource( + _flavor.Flavor, links='flavor-links' + ) + self.project = sdk_fakes.generate_fake_resource(_project.Project) + + self.columns = ( + 'OS-FLV-DISABLED:disabled', + 'OS-FLV-EXT-DATA:ephemeral', + 'description', + 'disk', + 'id', + 'name', + 'os-flavor-access:is_public', + 'properties', + 'ram', + 'rxtx_factor', + 'swap', + 'vcpus', + ) + self.data = ( + self.flavor.is_disabled, + self.flavor.ephemeral, + self.flavor.description, + self.flavor.disk, + self.flavor.id, + self.flavor.name, + self.flavor.is_public, + format_columns.DictColumn(self.flavor.extra_specs), + self.flavor.ram, + self.flavor.rxtx_factor, + self.flavor.swap, + self.flavor.vcpus, + ) + self.data_private = ( + self.flavor.is_disabled, + self.flavor.ephemeral, + self.flavor.description, + self.flavor.disk, + self.flavor.id, + self.flavor.name, + False, + format_columns.DictColumn(self.flavor.extra_specs), + self.flavor.ram, + self.flavor.rxtx_factor, + self.flavor.swap, + self.flavor.vcpus, + ) - # Return a project self.projects_mock.get.return_value = self.project - self.sdk_client.create_flavor.return_value = self.flavor + self.compute_client.create_flavor.return_value = self.flavor + self.cmd = flavor.CreateFlavor(self.app, None) def test_flavor_create_default_options(self): - - arglist = [ - self.flavor.name - ] + arglist = [self.flavor.name] verifylist = [ ('name', self.flavor.name), ] @@ -130,24 +111,36 @@ def test_flavor_create_default_options(self): } columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_flavor.assert_called_once_with(**default_args) + self.compute_client.create_flavor.assert_called_once_with( + **default_args + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_flavor_create_all_options(self): + self.set_compute_api_version('2.55') arglist = [ - '--id', self.flavor.id, - '--ram', str(self.flavor.ram), - '--disk', str(self.flavor.disk), - '--ephemeral', str(self.flavor.ephemeral), - '--swap', str(self.flavor.swap), - '--vcpus', str(self.flavor.vcpus), - '--rxtx-factor', str(self.flavor.rxtx_factor), + '--id', + self.flavor.id, + '--ram', + str(self.flavor.ram), + '--disk', + str(self.flavor.disk), + '--ephemeral', + str(self.flavor.ephemeral), + '--swap', + str(self.flavor.swap), + '--vcpus', + str(self.flavor.vcpus), + '--rxtx-factor', + str(self.flavor.rxtx_factor), '--public', - '--description', str(self.flavor.description), - '--property', 'property=value', + '--description', + str(self.flavor.description), + '--property', + 'property=value', self.flavor.name, ] verifylist = [ @@ -175,7 +168,7 @@ def test_flavor_create_all_options(self): 'swap': self.flavor.swap, 'rxtx_factor': self.flavor.rxtx_factor, 
'is_public': self.flavor.is_public, - 'description': self.flavor.description + 'description': self.flavor.description, } props = {'property': 'value'} @@ -189,37 +182,49 @@ def test_flavor_create_all_options(self): # convert expected data tuple to list to be able to modify it cmp_data = list(self.data) cmp_data[7] = format_columns.DictColumn(props) - self.sdk_client.create_flavor.return_value = create_flavor - self.sdk_client.create_flavor_extra_specs.return_value = \ + self.compute_client.create_flavor.return_value = create_flavor + self.compute_client.create_flavor_extra_specs.return_value = ( expected_flavor + ) - with mock.patch.object(sdk_utils, 'supports_microversion', - return_value=True): - columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_flavor.assert_called_once_with(**args) - self.sdk_client.create_flavor_extra_specs.assert_called_once_with( - create_flavor, props) - self.sdk_client.get_flavor_access.assert_not_called() + columns, data = self.cmd.take_action(parsed_args) + self.compute_client.create_flavor.assert_called_once_with(**args) + self.compute_client.create_flavor_extra_specs.assert_called_once_with( + create_flavor, props + ) + self.compute_client.get_flavor_access.assert_not_called() self.assertEqual(self.columns, columns) self.assertCountEqual(tuple(cmp_data), data) def test_flavor_create_other_options(self): + self.set_compute_api_version('2.55') self.flavor.is_public = False arglist = [ - '--id', 'auto', - '--ram', str(self.flavor.ram), - '--disk', str(self.flavor.disk), - '--ephemeral', str(self.flavor.ephemeral), - '--swap', str(self.flavor.swap), - '--vcpus', str(self.flavor.vcpus), - '--rxtx-factor', str(self.flavor.rxtx_factor), + '--id', + 'auto', + '--ram', + str(self.flavor.ram), + '--disk', + str(self.flavor.disk), + '--ephemeral', + str(self.flavor.ephemeral), + '--swap', + str(self.flavor.swap), + '--vcpus', + str(self.flavor.vcpus), + '--rxtx-factor', + str(self.flavor.rxtx_factor), '--private', - '--description', str(self.flavor.description), - '--project', self.project.id, - '--property', 'key1=value1', - '--property', 'key2=value2', + '--description', + str(self.flavor.description), + '--project', + self.project.id, + '--property', + 'key1=value1', + '--property', + 'key2=value2', self.flavor.name, ] verifylist = [ @@ -230,7 +235,7 @@ def test_flavor_create_other_options(self): ('vcpus', self.flavor.vcpus), ('rxtx_factor', self.flavor.rxtx_factor), ('public', False), - ('description', 'description'), + ('description', self.flavor.description), ('project', self.project.id), ('properties', {'key1': 'value1', 'key2': 'value2'}), ('name', self.flavor.name), @@ -242,12 +247,12 @@ def test_flavor_create_other_options(self): 'ram': self.flavor.ram, 'vcpus': self.flavor.vcpus, 'disk': self.flavor.disk, - 'id': 'auto', + 'id': None, 'ephemeral': self.flavor.ephemeral, 'swap': self.flavor.swap, 'rxtx_factor': self.flavor.rxtx_factor, 'is_public': False, - 'description': self.flavor.description + 'description': self.flavor.description, } props = {'key1': 'value1', 'key2': 'value2'} @@ -262,26 +267,28 @@ def test_flavor_create_other_options(self): # convert expected data tuple to list to be able to modify it cmp_data = list(self.data_private) cmp_data[7] = format_columns.DictColumn(props) - self.sdk_client.create_flavor.return_value = create_flavor - self.sdk_client.create_flavor_extra_specs.return_value = \ + self.compute_client.create_flavor.return_value = create_flavor + self.compute_client.create_flavor_extra_specs.return_value = ( 
expected_flavor + ) - with mock.patch.object(sdk_utils, 'supports_microversion', - return_value=True): - columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_flavor.assert_called_once_with(**args) - self.sdk_client.flavor_add_tenant_access.assert_called_with( + columns, data = self.cmd.take_action(parsed_args) + + self.compute_client.create_flavor.assert_called_once_with(**args) + self.compute_client.flavor_add_tenant_access.assert_called_with( self.flavor.id, self.project.id, ) - self.sdk_client.create_flavor_extra_specs.assert_called_with( - create_flavor, props) + self.compute_client.create_flavor_extra_specs.assert_called_with( + create_flavor, props + ) self.assertEqual(self.columns, columns) self.assertCountEqual(cmp_data, data) def test_public_flavor_create_with_project(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, self.flavor.name, ] verifylist = [ @@ -290,30 +297,42 @@ def test_public_flavor_create_with_project(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_flavor_create_no_options(self): arglist = [] verifylist = None - self.assertRaises(tests_utils.ParserException, - self.check_parser, - self.cmd, - arglist, - verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + + def test_flavor_create_with_description(self): + self.set_compute_api_version('2.55') - def test_flavor_create_with_description_api_newer(self): arglist = [ - '--id', self.flavor.id, - '--ram', str(self.flavor.ram), - '--disk', str(self.flavor.disk), - '--ephemeral', str(self.flavor.ephemeral), - '--swap', str(self.flavor.swap), - '--vcpus', str(self.flavor.vcpus), - '--rxtx-factor', str(self.flavor.rxtx_factor), - '--private', - '--description', 'fake description', + '--id', + self.flavor.id, + '--ram', + str(self.flavor.ram), + '--disk', + str(self.flavor.disk), + '--ephemeral', + str(self.flavor.ephemeral), + '--swap', + str(self.flavor.swap), + '--vcpus', + str(self.flavor.vcpus), + '--rxtx-factor', + str(self.flavor.rxtx_factor), + '--public', + '--description', + 'fake description', self.flavor.name, ] verifylist = [ @@ -324,15 +343,13 @@ def test_flavor_create_with_description_api_newer(self): ('swap', self.flavor.swap), ('vcpus', self.flavor.vcpus), ('rxtx_factor', self.flavor.rxtx_factor), - ('public', False), + ('public', True), ('description', 'fake description'), ('name', self.flavor.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch.object(sdk_utils, 'supports_microversion', - return_value=True): - columns, data = self.cmd.take_action(parsed_args) + columns, data = self.cmd.take_action(parsed_args) args = { 'name': self.flavor.name, @@ -343,21 +360,27 @@ def test_flavor_create_with_description_api_newer(self): 'ephemeral': self.flavor.ephemeral, 'swap': self.flavor.swap, 'rxtx_factor': self.flavor.rxtx_factor, - 'is_public': self.flavor.is_public, - 'description': 'fake description' + 'is_public': True, + 'description': 'fake description', } - self.sdk_client.create_flavor.assert_called_once_with(**args) + self.compute_client.create_flavor.assert_called_once_with(**args) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data_private, data) + self.assertCountEqual(self.data, data) + + def 
test_flavor_create_with_description_pre_v255(self): + self.set_compute_api_version('2.54') - def test_flavor_create_with_description_api_older(self): arglist = [ - '--id', self.flavor.id, - '--ram', str(self.flavor.ram), - '--vcpus', str(self.flavor.vcpus), - '--description', 'description', + '--id', + self.flavor.id, + '--ram', + str(self.flavor.ram), + '--vcpus', + str(self.flavor.vcpus), + '--description', + 'description', self.flavor.name, ] verifylist = [ @@ -368,39 +391,40 @@ def test_flavor_create_with_description_api_older(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch.object(sdk_utils, 'supports_microversion', - return_value=False): - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) class TestFlavorDelete(TestFlavor): - - flavors = compute_fakes.FakeFlavor.create_flavors(count=2) - def setUp(self): - super(TestFlavorDelete, self).setUp() + super().setUp() + + self.flavors = list( + sdk_fakes.generate_fake_resources(_flavor.Flavor, 2) + ) - self.sdk_client.delete_flavor.return_value = None + self.compute_client.delete_flavor.return_value = None self.cmd = flavor.DeleteFlavor(self.app, None) def test_flavor_delete(self): - arglist = [ - self.flavors[0].id - ] + arglist = [self.flavors[0].id] verifylist = [ ('flavor', [self.flavors[0].id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.sdk_client.find_flavor.return_value = self.flavors[0] + self.compute_client.find_flavor.return_value = self.flavors[0] result = self.cmd.take_action(parsed_args) - self.sdk_client.find_flavor.assert_called_with(self.flavors[0].id, - ignore_missing=False) - self.sdk_client.delete_flavor.assert_called_with(self.flavors[0].id) + self.compute_client.find_flavor.assert_called_with( + self.flavors[0].id, ignore_missing=False + ) + self.compute_client.delete_flavor.assert_called_with( + self.flavors[0].id + ) self.assertIsNone(result) def test_delete_multiple_flavors(self): @@ -413,7 +437,7 @@ def test_delete_multiple_flavors(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.sdk_client.find_flavor.side_effect = self.flavors + self.compute_client.find_flavor.side_effect = self.flavors result = self.cmd.take_action(parsed_args) @@ -421,8 +445,8 @@ def test_delete_multiple_flavors(self): mock.call(i.id, ignore_missing=False) for i in self.flavors ] delete_calls = [mock.call(i.id) for i in self.flavors] - self.sdk_client.find_flavor.assert_has_calls(find_calls) - self.sdk_client.delete_flavor.assert_has_calls(delete_calls) + self.compute_client.find_flavor.assert_has_calls(find_calls) + self.compute_client.delete_flavor.assert_has_calls(delete_calls) self.assertIsNone(result) def test_multi_flavors_delete_with_exception(self): @@ -430,14 +454,12 @@ def test_multi_flavors_delete_with_exception(self): self.flavors[0].id, 'unexist_flavor', ] - verifylist = [ - ('flavor', [self.flavors[0].id, 'unexist_flavor']) - ] + verifylist = [('flavor', [self.flavors[0].id, 'unexist_flavor'])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.sdk_client.find_flavor.side_effect = [ + self.compute_client.find_flavor.side_effect = [ self.flavors[0], - sdk_exceptions.ResourceNotFound + sdk_exceptions.ResourceNotFound, ] try: @@ -451,53 +473,55 @@ def test_multi_flavors_delete_with_exception(self): mock.call('unexist_flavor', ignore_missing=False), ] delete_calls = [mock.call(self.flavors[0].id)] - 
self.sdk_client.find_flavor.assert_has_calls(find_calls) - self.sdk_client.delete_flavor.assert_has_calls(delete_calls) + self.compute_client.find_flavor.assert_has_calls(find_calls) + self.compute_client.delete_flavor.assert_has_calls(delete_calls) class TestFlavorList(TestFlavor): + def setUp(self): + super().setUp() - _flavor = compute_fakes.FakeFlavor.create_one_flavor() - - columns = ( - 'ID', - 'Name', - 'RAM', - 'Disk', - 'Ephemeral', - 'VCPUs', - 'Is Public', - ) - columns_long = columns + ( - 'Swap', - 'RXTX Factor', - 'Properties' - ) - - data = (( - _flavor.id, - _flavor.name, - _flavor.ram, - _flavor.disk, - _flavor.ephemeral, - _flavor.vcpus, - _flavor.is_public, - ),) - data_long = (data[0] + ( - _flavor.swap, - _flavor.rxtx_factor, - format_columns.DictColumn(_flavor.extra_specs) - ), ) + self._flavor = sdk_fakes.generate_fake_resource( + _flavor.Flavor, extra_specs={'property': 'value'} + ) - def setUp(self): - super(TestFlavorList, self).setUp() + self.columns = ( + 'ID', + 'Name', + 'RAM', + 'Disk', + 'Ephemeral', + 'VCPUs', + 'Is Public', + ) + self.columns_long = self.columns + ( + 'Swap', + 'RXTX Factor', + 'Properties', + ) - self.api_mock = mock.Mock() - self.api_mock.side_effect = [[self._flavor], [], ] + self.data = ( + ( + self._flavor.id, + self._flavor.name, + self._flavor.ram, + self._flavor.disk, + self._flavor.ephemeral, + self._flavor.vcpus, + self._flavor.is_public, + ), + ) + self.data_long = ( + self.data[0] + + ( + self._flavor.swap, + self._flavor.rxtx_factor, + format_columns.DictColumn(self._flavor.extra_specs), + ), + ) - self.sdk_client.flavors = self.api_mock + self.compute_client.flavors.side_effect = [[self._flavor], []] - # Get the command object to test self.cmd = flavor.ListFlavor(self.app, None) def test_flavor_list_no_options(self): @@ -520,10 +544,8 @@ def test_flavor_list_no_options(self): 'is_public': True, } - self.sdk_client.flavors.assert_called_with( - **kwargs - ) - self.sdk_client.fetch_flavor_extra_specs.assert_not_called() + self.compute_client.flavors.assert_called_with(**kwargs) + self.compute_client.fetch_flavor_extra_specs.assert_not_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) @@ -548,10 +570,8 @@ def test_flavor_list_all_flavors(self): 'is_public': None, } - self.sdk_client.flavors.assert_called_with( - **kwargs - ) - self.sdk_client.fetch_flavor_extra_specs.assert_not_called() + self.compute_client.flavors.assert_called_with(**kwargs) + self.compute_client.fetch_flavor_extra_specs.assert_not_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) @@ -576,10 +596,8 @@ def test_flavor_list_private_flavors(self): 'is_public': False, } - self.sdk_client.flavors.assert_called_with( - **kwargs - ) - self.sdk_client.fetch_flavor_extra_specs.assert_not_called() + self.compute_client.flavors.assert_called_with(**kwargs) + self.compute_client.fetch_flavor_extra_specs.assert_not_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) @@ -604,10 +622,8 @@ def test_flavor_list_public_flavors(self): 'is_public': True, } - self.sdk_client.flavors.assert_called_with( - **kwargs - ) - self.sdk_client.fetch_flavor_extra_specs.assert_not_called() + self.compute_client.flavors.assert_called_with(**kwargs) + self.compute_client.fetch_flavor_extra_specs.assert_not_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) @@ -632,36 +648,39 @@ def test_flavor_list_long(self): 'is_public': True, } - 
self.sdk_client.flavors.assert_called_with( - **kwargs - ) - self.sdk_client.fetch_flavor_extra_specs.assert_not_called() + self.compute_client.flavors.assert_called_with(**kwargs) + self.compute_client.fetch_flavor_extra_specs.assert_not_called() self.assertEqual(self.columns_long, columns) self.assertCountEqual(self.data_long, tuple(data)) def test_flavor_list_long_no_extra_specs(self): # use flavor with no extra specs for this test - flavor = compute_fakes.FakeFlavor.create_one_flavor( - attrs={"extra_specs": {}}) - self.data = (( - flavor.id, - flavor.name, - flavor.ram, - flavor.disk, - flavor.ephemeral, - flavor.vcpus, - flavor.is_public, - ),) - self.data_long = (self.data[0] + ( - flavor.swap, - flavor.rxtx_factor, - format_columns.DictColumn(flavor.extra_specs) - ),) - self.api_mock.side_effect = [[flavor], [], ] - - self.sdk_client.flavors = self.api_mock - self.sdk_client.fetch_flavor_extra_specs = mock.Mock(return_value=None) + flavor = sdk_fakes.generate_fake_resource( + _flavor.Flavor, extra_specs={} + ) + self.data = ( + ( + flavor.id, + flavor.name, + flavor.ram, + flavor.disk, + flavor.ephemeral, + flavor.vcpus, + flavor.is_public, + ), + ) + self.data_long = ( + self.data[0] + + ( + flavor.swap, + flavor.rxtx_factor, + format_columns.DictColumn(flavor.extra_specs), + ), + ) + + self.compute_client.flavors.side_effect = [[flavor], []] + self.compute_client.fetch_flavor_extra_specs.return_value = None arglist = [ '--long', @@ -682,19 +701,20 @@ def test_flavor_list_long_no_extra_specs(self): 'is_public': True, } - self.sdk_client.flavors.assert_called_with( - **kwargs + self.compute_client.flavors.assert_called_with(**kwargs) + self.compute_client.fetch_flavor_extra_specs.assert_called_once_with( + flavor ) - self.sdk_client.fetch_flavor_extra_specs.assert_called_once_with( - flavor) self.assertEqual(self.columns_long, columns) self.assertCountEqual(self.data_long, tuple(data)) def test_flavor_list_min_disk_min_ram(self): arglist = [ - '--min-disk', '10', - '--min-ram', '2048', + '--min-disk', + '10', + '--min-ram', + '2048', ] verifylist = [ ('min_disk', 10), @@ -715,76 +735,61 @@ def test_flavor_list_min_disk_min_ram(self): 'min_ram': 2048, } - self.sdk_client.flavors.assert_called_with( - **kwargs - ) - self.sdk_client.fetch_flavor_extra_specs.assert_not_called() + self.compute_client.flavors.assert_called_with(**kwargs) + self.compute_client.fetch_flavor_extra_specs.assert_not_called() self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) class TestFlavorSet(TestFlavor): - - # Return value of self.sdk_client.find_flavor(). 
- flavor = compute_fakes.FakeFlavor.create_one_flavor( - attrs={'os-flavor-access:is_public': False}) - project = identity_fakes.FakeProject.create_one_project() - def setUp(self): - super(TestFlavorSet, self).setUp() + super().setUp() + + self.flavor = sdk_fakes.generate_fake_resource( + _flavor.Flavor, is_public=False, extra_specs={'property': 'value'} + ) + self.project = sdk_fakes.generate_fake_resource(_project.Project) - self.sdk_client.find_flavor.return_value = self.flavor - # Return a project + self.compute_client.find_flavor.return_value = self.flavor self.projects_mock.get.return_value = self.project self.cmd = flavor.SetFlavor(self.app, None) def test_flavor_set_property(self): - arglist = [ - '--property', 'FOO="B A R"', - 'baremetal' - ] + arglist = ['--property', 'FOO="B A R"', 'baremetal'] verifylist = [ ('properties', {'FOO': '"B A R"'}), - ('flavor', 'baremetal') + ('flavor', 'baremetal'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_flavor.assert_called_with( - parsed_args.flavor, - get_extra_specs=True, - ignore_missing=False + self.compute_client.find_flavor.assert_called_with( + parsed_args.flavor, get_extra_specs=True, ignore_missing=False + ) + self.compute_client.create_flavor_extra_specs.assert_called_with( + self.flavor.id, {'FOO': '"B A R"'} ) - self.sdk_client.create_flavor_extra_specs.assert_called_with( - self.flavor.id, - {'FOO': '"B A R"'}) self.assertIsNone(result) def test_flavor_set_no_property(self): - arglist = [ - '--no-property', - 'baremetal' - ] - verifylist = [ - ('no_property', True), - ('flavor', 'baremetal') - ] + arglist = ['--no-property', 'baremetal'] + verifylist = [('no_property', True), ('flavor', 'baremetal')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_flavor.assert_called_with( - parsed_args.flavor, - get_extra_specs=True, - ignore_missing=False + self.compute_client.find_flavor.assert_called_with( + parsed_args.flavor, get_extra_specs=True, ignore_missing=False + ) + self.compute_client.delete_flavor_extra_specs_property.assert_called_with( + self.flavor.id, 'property' ) - self.sdk_client.delete_flavor_extra_specs_property.assert_called_with( - self.flavor.id, 'property') self.assertIsNone(result) def test_flavor_set_project(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, self.flavor.id, ] verifylist = [ @@ -795,16 +800,14 @@ def test_flavor_set_project(self): result = self.cmd.take_action(parsed_args) - self.sdk_client.find_flavor.assert_called_with( - parsed_args.flavor, - get_extra_specs=True, - ignore_missing=False + self.compute_client.find_flavor.assert_called_with( + parsed_args.flavor, get_extra_specs=True, ignore_missing=False ) - self.sdk_client.flavor_add_tenant_access.assert_called_with( + self.compute_client.flavor_add_tenant_access.assert_called_with( self.flavor.id, self.project.id, ) - self.sdk_client.create_flavor_extra_specs.assert_not_called() + self.compute_client.create_flavor_extra_specs.assert_not_called() self.assertIsNone(result) def test_flavor_set_no_project(self): @@ -816,26 +819,38 @@ def test_flavor_set_no_project(self): ('project', None), ('flavor', self.flavor.id), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def 
test_flavor_set_no_flavor(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, ] verifylist = [ ('project', self.project.id), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_flavor_set_with_unexist_flavor(self): - self.sdk_client.find_flavor.side_effect = [ + self.compute_client.find_flavor.side_effect = [ sdk_exceptions.ResourceNotFound() ] arglist = [ - '--project', self.project.id, + '--project', + self.project.id, 'unexist_flavor', ] verifylist = [ @@ -844,9 +859,9 @@ def test_flavor_set_with_unexist_flavor(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_flavor_set_nothing(self): arglist = [ @@ -858,17 +873,18 @@ def test_flavor_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_flavor.assert_called_with( - parsed_args.flavor, - get_extra_specs=True, - ignore_missing=False + self.compute_client.find_flavor.assert_called_with( + parsed_args.flavor, get_extra_specs=True, ignore_missing=False ) - self.sdk_client.flavor_add_tenant_access.assert_not_called() + self.compute_client.flavor_add_tenant_access.assert_not_called() self.assertIsNone(result) - def test_flavor_set_description_api_newer(self): + def test_flavor_set_description(self): + self.set_compute_api_version('2.55') + arglist = [ - '--description', 'description', + '--description', + 'description', self.flavor.id, ] verifylist = [ @@ -876,18 +892,19 @@ def test_flavor_set_description_api_newer(self): ('flavor', self.flavor.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.compute.api_version = 2.55 - with mock.patch.object(sdk_utils, - 'supports_microversion', - return_value=True): - result = self.cmd.take_action(parsed_args) - self.sdk_client.update_flavor.assert_called_with( - flavor=self.flavor.id, description='description') - self.assertIsNone(result) - - def test_flavor_set_description_api_older(self): + + result = self.cmd.take_action(parsed_args) + self.compute_client.update_flavor.assert_called_with( + flavor=self.flavor.id, description='description' + ) + self.assertIsNone(result) + + def test_flavor_set_description_pre_v254(self): + self.set_compute_api_version('2.54') + arglist = [ - '--description', 'description', + '--description', + 'description', self.flavor.id, ] verifylist = [ @@ -895,16 +912,17 @@ def test_flavor_set_description_api_older(self): ('flavor', self.flavor.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.compute.api_version = 2.54 - with mock.patch.object(sdk_utils, - 'supports_microversion', - return_value=False): - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) - - def test_flavor_set_description_using_name_api_newer(self): + + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + def test_flavor_set_description_using_name(self): + self.set_compute_api_version('2.55') + arglist = [ - '--description', 'description', + '--description', + 'description', self.flavor.name, ] verifylist = [ @@ -912,19 +930,19 @@ def 
test_flavor_set_description_using_name_api_newer(self): ('flavor', self.flavor.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.compute.api_version = 2.55 - with mock.patch.object(sdk_utils, - 'supports_microversion', - return_value=True): - result = self.cmd.take_action(parsed_args) - self.sdk_client.update_flavor.assert_called_with( - flavor=self.flavor.id, description='description') - self.assertIsNone(result) + result = self.cmd.take_action(parsed_args) + self.compute_client.update_flavor.assert_called_with( + flavor=self.flavor.id, description='description' + ) + self.assertIsNone(result) + + def test_flavor_set_description_using_name_pre_v255(self): + self.set_compute_api_version('2.54') - def test_flavor_set_description_using_name_api_older(self): arglist = [ - '--description', 'description', + '--description', + 'description', self.flavor.name, ] verifylist = [ @@ -932,59 +950,53 @@ def test_flavor_set_description_using_name_api_older(self): ('flavor', self.flavor.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.compute.api_version = 2.54 - with mock.patch.object(sdk_utils, - 'supports_microversion', - return_value=False): - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) class TestFlavorShow(TestFlavor): - - # Return value of self.sdk_client.find_flavor(). - flavor_access = compute_fakes.FakeFlavorAccess.create_one_flavor_access() - flavor = compute_fakes.FakeFlavor.create_one_flavor() - - columns = ( - 'OS-FLV-DISABLED:disabled', - 'OS-FLV-EXT-DATA:ephemeral', - 'access_project_ids', - 'description', - 'disk', - 'id', - 'name', - 'os-flavor-access:is_public', - 'properties', - 'ram', - 'rxtx_factor', - 'swap', - 'vcpus' - ) - - data = ( - flavor.is_disabled, - flavor.ephemeral, - None, - flavor.description, - flavor.disk, - flavor.id, - flavor.name, - flavor.is_public, - format_columns.DictColumn(flavor.extra_specs), - flavor.ram, - flavor.rxtx_factor, - flavor.swap, - flavor.vcpus, - ) - def setUp(self): - super(TestFlavorShow, self).setUp() - - # Return value of _find_resource() - self.sdk_client.find_flavor.return_value = self.flavor - self.sdk_client.get_flavor_access.return_value = [self.flavor_access] + super().setUp() + + self.flavor_access = compute_fakes.create_one_flavor_access() + self.flavor = sdk_fakes.generate_fake_resource(_flavor.Flavor) + + self.columns = ( + 'OS-FLV-DISABLED:disabled', + 'OS-FLV-EXT-DATA:ephemeral', + 'access_project_ids', + 'description', + 'disk', + 'id', + 'name', + 'os-flavor-access:is_public', + 'properties', + 'ram', + 'rxtx_factor', + 'swap', + 'vcpus', + ) + self.data = ( + self.flavor.is_disabled, + self.flavor.ephemeral, + None, + self.flavor.description, + self.flavor.disk, + self.flavor.id, + self.flavor.name, + self.flavor.is_public, + format_columns.DictColumn(self.flavor.extra_specs), + self.flavor.ram, + self.flavor.rxtx_factor, + self.flavor.swap, + self.flavor.vcpus, + ) + self.compute_client.find_flavor.return_value = self.flavor + self.compute_client.get_flavor_access.return_value = [ + self.flavor_access + ] self.cmd = flavor.ShowFlavor(self.app, None) def test_show_no_options(self): @@ -992,8 +1004,13 @@ def test_show_no_options(self): verifylist = [] # Missing required args should boil here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( 
+ tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_public_flavor_show(self): arglist = [ @@ -1011,12 +1028,10 @@ def test_public_flavor_show(self): self.assertCountEqual(self.data, data) def test_private_flavor_show(self): - private_flavor = compute_fakes.FakeFlavor.create_one_flavor( - attrs={ - 'os-flavor-access:is_public': False, - } + private_flavor = sdk_fakes.generate_fake_resource( + _flavor.Flavor, is_public=False ) - self.sdk_client.find_flavor.return_value = private_flavor + self.compute_client.find_flavor.return_value = private_flavor arglist = [ private_flavor.name, @@ -1028,7 +1043,7 @@ def test_private_flavor_show(self): data_with_project = ( private_flavor.is_disabled, private_flavor.ephemeral, - [self.flavor_access.tenant_id], + [self.flavor_access['tenant_id']], private_flavor.description, private_flavor.disk, private_flavor.id, @@ -1045,34 +1060,29 @@ def test_private_flavor_show(self): columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.get_flavor_access.assert_called_with( - flavor=private_flavor.id) + self.compute_client.get_flavor_access.assert_called_with( + flavor=private_flavor.id + ) self.assertEqual(self.columns, columns) self.assertCountEqual(data_with_project, data) class TestFlavorUnset(TestFlavor): - - # Return value of self.sdk_client.find_flavor(). - flavor = compute_fakes.FakeFlavor.create_one_flavor( - attrs={'os-flavor-access:is_public': False}) - project = identity_fakes.FakeProject.create_one_project() - def setUp(self): - super(TestFlavorUnset, self).setUp() + super().setUp() - self.sdk_client.find_flavor.return_value = self.flavor - # Return a project + self.flavor = sdk_fakes.generate_fake_resource( + _flavor.Flavor, is_public=False + ) + self.project = sdk_fakes.generate_fake_resource(_project.Project) + + self.compute_client.find_flavor.return_value = self.flavor self.projects_mock.get.return_value = self.project - self.cmd = flavor.UnsetFlavor(self.app, None) - self.mock_shortcut = self.sdk_client.delete_flavor_extra_specs_property + self.cmd = flavor.UnsetFlavor(self.app, None) def test_flavor_unset_property(self): - arglist = [ - '--property', 'property', - 'baremetal' - ] + arglist = ['--property', 'property', 'baremetal'] verifylist = [ ('properties', ['property']), ('flavor', 'baremetal'), @@ -1080,20 +1090,22 @@ def test_flavor_unset_property(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_flavor.assert_called_with( - parsed_args.flavor, - get_extra_specs=True, - ignore_missing=False) - self.mock_shortcut.assert_called_with( - self.flavor.id, 'property') - self.sdk_client.flavor_remove_tenant_access.assert_not_called() + self.compute_client.find_flavor.assert_called_with( + parsed_args.flavor, get_extra_specs=True, ignore_missing=False + ) + self.compute_client.delete_flavor_extra_specs_property.assert_called_with( + self.flavor.id, 'property' + ) + self.compute_client.flavor_remove_tenant_access.assert_not_called() self.assertIsNone(result) def test_flavor_unset_properties(self): arglist = [ - '--property', 'property1', - '--property', 'property2', - 'baremetal' + '--property', + 'property1', + '--property', + 'property2', + 'baremetal', ] verifylist = [ ('properties', ['property1', 'property2']), @@ -1102,30 +1114,22 @@ def test_flavor_unset_properties(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - 
self.sdk_client.find_flavor.assert_called_with( - parsed_args.flavor, - get_extra_specs=True, - ignore_missing=False) - calls = [ - mock.call(self.flavor.id, 'property1'), - mock.call(self.flavor.id, 'property2') - ] - self.mock_shortcut.assert_has_calls( - calls) - - # A bit tricky way to ensure we do not unset other properties - calls.append(mock.call(self.flavor.id, 'property')) - self.assertRaises( - AssertionError, - self.mock_shortcut.assert_has_calls, - calls - ) - self.sdk_client.flavor_remove_tenant_access.assert_not_called() + self.compute_client.find_flavor.assert_called_with( + parsed_args.flavor, get_extra_specs=True, ignore_missing=False + ) + self.compute_client.delete_flavor_extra_specs_property.assert_has_calls( + [ + mock.call(self.flavor.id, 'property1'), + mock.call(self.flavor.id, 'property2'), + ] + ) + self.compute_client.flavor_remove_tenant_access.assert_not_called() def test_flavor_unset_project(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, self.flavor.id, ] verifylist = [ @@ -1137,14 +1141,14 @@ def test_flavor_unset_project(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.sdk_client.find_flavor.assert_called_with( - parsed_args.flavor, get_extra_specs=True, - ignore_missing=False) - self.sdk_client.flavor_remove_tenant_access.assert_called_with( + self.compute_client.find_flavor.assert_called_with( + parsed_args.flavor, get_extra_specs=True, ignore_missing=False + ) + self.compute_client.flavor_remove_tenant_access.assert_called_with( self.flavor.id, self.project.id, ) - self.sdk_client.delete_flavor_extra_specs_proerty.assert_not_called() + self.compute_client.delete_flavor_extra_specs_property.assert_not_called() self.assertIsNone(result) def test_flavor_unset_no_project(self): @@ -1156,26 +1160,38 @@ def test_flavor_unset_no_project(self): ('project', None), ('flavor', self.flavor.id), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_flavor_unset_no_flavor(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, ] verifylist = [ ('project', self.project.id), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_flavor_unset_with_unexist_flavor(self): - self.sdk_client.find_flavor.side_effect = [ + self.compute_client.find_flavor.side_effect = [ sdk_exceptions.ResourceNotFound ] arglist = [ - '--project', self.project.id, + '--project', + self.project.id, 'unexist_flavor', ] verifylist = [ @@ -1183,8 +1199,9 @@ def test_flavor_unset_with_unexist_flavor(self): ('flavor', 'unexist_flavor'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_flavor_unset_nothing(self): arglist = [ @@ -1198,4 +1215,4 @@ def test_flavor_unset_nothing(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.sdk_client.flavor_remove_tenant_access.assert_not_called() + self.compute_client.flavor_remove_tenant_access.assert_not_called() diff --git a/openstackclient/tests/unit/compute/v2/test_host.py 
b/openstackclient/tests/unit/compute/v2/test_host.py index ec91b37ab8..8d38f8353b 100644 --- a/openstackclient/tests/unit/compute/v2/test_host.py +++ b/openstackclient/tests/unit/compute/v2/test_host.py @@ -11,9 +11,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -from unittest import mock +import uuid from openstackclient.compute.v2 import host from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes @@ -21,46 +20,64 @@ from openstackclient.tests.unit import utils as tests_utils -class TestHost(compute_fakes.TestComputev2): - +def _generate_fake_host(): + return { + 'service_id': 1, + 'host': 'host1', + 'uuid': 'host-id-' + uuid.uuid4().hex, + 'vcpus': 10, + 'memory_mb': 100, + 'local_gb': 100, + 'vcpus_used': 5, + 'memory_mb_used': 50, + 'local_gb_used': 10, + 'hypervisor_type': 'xen', + 'hypervisor_version': 1, + 'hypervisor_hostname': 'devstack1', + 'free_ram_mb': 50, + 'free_disk_gb': 50, + 'current_workload': 10, + 'running_vms': 1, + 'cpu_info': '', + 'disk_available_least': 1, + 'host_ip': '10.10.10.10', + 'supported_instances': '', + 'metrics': '', + 'pci_stats': '', + 'extra_resources': '', + 'stats': '', + 'numa_topology': '', + 'ram_allocation_ratio': 1.0, + 'cpu_allocation_ratio': 1.0, + 'zone': 'zone-' + uuid.uuid4().hex, + 'host_name': 'name-' + uuid.uuid4().hex, + 'service': 'service-' + uuid.uuid4().hex, + 'cpu': 4, + 'disk_gb': 100, + 'project': 'project-' + uuid.uuid4().hex, + } + + +class TestHostList(compute_fakes.TestComputev2): def setUp(self): - super(TestHost, self).setUp() - - # Get a shortcut to the compute client - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - self.sdk_client.get = mock.Mock() - - -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.host_list' -) -class TestHostList(TestHost): - - _host = compute_fakes.FakeHost.create_one_host() - - def setUp(self): - super(TestHostList, self).setUp() + super().setUp() + + self._host = _generate_fake_host() + self.columns = ('Host Name', 'Service', 'Zone') + self.data = [ + ( + self._host['host_name'], + self._host['service'], + self._host['zone'], + ) + ] - self.sdk_client.get.return_value = fakes.FakeResponse( + self.compute_client.get.return_value = fakes.FakeResponse( data={'hosts': [self._host]} ) - - self.columns = ( - 'Host Name', 'Service', 'Zone' - ) - - self.data = [( - self._host['host_name'], - self._host['service'], - self._host['zone'], - )] - self.cmd = host.ListHost(self.app, None) - def test_host_list_no_option(self, h_mock): - h_mock.return_value = [self._host] + def test_host_list_no_option(self): arglist = [] verifylist = [] @@ -68,14 +85,16 @@ def test_host_list_no_option(self, h_mock): columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.get.assert_called_with('/os-hosts', microversion='2.1') + self.compute_client.get.assert_called_with( + '/os-hosts', microversion='2.1' + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) - def test_host_list_with_option(self, h_mock): - h_mock.return_value = [self._host] + def test_host_list_with_option(self): arglist = [ - '--zone', self._host['zone'], + '--zone', + self._host['zone'], ] verifylist = [ ('zone', self._host['zone']), @@ -85,84 +104,67 @@ def test_host_list_with_option(self, h_mock): columns, data = 
self.cmd.take_action(parsed_args) - self.sdk_client.get.assert_called_with('/os-hosts', microversion='2.1') + self.compute_client.get.assert_called_with( + '/os-hosts', microversion='2.1' + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.host_set' -) -class TestHostSet(TestHost): - +class TestHostSet(compute_fakes.TestComputev2): def setUp(self): - super(TestHostSet, self).setUp() + super().setUp() - self.host = compute_fakes.FakeHost.create_one_host() + self._host = _generate_fake_host() + self.compute_client.put.return_value = fakes.FakeResponse() self.cmd = host.SetHost(self.app, None) - def test_host_set_no_option(self, h_mock): - h_mock.return_value = self.host - h_mock.update.return_value = None + def test_host_set_no_option(self): arglist = [ - self.host['host'], + self._host['host'], ] verifylist = [ - ('host', self.host['host']), + ('host', self._host['host']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.assertIsNone(result) + self.compute_client.put.assert_not_called() - h_mock.assert_called_with(self.host['host']) - - def test_host_set(self, h_mock): - h_mock.return_value = self.host - h_mock.update.return_value = None + def test_host_set(self): arglist = [ '--enable', '--disable-maintenance', - self.host['host'], + self._host['host'], ] verifylist = [ ('enable', True), ('enable_maintenance', False), - ('host', self.host['host']), + ('host', self._host['host']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.assertIsNone(result) + self.compute_client.put.assert_called_with( + f'/os-hosts/{self._host["host"]}', + json={ + 'maintenance_mode': 'disable', + 'status': 'enable', + }, + microversion='2.1', + ) - h_mock.assert_called_with(self.host['host'], status='enable', - maintenance_mode='disable') - - -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.host_show' -) -class TestHostShow(TestHost): - - _host = compute_fakes.FakeHost.create_one_host() +class TestHostShow(compute_fakes.TestComputev2): def setUp(self): - super(TestHostShow, self).setUp() - - output_data = {"resource": { - "host": self._host['host'], - "project": self._host['project'], - "cpu": self._host['cpu'], - "memory_mb": self._host['memory_mb'], - "disk_gb": self._host['disk_gb'] - }} - - self.sdk_client.get.return_value = fakes.FakeResponse( - data={'host': [output_data]} - ) + super().setUp() + + self._host = _generate_fake_host() self.columns = ( 'Host', @@ -171,28 +173,48 @@ def setUp(self): 'Memory MB', 'Disk GB', ) + self.data = [ + ( + self._host['host'], + self._host['project'], + self._host['cpu'], + self._host['memory_mb'], + self._host['disk_gb'], + ) + ] - self.data = [( - self._host['host'], - self._host['project'], - self._host['cpu'], - self._host['memory_mb'], - self._host['disk_gb'], - )] + self.compute_client.get.return_value = fakes.FakeResponse( + data={ + 'host': [ + { + 'resource': { + 'host': self._host['host'], + 'project': self._host['project'], + 'cpu': self._host['cpu'], + 'memory_mb': self._host['memory_mb'], + 'disk_gb': self._host['disk_gb'], + } + } + ], + } + ) self.cmd = host.ShowHost(self.app, None) - def test_host_show_no_option(self, h_mock): - h_mock.host_show.return_value = [self._host] + def test_host_show_no_option(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, 
self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) - def test_host_show_with_option(self, h_mock): - h_mock.return_value = [self._host] + def test_host_show_with_option(self): arglist = [ self._host['host_name'], ] @@ -204,9 +226,8 @@ def test_host_show_with_option(self, h_mock): columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.get.assert_called_with( - '/os-hosts/' + self._host['host_name'], - microversion='2.1' + self.compute_client.get.assert_called_with( + '/os-hosts/' + self._host['host_name'], microversion='2.1' ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) diff --git a/openstackclient/tests/unit/compute/v2/test_hypervisor.py b/openstackclient/tests/unit/compute/v2/test_hypervisor.py index e5804665c1..4282982a27 100644 --- a/openstackclient/tests/unit/compute/v2/test_hypervisor.py +++ b/openstackclient/tests/unit/compute/v2/test_hypervisor.py @@ -11,13 +11,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# import json -from unittest import mock -from novaclient import exceptions as nova_exceptions -from openstack import utils as sdk_utils +from openstack.compute.v2 import hypervisor as _hypervisor +from openstack import exceptions as sdk_exceptions +from openstack.test import fakes as sdk_fakes from osc_lib.cli import format_columns from osc_lib import exceptions @@ -25,32 +24,22 @@ from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -class TestHypervisor(compute_fakes.TestComputev2): - - def setUp(self): - super().setUp() - - # Create and get a shortcut to the compute client mock - self.app.client_manager.sdk_connection = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - self.sdk_client.reset_mock() - - -class TestHypervisorList(TestHypervisor): - +class TestHypervisorList(compute_fakes.TestComputev2): def setUp(self): super().setUp() # Fake hypervisors to be listed up - self.hypervisors = compute_fakes.create_hypervisors() - self.sdk_client.hypervisors.return_value = self.hypervisors + self.hypervisors = list( + sdk_fakes.generate_fake_resources(_hypervisor.Hypervisor, count=2) + ) + self.compute_client.hypervisors.return_value = iter(self.hypervisors) self.columns = ( "ID", "Hypervisor Hostname", "Hypervisor Type", "Host IP", - "State" + "State", ) self.columns_long = ( "ID", @@ -61,7 +50,7 @@ def setUp(self): "vCPUs Used", "vCPUs", "Memory MB Used", - "Memory MB" + "Memory MB", ) self.data = ( ( @@ -69,14 +58,14 @@ def setUp(self): self.hypervisors[0].name, self.hypervisors[0].hypervisor_type, self.hypervisors[0].host_ip, - self.hypervisors[0].state + self.hypervisors[0].state, ), ( self.hypervisors[1].id, self.hypervisors[1].name, self.hypervisors[1].hypervisor_type, self.hypervisors[1].host_ip, - self.hypervisors[1].state + self.hypervisors[1].state, ), ) @@ -90,7 +79,7 @@ def setUp(self): self.hypervisors[0].vcpus_used, self.hypervisors[0].vcpus, self.hypervisors[0].memory_used, - self.hypervisors[0].memory_size + self.hypervisors[0].memory_size, ), ( self.hypervisors[1].id, @@ -101,7 +90,7 @@ def setUp(self): self.hypervisors[1].vcpus_used, self.hypervisors[1].vcpus, self.hypervisors[1].memory_used, - self.hypervisors[1].memory_size + self.hypervisors[1].memory_size, ), ) # Get the command object to test @@ -117,13 +106,14 @@ def 
test_hypervisor_list_no_option(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.hypervisors.assert_called_with(details=True) + self.compute_client.hypervisors.assert_called_with(details=True) self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) def test_hypervisor_list_matching_option_found(self): arglist = [ - '--matching', self.hypervisors[0].name, + '--matching', + self.hypervisors[0].name, ] verifylist = [ ('matching', self.hypervisors[0].name), @@ -131,14 +121,15 @@ def test_hypervisor_list_matching_option_found(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) # Fake the return value of search() - self.sdk_client.find_hypervisor.return_value = [self.hypervisors[0]] + self.compute_client.hypervisors.return_value = [self.hypervisors[0]] + self.data = ( ( self.hypervisors[0].id, self.hypervisors[0].name, - self.hypervisors[1].hypervisor_type, - self.hypervisors[1].host_ip, - self.hypervisors[1].state, + self.hypervisors[0].hypervisor_type, + self.hypervisors[0].host_ip, + self.hypervisors[0].state, ), ) @@ -147,16 +138,16 @@ def test_hypervisor_list_matching_option_found(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.find_hypervisor.assert_called_with( - self.hypervisors[0].name, - ignore_missing=False + self.compute_client.hypervisors.assert_called_with( + hypervisor_hostname_pattern=self.hypervisors[0].name, details=True ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) def test_hypervisor_list_matching_option_not_found(self): arglist = [ - '--matching', 'xxx', + '--matching', + 'xxx', ] verifylist = [ ('matching', 'xxx'), @@ -164,20 +155,20 @@ def test_hypervisor_list_matching_option_not_found(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) # Fake exception raised from search() - self.sdk_client.find_hypervisor.side_effect = \ - exceptions.NotFound(None) + self.compute_client.hypervisors.side_effect = exceptions.NotFound(None) - self.assertRaises(exceptions.NotFound, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.NotFound, self.cmd.take_action, parsed_args + ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_hypervisor_list_with_matching_and_pagination_options( - self, sm_mock): + def test_hypervisor_list_with_matching_and_pagination_options(self): arglist = [ - '--matching', self.hypervisors[0].name, - '--limit', '1', - '--marker', self.hypervisors[0].name, + '--matching', + self.hypervisors[0].name, + '--limit', + '1', + '--marker', + self.hypervisors[0].name, ] verifylist = [ ('matching', self.hypervisors[0].name), @@ -187,15 +178,14 @@ def test_hypervisor_list_with_matching_and_pagination_options( parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--matching is not compatible with --marker or --limit', str(ex)) + '--matching is not compatible with --marker or --limit', str(ex) + ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_hypervisor_list_long_option(self, sm_mock): + def test_hypervisor_list_long_option(self): arglist = [ '--long', ] @@ -209,14 +199,16 @@ def test_hypervisor_list_long_option(self, sm_mock): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.hypervisors.assert_called_with(details=True) + self.compute_client.hypervisors.assert_called_with(details=True) self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, tuple(data)) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_hypervisor_list_with_limit(self, sm_mock): + def test_hypervisor_list_with_limit(self): + self.set_compute_api_version('2.33') + arglist = [ - '--limit', '1', + '--limit', + '1', ] verifylist = [ ('limit', 1), @@ -225,12 +217,16 @@ def test_hypervisor_list_with_limit(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.sdk_client.hypervisors.assert_called_with(limit=1, details=True) + self.compute_client.hypervisors.assert_called_with( + limit=1, details=True + ) + + def test_hypervisor_list_with_limit_pre_v233(self): + self.set_compute_api_version('2.32') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_hypervisor_list_with_limit_pre_v233(self, sm_mock): arglist = [ - '--limit', '1', + '--limit', + '1', ] verifylist = [ ('limit', 1), @@ -238,17 +234,19 @@ def test_hypervisor_list_with_limit_pre_v233(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.33 or greater is required', str(ex)) + '--os-compute-api-version 2.33 or greater is required', str(ex) + ) + + def test_hypervisor_list_with_marker(self): + self.set_compute_api_version('2.33') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_hypervisor_list_with_marker(self, sm_mock): arglist = [ - '--marker', 'test_hyp', + '--marker', + 'test_hyp', ] verifylist = [ ('marker', 'test_hyp'), @@ -257,13 +255,16 @@ def test_hypervisor_list_with_marker(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.sdk_client.hypervisors.assert_called_with( - marker='test_hyp', details=True) + self.compute_client.hypervisors.assert_called_with( + marker='test_hyp', details=True + ) + + def test_hypervisor_list_with_marker_pre_v233(self): + self.set_compute_api_version('2.32') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_hypervisor_list_with_marker_pre_v233(self, sm_mock): arglist = [ - '--marker', 'test_hyp', + '--marker', + 'test_hyp', ] verifylist = [ ('marker', 'test_hyp'), @@ -271,34 +272,36 @@ def test_hypervisor_list_with_marker_pre_v233(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.33 or greater is required', str(ex)) - + '--os-compute-api-version 2.33 or greater is required', str(ex) + ) -class TestHypervisorShow(TestHypervisor): +class TestHypervisorShow(compute_fakes.TestComputev2): def setUp(self): super().setUp() - uptime_string = (' 01:28:24 up 3 days, 11:15, 1 user, ' - ' load average: 0.94, 0.62, 0.50\n') + uptime_string = ( + ' 01:28:24 up 3 days, 11:15, 1 user, ' + ' load average: 0.94, 0.62, 0.50\n' + ) # Fake hypervisors to be listed up - self.hypervisor = 
compute_fakes.create_one_hypervisor(attrs={ - 'uptime': uptime_string, - }) + self.hypervisor = sdk_fakes.generate_fake_resource( + _hypervisor.Hypervisor, + uptime=uptime_string, + service={"id": 1, "host": "aaa"}, + cpu_info={"aaa": "aaa"}, + ) - # Return value of compute_client.find_hypervisor - self.sdk_client.find_hypervisor.return_value = self.hypervisor + self.compute_client.find_hypervisor.return_value = self.hypervisor + self.compute_client.get_hypervisor.return_value = self.hypervisor - # Return value of compute_client.aggregates() - self.sdk_client.aggregates.return_value = [] + self.compute_client.aggregates.return_value = [] - # Return value of compute_client.get_hypervisor_uptime() uptime_info = { 'status': self.hypervisor.status, 'state': self.hypervisor.state, @@ -306,7 +309,7 @@ def setUp(self): 'hypervisor_hostname': self.hypervisor.name, 'uptime': uptime_string, } - self.sdk_client.get_hypervisor_uptime.return_value = uptime_info + self.compute_client.get_hypervisor_uptime.return_value = uptime_info self.columns_v288 = ( 'aggregates', @@ -328,18 +331,18 @@ def setUp(self): self.data_v288 = ( [], - format_columns.DictColumn({'aaa': 'aaa'}), - '192.168.0.10', + format_columns.DictColumn(self.hypervisor.cpu_info), + self.hypervisor.host_ip, '01:28:24', self.hypervisor.name, - 'QEMU', - 2004001, + self.hypervisor.hypervisor_type, + self.hypervisor.hypervisor_version, self.hypervisor.id, '0.94, 0.62, 0.50', - 'aaa', - 1, - 'up', - 'enabled', + self.hypervisor.service_details["host"], + self.hypervisor.service_details["id"], + self.hypervisor.state, + self.hypervisor.status, '3 days, 11:15', '1', ) @@ -374,38 +377,39 @@ def setUp(self): ) self.data = ( [], - format_columns.DictColumn({'aaa': 'aaa'}), - 0, - 50, - 50, - 1024, - '192.168.0.10', + format_columns.DictColumn(self.hypervisor.cpu_info), + self.hypervisor.current_workload, + self.hypervisor.disk_available, + self.hypervisor.local_disk_free, + self.hypervisor.memory_free, + self.hypervisor.host_ip, '01:28:24', self.hypervisor.name, - 'QEMU', - 2004001, + self.hypervisor.hypervisor_type, + self.hypervisor.hypervisor_version, self.hypervisor.id, '0.94, 0.62, 0.50', - 50, - 0, - 1024, - 512, - 0, - 'aaa', + self.hypervisor.local_disk_size, + self.hypervisor.local_disk_used, + self.hypervisor.memory_size, + self.hypervisor.memory_used, + self.hypervisor.running_vms, + self.hypervisor.service_details["host"], 1, - 'up', - 'enabled', + self.hypervisor.state, + self.hypervisor.status, '3 days, 11:15', '1', - 4, - 0, + self.hypervisor.vcpus, + self.hypervisor.vcpus_used, ) # Get the command object to test self.cmd = hypervisor.ShowHypervisor(self.app, None) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_hypervisor_show(self, sm_mock): + def test_hypervisor_show(self): + self.set_compute_api_version('2.88') + arglist = [ self.hypervisor.name, ] @@ -422,9 +426,16 @@ def test_hypervisor_show(self, sm_mock): self.assertEqual(self.columns_v288, columns) self.assertCountEqual(self.data_v288, data) - @mock.patch.object(sdk_utils, 'supports_microversion', - side_effect=[False, True, False]) - def test_hypervisor_show_pre_v288(self, sm_mock): + self.compute_client.find_hypervisor.assert_called_once_with( + self.hypervisor.name, ignore_missing=False, details=False + ) + self.compute_client.get_hypervisor.assert_called_once_with( + self.hypervisor.id + ) + + def test_hypervisor_show_pre_v288(self): + self.set_compute_api_version('2.87') + arglist = [ self.hypervisor.name, ] @@ -441,13 +452,20 @@ 
def test_hypervisor_show_pre_v288(self, sm_mock): self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_hypervisor_show_pre_v228(self, sm_mock): + self.compute_client.find_hypervisor.assert_called_once_with( + self.hypervisor.name, ignore_missing=False, details=False + ) + self.compute_client.get_hypervisor.assert_called_once_with( + self.hypervisor.id + ) + + def test_hypervisor_show_pre_v228(self): + self.set_compute_api_version('2.27') + # before microversion 2.28, nova returned a stringified version of this # field - self.hypervisor.cpu_info = json.dumps( - self.hypervisor.cpu_info) - self.sdk_client.find_hypervisor.return_value = self.hypervisor + self.hypervisor.cpu_info = json.dumps(self.hypervisor.cpu_info) + self.compute_client.find_hypervisor.return_value = self.hypervisor arglist = [ self.hypervisor.name, @@ -465,9 +483,16 @@ def test_hypervisor_show_pre_v228(self, sm_mock): self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) - @mock.patch.object(sdk_utils, 'supports_microversion', - side_effect=[False, True, False]) - def test_hypervisor_show_uptime_not_implemented(self, sm_mock): + self.compute_client.find_hypervisor.assert_called_once_with( + self.hypervisor.name, ignore_missing=False, details=False + ) + self.compute_client.get_hypervisor.assert_called_once_with( + self.hypervisor.id + ) + + def test_hypervisor_show_uptime_not_implemented(self): + self.set_compute_api_version('2.87') + arglist = [ self.hypervisor.name, ] @@ -476,8 +501,9 @@ def test_hypervisor_show_uptime_not_implemented(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.sdk_client.get_hypervisor_uptime.side_effect = ( - nova_exceptions.HTTPNotImplemented(501)) + self.compute_client.get_hypervisor_uptime.side_effect = ( + sdk_exceptions.HttpException(http_status=501) + ) # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of @@ -510,28 +536,35 @@ def test_hypervisor_show_uptime_not_implemented(self, sm_mock): ) expected_data = ( [], - format_columns.DictColumn({'aaa': 'aaa'}), - 0, - 50, - 50, - 1024, - '192.168.0.10', + format_columns.DictColumn(self.hypervisor.cpu_info), + self.hypervisor.current_workload, + self.hypervisor.disk_available, + self.hypervisor.local_disk_free, + self.hypervisor.memory_free, + self.hypervisor.host_ip, self.hypervisor.name, - 'QEMU', - 2004001, + self.hypervisor.hypervisor_type, + self.hypervisor.hypervisor_version, self.hypervisor.id, - 50, - 0, - 1024, - 512, - 0, - 'aaa', + self.hypervisor.local_disk_size, + self.hypervisor.local_disk_used, + self.hypervisor.memory_size, + self.hypervisor.memory_used, + self.hypervisor.running_vms, + self.hypervisor.service_details["host"], 1, - 'up', - 'enabled', - 4, - 0, + self.hypervisor.state, + self.hypervisor.status, + self.hypervisor.vcpus, + self.hypervisor.vcpus_used, ) self.assertEqual(expected_columns, columns) self.assertCountEqual(expected_data, data) + + self.compute_client.find_hypervisor.assert_called_once_with( + self.hypervisor.name, ignore_missing=False, details=False + ) + self.compute_client.get_hypervisor.assert_called_once_with( + self.hypervisor.id + ) diff --git a/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py b/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py index 7bc7468ad6..89d4d459fe 100644 --- 
a/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py +++ b/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py @@ -12,27 +12,15 @@ # License for the specific language governing permissions and limitations # under the License. # -from unittest import mock from openstackclient.compute.v2 import hypervisor_stats from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes from openstackclient.tests.unit import fakes -class TestHypervisorStats(compute_fakes.TestComputev2): - - def setUp(self): - super(TestHypervisorStats, self).setUp() - - # Get a shortcut to the compute client hypervisors mock - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - self.sdk_client.get = mock.Mock() - - # Not in fakes.py because hypervisor stats has been deprecated + def create_one_hypervisor_stats(attrs=None): """Create a fake hypervisor stats. @@ -65,15 +53,15 @@ def create_one_hypervisor_stats(attrs=None): return stats_info -class TestHypervisorStatsShow(TestHypervisorStats): - +class TestHypervisorStatsShow(compute_fakes.TestComputev2): _stats = create_one_hypervisor_stats() def setUp(self): - super(TestHypervisorStatsShow, self).setUp() + super().setUp() - self.sdk_client.get.return_value = fakes.FakeResponse( - data={'hypervisor_statistics': self._stats}) + self.compute_client.get.return_value = fakes.FakeResponse( + data={'hypervisor_statistics': self._stats} + ) self.cmd = hypervisor_stats.ShowHypervisorStats(self.app, None) diff --git a/openstackclient/tests/unit/compute/v2/test_keypair.py b/openstackclient/tests/unit/compute/v2/test_keypair.py index 65d9396aee..4eaaf4c9b0 100644 --- a/openstackclient/tests/unit/compute/v2/test_keypair.py +++ b/openstackclient/tests/unit/compute/v2/test_keypair.py @@ -11,74 +11,70 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-# -import copy from unittest import mock from unittest.mock import call import uuid -from novaclient import api_versions -from openstack import utils as sdk_utils +from openstack.compute.v2 import keypair as _keypair +from openstack.identity.v3 import project as _project +from openstack.identity.v3 import role_assignment as _role_assignment +from openstack.identity.v3 import user as _user +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions from openstackclient.compute.v2 import keypair from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -from openstackclient.tests.unit import fakes -from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes from openstackclient.tests.unit import utils as tests_utils class TestKeypair(compute_fakes.TestComputev2): - def setUp(self): - super(TestKeypair, self).setUp() + super().setUp() # Initialize the user mock - self.users_mock = self.app.client_manager.identity.users + self._user = sdk_fakes.generate_fake_resource(_user.User) + self.users_mock = self.identity_client.users self.users_mock.reset_mock() - self.users_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.USER), - loaded=True, - ) - - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - self.sdk_client.keypairs = mock.Mock() - self.sdk_client.create_keypair = mock.Mock() - self.sdk_client.delete_keypair = mock.Mock() - self.sdk_client.find_keypair = mock.Mock() + self.users_mock.get.return_value = self._user class TestKeypairCreate(TestKeypair): - - keypair = compute_fakes.FakeKeypair.create_one_keypair() - def setUp(self): - super(TestKeypairCreate, self).setUp() + super().setUp() + + self.keypair = sdk_fakes.generate_fake_resource(_keypair.Keypair) self.columns = ( + 'created_at', 'fingerprint', + 'id', + 'is_deleted', 'name', 'type', - 'user_id' + 'user_id', ) self.data = ( + self.keypair.created_at, self.keypair.fingerprint, + self.keypair.id, + self.keypair.is_deleted, self.keypair.name, self.keypair.type, - self.keypair.user_id + self.keypair.user_id, ) # Get the command object to test self.cmd = keypair.CreateKeypair(self.app, None) - self.sdk_client.create_keypair.return_value = self.keypair - - def test_key_pair_create_no_options(self): + self.compute_client.create_keypair.return_value = self.keypair + @mock.patch.object( + keypair, + '_generate_keypair', + return_value=keypair.Keypair('private', 'public'), + ) + def test_keypair_create_no_options(self, mock_generate): arglist = [ self.keypair.name, ] @@ -89,28 +85,28 @@ def test_key_pair_create_no_options(self): columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_keypair.assert_called_with( - name=self.keypair.name + self.compute_client.create_keypair.assert_called_with( + name=self.keypair.name, + public_key=mock_generate.return_value.public_key, ) self.assertEqual({}, columns) self.assertEqual({}, data) def test_keypair_create_public_key(self): - # overwrite the setup one because we want to omit private_key - self.keypair = compute_fakes.FakeKeypair.create_one_keypair( - no_pri=True) - self.sdk_client.create_keypair.return_value = self.keypair - self.data = ( + self.keypair.created_at, self.keypair.fingerprint, + self.keypair.id, + self.keypair.is_deleted, self.keypair.name, self.keypair.type, - self.keypair.user_id + self.keypair.user_id, ) arglist = [ - '--public-key', 
self.keypair.public_key, + '--public-key', + self.keypair.public_key, self.keypair.name, ] verifylist = [ @@ -120,14 +116,16 @@ def test_keypair_create_public_key(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch('io.open') as mock_open: + with mock.patch( + 'openstackclient.compute.v2.keypair.open' + ) as mock_open: mock_open.return_value = mock.MagicMock() m_file = mock_open.return_value.__enter__.return_value - m_file.read.return_value = 'dummy' + m_file.read.return_value = self.keypair.public_key columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_keypair.assert_called_with( + self.compute_client.create_keypair.assert_called_with( name=self.keypair.name, public_key=self.keypair.public_key, ) @@ -135,10 +133,16 @@ def test_keypair_create_public_key(self): self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) - def test_keypair_create_private_key(self): + @mock.patch.object( + keypair, + '_generate_keypair', + return_value=keypair.Keypair('private', 'public'), + ) + def test_keypair_create_private_key(self, mock_generate): tmp_pk_file = '/tmp/kp-file-' + uuid.uuid4().hex arglist = [ - '--private-key', tmp_pk_file, + '--private-key', + tmp_pk_file, self.keypair.name, ] verifylist = [ @@ -148,39 +152,48 @@ def test_keypair_create_private_key(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch('io.open') as mock_open: + with mock.patch( + 'openstackclient.compute.v2.keypair.open' + ) as mock_open: mock_open.return_value = mock.MagicMock() m_file = mock_open.return_value.__enter__.return_value columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_keypair.assert_called_with( + self.compute_client.create_keypair.assert_called_with( name=self.keypair.name, + public_key=mock_generate.return_value.public_key, ) mock_open.assert_called_once_with(tmp_pk_file, 'w+') - m_file.write.assert_called_once_with(self.keypair.private_key) + m_file.write.assert_called_once_with( + mock_generate.return_value.private_key, + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_keypair_create_with_key_type(self, sm_mock): + def test_keypair_create_with_key_type(self): + self.set_compute_api_version('2.2') + for key_type in ['x509', 'ssh']: - self.keypair = compute_fakes.FakeKeypair.create_one_keypair( - no_pri=True) - self.sdk_client.create_keypair.return_value = self.keypair + self.compute_client.create_keypair.return_value = self.keypair self.data = ( + self.keypair.created_at, self.keypair.fingerprint, + self.keypair.id, + self.keypair.is_deleted, self.keypair.name, self.keypair.type, self.keypair.user_id, ) arglist = [ - '--public-key', self.keypair.public_key, + '--public-key', + self.keypair.public_key, self.keypair.name, - '--type', key_type, + '--type', + key_type, ] verifylist = [ ('public_key', self.keypair.public_key), @@ -189,13 +202,15 @@ def test_keypair_create_with_key_type(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch('io.open') as mock_open: + with mock.patch( + 'openstackclient.compute.v2.keypair.open' + ) as mock_open: mock_open.return_value = mock.MagicMock() m_file = mock_open.return_value.__enter__.return_value - m_file.read.return_value = 'dummy' + m_file.read.return_value = self.keypair.public_key columns, data = self.cmd.take_action(parsed_args) - 
self.sdk_client.create_keypair.assert_called_with( + self.compute_client.create_keypair.assert_called_with( name=self.keypair.name, public_key=self.keypair.public_key, key_type=key_type, @@ -204,13 +219,16 @@ def test_keypair_create_with_key_type(self, sm_mock): self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_keypair_create_with_key_type_pre_v22(self, sm_mock): + def test_keypair_create_with_key_type_pre_v22(self): + self.set_compute_api_version('2.1') + for key_type in ['x509', 'ssh']: arglist = [ - '--public-key', self.keypair.public_key, + '--public-key', + self.keypair.public_key, self.keypair.name, - '--type', 'ssh', + '--type', + 'ssh', ] verifylist = [ ('public_key', self.keypair.public_key), @@ -219,75 +237,85 @@ def test_keypair_create_with_key_type_pre_v22(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch('io.open') as mock_open: + with mock.patch( + 'openstackclient.compute.v2.keypair.open' + ) as mock_open: mock_open.return_value = mock.MagicMock() m_file = mock_open.return_value.__enter__.return_value m_file.read.return_value = 'dummy' ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.2 or greater is required', - str(ex)) + '--os-compute-api-version 2.2 or greater is required', str(ex) + ) + + @mock.patch.object( + keypair, + '_generate_keypair', + return_value=keypair.Keypair('private', 'public'), + ) + def test_key_pair_create_with_user(self, mock_generate): + self.set_compute_api_version('2.10') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_key_pair_create_with_user(self, sm_mock): arglist = [ - '--user', identity_fakes.user_name, + '--user', + self._user.name, self.keypair.name, ] verifylist = [ - ('user', identity_fakes.user_name), + ('user', self._user.name), ('name', self.keypair.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_keypair.assert_called_with( + self.compute_client.create_keypair.assert_called_with( name=self.keypair.name, - user_id=identity_fakes.user_id, + user_id=self._user.id, + public_key=mock_generate.return_value.public_key, ) self.assertEqual({}, columns) self.assertEqual({}, data) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_key_pair_create_with_user_pre_v210(self, sm_mock): + def test_key_pair_create_with_user_pre_v210(self): + self.set_compute_api_version('2.9') + arglist = [ - '--user', identity_fakes.user_name, + '--user', + self._user.name, self.keypair.name, ] verifylist = [ - ('user', identity_fakes.user_name), + ('user', self._user.name), ('name', self.keypair.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.10 or greater is required', str(ex)) + '--os-compute-api-version 2.10 or greater is required', str(ex) + ) class TestKeypairDelete(TestKeypair): - - keypairs = compute_fakes.FakeKeypair.create_keypairs(count=2) - def setUp(self): - super(TestKeypairDelete, self).setUp() + super().setUp() + + self.keypairs = list( + 
sdk_fakes.generate_fake_resources(_keypair.Keypair, count=2) + ) self.cmd = keypair.DeleteKeypair(self.app, None) def test_keypair_delete(self): - arglist = [ - self.keypairs[0].name - ] + arglist = [self.keypairs[0].name] verifylist = [ ('name', [self.keypairs[0].name]), ] @@ -297,8 +325,9 @@ def test_keypair_delete(self): ret = self.cmd.take_action(parsed_args) self.assertIsNone(ret) - self.sdk_client.delete_keypair.assert_called_with( - self.keypairs[0].name, ignore_missing=False) + self.compute_client.delete_keypair.assert_called_with( + self.keypairs[0].name, ignore_missing=False + ) def test_delete_multiple_keypairs(self): arglist = [] @@ -314,7 +343,7 @@ def test_delete_multiple_keypairs(self): calls = [] for k in self.keypairs: calls.append(call(k.name, ignore_missing=False)) - self.sdk_client.delete_keypair.assert_has_calls(calls) + self.compute_client.delete_keypair.assert_has_calls(calls) self.assertIsNone(result) def test_delete_multiple_keypairs_with_exception(self): @@ -328,8 +357,10 @@ def test_delete_multiple_keypairs_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.sdk_client.delete_keypair.side_effect = [ - None, exceptions.CommandError] + self.compute_client.delete_keypair.side_effect = [ + None, + exceptions.CommandError, + ] try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') @@ -339,16 +370,14 @@ def test_delete_multiple_keypairs_with_exception(self): calls = [] for k in arglist: calls.append(call(k, ignore_missing=False)) - self.sdk_client.delete_keypair.assert_has_calls(calls) + self.compute_client.delete_keypair.assert_has_calls(calls) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_keypair_delete_with_user(self, sm_mock): - arglist = [ - '--user', identity_fakes.user_name, - self.keypairs[0].name - ] + def test_keypair_delete_with_user(self): + self.set_compute_api_version('2.10') + + arglist = ['--user', self._user.name, self.keypairs[0].name] verifylist = [ - ('user', identity_fakes.user_name), + ('user', self._user.name), ('name', [self.keypairs[0].name]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -356,51 +385,45 @@ def test_keypair_delete_with_user(self, sm_mock): ret = self.cmd.take_action(parsed_args) self.assertIsNone(ret) - self.sdk_client.delete_keypair.assert_called_with( + self.compute_client.delete_keypair.assert_called_with( self.keypairs[0].name, - user_id=identity_fakes.user_id, - ignore_missing=False + user_id=self._user.id, + ignore_missing=False, ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_keypair_delete_with_user_pre_v210(self, sm_mock): - - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.9') + def test_keypair_delete_with_user_pre_v210(self): + self.set_compute_api_version('2.9') - arglist = [ - '--user', identity_fakes.user_name, - self.keypairs[0].name - ] + arglist = ['--user', self._user.name, self.keypairs[0].name] verifylist = [ - ('user', identity_fakes.user_name), + ('user', self._user.name), ('name', [self.keypairs[0].name]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.10 or greater is required', str(ex)) + '--os-compute-api-version 2.10 or greater is required', str(ex) + ) class TestKeypairList(TestKeypair): 
- - # Return value of self.sdk_client.keypairs(). - keypairs = compute_fakes.FakeKeypair.create_keypairs(count=1) - def setUp(self): - super(TestKeypairList, self).setUp() + super().setUp() - self.sdk_client.keypairs.return_value = self.keypairs + self.keypairs = list( + sdk_fakes.generate_fake_resources(_keypair.Keypair, count=1) + ) + self.compute_client.keypairs.return_value = iter(self.keypairs) # Get the command object to test self.cmd = keypair.ListKeypair(self.app, None) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_keypair_list_no_options(self, sm_mock): + self._project = sdk_fakes.generate_fake_resource(_project.Project) + + def test_keypair_list_no_options(self): arglist = [] verifylist = [] @@ -413,16 +436,17 @@ def test_keypair_list_no_options(self, sm_mock): # Set expected values - self.sdk_client.keypairs.assert_called_with() + self.compute_client.keypairs.assert_called_with() self.assertEqual(('Name', 'Fingerprint'), columns) self.assertEqual( - ((self.keypairs[0].name, self.keypairs[0].fingerprint), ), - tuple(data) + ((self.keypairs[0].name, self.keypairs[0].fingerprint),), + tuple(data), ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_keypair_list_v22(self, sm_mock): + def test_keypair_list_v22(self): + self.set_compute_api_version('2.22') + arglist = [] verifylist = [] @@ -435,145 +459,151 @@ def test_keypair_list_v22(self, sm_mock): # Set expected values - self.sdk_client.keypairs.assert_called_with() + self.compute_client.keypairs.assert_called_with() self.assertEqual(('Name', 'Fingerprint', 'Type'), columns) self.assertEqual( - (( - self.keypairs[0].name, - self.keypairs[0].fingerprint, - self.keypairs[0].type, - ), ), - tuple(data) + ( + ( + self.keypairs[0].name, + self.keypairs[0].fingerprint, + self.keypairs[0].type, + ), + ), + tuple(data), ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_keypair_list_with_user(self, sm_mock): + def test_keypair_list_with_user(self): + self.set_compute_api_version('2.35') - users_mock = self.app.client_manager.identity.users + users_mock = self.identity_client.users users_mock.reset_mock() - users_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.USER), - loaded=True, - ) + users_mock.get.return_value = self._user arglist = [ - '--user', identity_fakes.user_name, + '--user', + self._user.name, ] verifylist = [ - ('user', identity_fakes.user_name), + ('user', self._user.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - users_mock.get.assert_called_with(identity_fakes.user_name) - self.sdk_client.keypairs.assert_called_with( - user_id=identity_fakes.user_id, + users_mock.get.assert_called_with(self._user.name) + self.compute_client.keypairs.assert_called_with( + user_id=self._user.id, ) self.assertEqual(('Name', 'Fingerprint', 'Type'), columns) self.assertEqual( - (( - self.keypairs[0].name, - self.keypairs[0].fingerprint, - self.keypairs[0].type, - ), ), - tuple(data) + ( + ( + self.keypairs[0].name, + self.keypairs[0].fingerprint, + self.keypairs[0].type, + ), + ), + tuple(data), ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_keypair_list_with_user_pre_v210(self, sm_mock): + def test_keypair_list_with_user_pre_v210(self): + self.set_compute_api_version('2.9') arglist = [ - '--user', identity_fakes.user_name, + '--user', + self._user.name, ] verifylist = [ - 
('user', identity_fakes.user_name), + ('user', self._user.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.10 or greater is required', str(ex)) + '--os-compute-api-version 2.10 or greater is required', str(ex) + ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_keypair_list_with_project(self, sm_mock): + def test_keypair_list_with_project(self): + self.set_compute_api_version('2.35') - projects_mock = self.app.client_manager.identity.tenants + projects_mock = self.identity_client.projects projects_mock.reset_mock() - projects_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.PROJECT), - loaded=True, - ) + projects_mock.get.return_value = self._project - users_mock = self.app.client_manager.identity.users - users_mock.reset_mock() - users_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.USER), - loaded=True, - ), - ] + role_assignments_mock = self.identity_sdk_client.role_assignments + role_assignments_mock.reset_mock() + assignment = sdk_fakes.generate_fake_resource( + _role_assignment.RoleAssignment + ) + assignment.user = self._user + role_assignments_mock.return_value = [assignment] - arglist = ['--project', identity_fakes.project_name] - verifylist = [('project', identity_fakes.project_name)] + arglist = ['--project', self._project.name] + verifylist = [('project', self._project.name)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - projects_mock.get.assert_called_with(identity_fakes.project_name) - users_mock.list.assert_called_with(tenant_id=identity_fakes.project_id) - self.sdk_client.keypairs.assert_called_with( - user_id=identity_fakes.user_id, + projects_mock.get.assert_called_with(self._project.name) + role_assignments_mock.assert_called_with( + scope_project_id=self._project.id + ) + self.compute_client.keypairs.assert_called_with( + user_id=self._user.id, ) self.assertEqual(('Name', 'Fingerprint', 'Type'), columns) self.assertEqual( - (( - self.keypairs[0].name, - self.keypairs[0].fingerprint, - self.keypairs[0].type, - ), ), - tuple(data) + ( + ( + self.keypairs[0].name, + self.keypairs[0].fingerprint, + self.keypairs[0].type, + ), + ), + tuple(data), ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_keypair_list_with_project_pre_v210(self, sm_mock): + def test_keypair_list_with_project_pre_v210(self): + self.set_compute_api_version('2.9') - arglist = ['--project', identity_fakes.project_name] - verifylist = [('project', identity_fakes.project_name)] + arglist = ['--project', self._project.name] + verifylist = [('project', self._project.name)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.10 or greater is required', str(ex)) + '--os-compute-api-version 2.10 or greater is required', str(ex) + ) def test_keypair_list_conflicting_user_options(self): - arglist = [ - '--user', identity_fakes.user_name, - '--project', identity_fakes.project_name, + '--user', + self._user.name, + '--project', + self._project.name, ] self.assertRaises( 
tests_utils.ParserException, - self.check_parser, self.cmd, arglist, None) + self.check_parser, + self.cmd, + arglist, + None, + ) - @mock.patch.object( - sdk_utils, 'supports_microversion', new=mock.Mock(return_value=True)) def test_keypair_list_with_limit(self): + self.set_compute_api_version('2.35') + arglist = [ - '--limit', '1', + '--limit', + '1', ] verifylist = [ ('limit', 1), @@ -582,13 +612,14 @@ def test_keypair_list_with_limit(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.sdk_client.keypairs.assert_called_with(limit=1) + self.compute_client.keypairs.assert_called_with(limit=1) - @mock.patch.object( - sdk_utils, 'supports_microversion', new=mock.Mock(return_value=False)) def test_keypair_list_with_limit_pre_v235(self): + self.set_compute_api_version('2.34') + arglist = [ - '--limit', '1', + '--limit', + '1', ] verifylist = [ ('limit', 1), @@ -596,18 +627,19 @@ def test_keypair_list_with_limit_pre_v235(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.35 or greater is required', str(ex)) + '--os-compute-api-version 2.35 or greater is required', str(ex) + ) - @mock.patch.object( - sdk_utils, 'supports_microversion', new=mock.Mock(return_value=True)) def test_keypair_list_with_marker(self): + self.set_compute_api_version('2.35') + arglist = [ - '--marker', 'test_kp', + '--marker', + 'test_kp', ] verifylist = [ ('marker', 'test_kp'), @@ -616,13 +648,14 @@ def test_keypair_list_with_marker(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.sdk_client.keypairs.assert_called_with(marker='test_kp') + self.compute_client.keypairs.assert_called_with(marker='test_kp') - @mock.patch.object( - sdk_utils, 'supports_microversion', new=mock.Mock(return_value=False)) def test_keypair_list_with_marker_pre_v235(self): + self.set_compute_api_version('2.34') + arglist = [ - '--marker', 'test_kp', + '--marker', + 'test_kp', ] verifylist = [ ('marker', 'test_kp'), @@ -630,89 +663,78 @@ def test_keypair_list_with_marker_pre_v235(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.35 or greater is required', str(ex)) + '--os-compute-api-version 2.35 or greater is required', str(ex) + ) class TestKeypairShow(TestKeypair): - - keypair = compute_fakes.FakeKeypair.create_one_keypair() - def setUp(self): - super(TestKeypairShow, self).setUp() - - self.sdk_client.find_keypair.return_value = self.keypair - - self.cmd = keypair.ShowKeypair(self.app, None) + super().setUp() self.columns = ( - "fingerprint", - "name", - "type", - "user_id" + 'created_at', + 'fingerprint', + 'id', + 'is_deleted', + 'name', + 'private_key', + 'type', + 'user_id', ) - self.data = ( - self.keypair.fingerprint, - self.keypair.name, - self.keypair.type, - self.keypair.user_id - ) + self.cmd = keypair.ShowKeypair(self.app, None) def test_keypair_show_no_options(self): - arglist = [] verifylist = [] # Missing required args should boil here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + 
tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_keypair_show(self): - # overwrite the setup one because we want to omit private_key - self.keypair = compute_fakes.FakeKeypair.create_one_keypair( - no_pri=True) - self.sdk_client.find_keypair.return_value = self.keypair + self.keypair = sdk_fakes.generate_fake_resource(_keypair.Keypair) + self.compute_client.find_keypair.return_value = self.keypair self.data = ( + self.keypair.created_at, self.keypair.fingerprint, + self.keypair.id, + self.keypair.is_deleted, self.keypair.name, + self.keypair.private_key, self.keypair.type, - self.keypair.user_id + self.keypair.user_id, ) - arglist = [ - self.keypair.name - ] - verifylist = [ - ('name', self.keypair.name) - ] + arglist = [self.keypair.name] + verifylist = [('name', self.keypair.name)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.find_keypair.assert_called_with( - self.keypair.name, - ignore_missing=False + self.compute_client.find_keypair.assert_called_with( + self.keypair.name, ignore_missing=False ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_keypair_show_public(self): + self.keypair = sdk_fakes.generate_fake_resource(_keypair.Keypair) + self.compute_client.find_keypair.return_value = self.keypair - arglist = [ - '--public-key', - self.keypair.name - ] - verifylist = [ - ('public_key', True), - ('name', self.keypair.name) - ] + arglist = ['--public-key', self.keypair.name] + verifylist = [('public_key', True), ('name', self.keypair.name)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -721,59 +743,67 @@ def test_keypair_show_public(self): self.assertEqual({}, columns) self.assertEqual({}, data) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_keypair_show_with_user(self, sm_mock): + def test_keypair_show_with_user(self): + self.set_compute_api_version('2.10') - # overwrite the setup one because we want to omit private_key - self.keypair = compute_fakes.FakeKeypair.create_one_keypair( - no_pri=True) - self.sdk_client.find_keypair.return_value = self.keypair + self.keypair = sdk_fakes.generate_fake_resource(_keypair.Keypair) + self.compute_client.find_keypair.return_value = self.keypair self.data = ( + self.keypair.created_at, self.keypair.fingerprint, + self.keypair.id, + self.keypair.is_deleted, self.keypair.name, + self.keypair.private_key, self.keypair.type, - self.keypair.user_id + self.keypair.user_id, ) arglist = [ - '--user', identity_fakes.user_name, + '--user', + self._user.name, self.keypair.name, ] verifylist = [ - ('user', identity_fakes.user_name), - ('name', self.keypair.name) + ('user', self._user.name), + ('name', self.keypair.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.users_mock.get.assert_called_with(identity_fakes.user_name) - self.sdk_client.find_keypair.assert_called_with( + self.users_mock.get.assert_called_with(self._user.name) + self.compute_client.find_keypair.assert_called_with( self.keypair.name, ignore_missing=False, - user_id=identity_fakes.user_id + user_id=self._user.id, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_keypair_show_with_user_pre_v210(self, sm_mock): + def test_keypair_show_with_user_pre_v210(self): + 
self.set_compute_api_version('2.9') + self.keypair = sdk_fakes.generate_fake_resource(_keypair.Keypair) arglist = [ - '--user', identity_fakes.user_name, + '--user', + self._user.name, self.keypair.name, ] verifylist = [ - ('user', identity_fakes.user_name), - ('name', self.keypair.name) + ('user', self._user.name), + ('name', self.keypair.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( exceptions.CommandError, self.cmd.take_action, - parsed_args) + parsed_args, + ) self.assertIn( - '--os-compute-api-version 2.10 or greater is required', str(ex)) + '--os-compute-api-version 2.10 or greater is required', + str(ex), + ) diff --git a/openstackclient/tests/unit/compute/v2/test_server.py b/openstackclient/tests/unit/compute/v2/test_server.py index a5d5a43f06..0597312510 100644 --- a/openstackclient/tests/unit/compute/v2/test_server.py +++ b/openstackclient/tests/unit/compute/v2/test_server.py @@ -11,392 +11,187 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -import argparse -import collections -import copy + +import base64 import getpass import json import tempfile from unittest import mock -from unittest.mock import call +import uuid import iso8601 -from novaclient import api_versions +from openstack.compute.v2 import flavor as _flavor +from openstack.compute.v2 import server as _server +from openstack.compute.v2 import server_group as _server_group from openstack import exceptions as sdk_exceptions -from openstack import utils as sdk_utils +from openstack.image.v2 import image as _image +from openstack.test import fakes as sdk_fakes from osc_lib.cli import format_columns from osc_lib import exceptions from osc_lib import utils as common_utils +from openstackclient.api import compute_v2 from openstackclient.compute.v2 import server from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes from openstackclient.tests.unit.image.v2 import fakes as image_fakes from openstackclient.tests.unit.network.v2 import fakes as network_fakes -from openstackclient.tests.unit import utils -from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes +from openstackclient.tests.unit import utils as test_utils +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes -class TestPowerStateColumn(utils.TestCase): - +class TestPowerStateColumn(test_utils.TestCase): def test_human_readable(self): self.assertEqual( - 'NOSTATE', server.PowerStateColumn(0x00).human_readable()) - self.assertEqual( - 'Running', server.PowerStateColumn(0x01).human_readable()) - self.assertEqual( - '', server.PowerStateColumn(0x02).human_readable()) - self.assertEqual( - 'Paused', server.PowerStateColumn(0x03).human_readable()) + 'NOSTATE', server.PowerStateColumn(0x00).human_readable() + ) self.assertEqual( - 'Shutdown', server.PowerStateColumn(0x04).human_readable()) + 'Running', server.PowerStateColumn(0x01).human_readable() + ) + self.assertEqual('', server.PowerStateColumn(0x02).human_readable()) self.assertEqual( - '', server.PowerStateColumn(0x05).human_readable()) + 'Paused', server.PowerStateColumn(0x03).human_readable() + ) self.assertEqual( - 'Crashed', server.PowerStateColumn(0x06).human_readable()) + 'Shutdown', server.PowerStateColumn(0x04).human_readable() + ) + self.assertEqual('', server.PowerStateColumn(0x05).human_readable()) self.assertEqual( - 'Suspended', server.PowerStateColumn(0x07).human_readable()) + 
'Crashed', server.PowerStateColumn(0x06).human_readable() + ) self.assertEqual( - 'N/A', server.PowerStateColumn(0x08).human_readable()) + 'Suspended', server.PowerStateColumn(0x07).human_readable() + ) + self.assertEqual('N/A', server.PowerStateColumn(0x08).human_readable()) class TestServer(compute_fakes.TestComputev2): - def setUp(self): - super(TestServer, self).setUp() - - # Get a shortcut to the compute client ServerManager Mock - self.servers_mock = self.app.client_manager.compute.servers - self.servers_mock.reset_mock() - - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - - # Get a shortcut to the compute client ServerMigrationsManager Mock - self.server_migrations_mock = \ - self.app.client_manager.compute.server_migrations - self.server_migrations_mock.reset_mock() - - # Get a shortcut to the compute client VolumeManager mock - self.servers_volumes_mock = self.app.client_manager.compute.volumes - self.servers_volumes_mock.reset_mock() - - # Get a shortcut to the compute client MigrationManager mock - self.migrations_mock = self.app.client_manager.compute.migrations - self.migrations_mock.reset_mock() - - # Get a shortcut to the compute client FlavorManager Mock - self.flavors_mock = self.app.client_manager.compute.flavors - self.flavors_mock.reset_mock() - - # Get a shortcut to the image client ImageManager Mock - self.images_mock = self.app.client_manager.image.images - self.images_mock.reset_mock() - - self.find_image_mock = self.app.client_manager.image.find_image - self.find_image_mock.reset_mock() - - self.get_image_mock = self.app.client_manager.image.get_image - self.get_image_mock.reset_mock() - - # Get a shortcut to the volume client VolumeManager Mock - self.volumes_mock = self.app.client_manager.volume.volumes - self.volumes_mock.reset_mock() - - self.app.client_manager.sdk_connection.volume = mock.Mock() - self.sdk_volume_client = self.app.client_manager.sdk_connection.volume - - # Get a shortcut to the volume client VolumeManager Mock - self.snapshots_mock = self.app.client_manager.volume.volume_snapshots - self.snapshots_mock.reset_mock() + super().setUp() # Set object attributes to be tested. Could be overwritten in subclass. self.attrs = {} - # Set object methods to be tested. Could be overwritten in subclass. 
- self.methods = {} - - patcher = mock.patch.object( - sdk_utils, 'supports_microversion', return_value=True) - self.addCleanup(patcher.stop) - self.supports_microversion_mock = patcher.start() - self._set_mock_microversion( - self.app.client_manager.compute.api_version.get_string()) - - def _set_mock_microversion(self, mock_v): - """Set a specific microversion for the mock supports_microversion().""" - self.supports_microversion_mock.reset_mock(return_value=True) - - self.supports_microversion_mock.side_effect = ( - lambda _, v: - api_versions.APIVersion(v) <= api_versions.APIVersion(mock_v)) - - def setup_servers_mock(self, count): - # If we are creating more than one server, make one of them - # boot-from-volume - include_bfv = count > 1 - servers = compute_fakes.FakeServer.create_servers( - attrs=self.attrs, - methods=self.methods, - count=count - 1 if include_bfv else count - ) - if include_bfv: - attrs = copy.deepcopy(self.attrs) - attrs['image'] = '' - bfv_server = compute_fakes.FakeServer.create_one_server( - attrs=attrs, - methods=self.methods - ) - servers.append(bfv_server) - - # This is the return value for utils.find_resource() - self.servers_mock.get = compute_fakes.FakeServer.get_servers(servers, - 0) - return servers - def setup_sdk_servers_mock(self, count): - servers = compute_fakes.FakeServer.create_sdk_servers( + servers = compute_fakes.create_servers( attrs=self.attrs, - methods=self.methods, count=count, ) # This is the return value for compute_client.find_server() - self.sdk_client.find_server.side_effect = servers + self.compute_client.find_server.side_effect = servers return servers - def setup_sdk_volumes_mock(self, count): - volumes = volume_fakes.create_sdk_volumes(count=count) - - # This is the return value for volume_client.find_volume() - self.sdk_volume_client.find_volume.side_effect = volumes - - return volumes - - def run_method_with_servers(self, method_name, server_count): - # Starting with v2.91, the nova api needs to be call with a sentinel - # as availability_zone=None will unpin the server az. 
- _sentinel = object() - - servers = self.setup_servers_mock(server_count) - - arglist = [] - verifylist = [] - - for s in servers: - arglist.append(s.id) - verifylist = [ - ('server', arglist), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - for s in servers: - method = getattr(s, method_name) - if method_name == 'lock': - version = self.app.client_manager.compute.api_version - if version >= api_versions.APIVersion('2.73'): - method.assert_called_with(reason=None) - elif method_name == 'unshelve': - version = self.app.client_manager.compute.api_version - if version >= api_versions.APIVersion('2.91'): - method.assert_called_with(availability_zone=_sentinel, - host=None) - elif (version >= api_versions.APIVersion('2.77') and - version < api_versions.APIVersion('2.91')): - method.assert_called_with(availability_zone=None) - else: - method.assert_called_with() - else: - method.assert_called_with() - self.assertIsNone(result) - - def run_method_with_sdk_servers(self, method_name, server_count): - servers = self.setup_sdk_servers_mock(count=server_count) - - arglist = [s.id for s in servers] - verifylist = [ - ('server', arglist), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - calls = [call(s.id) for s in servers] - method = getattr(self.sdk_client, method_name) - method.assert_has_calls(calls) - self.assertIsNone(result) - class TestServerAddFixedIP(TestServer): - def setUp(self): super().setUp() # Get the command object to test self.cmd = server.AddFixedIP(self.app, None) - # Mock network methods - self.find_network = mock.Mock() - self.app.client_manager.network.find_network = self.find_network - - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_add_fixed_ip_pre_v244(self, sm_mock): - sm_mock.return_value = False + def test_server_add_fixed_ip_pre_v249_with_tag(self): + self.set_compute_api_version('2.48') servers = self.setup_sdk_servers_mock(count=1) - network = compute_fakes.FakeNetwork.create_one_network() + network = compute_fakes.create_one_network() with mock.patch.object( self.app.client_manager, 'is_network_endpoint_enabled', - return_value=False + return_value=False, ): arglist = [ servers[0].id, network['id'], + '--fixed-ip-address', + '5.6.7.8', + '--tag', + 'tag1', ] verifylist = [ ('server', servers[0].id), ('network', network['id']), - ('fixed_ip_address', None), + ('fixed_ip_address', '5.6.7.8'), + ('tag', 'tag1'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - - self.sdk_client.add_fixed_ip_to_server.assert_called_once_with( - servers[0].id, - network['id'] + ex = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--os-compute-api-version 2.49 or greater is required', str(ex) ) - # the legacy API operates asynchronously - self.assertEqual(((), ()), result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_add_fixed_ip_pre_v244_with_fixed_ip(self, sm_mock): - sm_mock.return_value = False + def test_server_add_fixed_ip(self): + self.set_compute_api_version('2.49') servers = self.setup_sdk_servers_mock(count=1) - network = compute_fakes.FakeNetwork.create_one_network() + network = compute_fakes.create_one_network() + interface = compute_fakes.create_one_server_interface() + self.compute_client.create_server_interface.return_value = interface with mock.patch.object( 
self.app.client_manager, 'is_network_endpoint_enabled', - return_value=False + return_value=False, ): - arglist = [ - servers[0].id, - network['id'], - '--fixed-ip-address', '5.6.7.8' - ] + arglist = [servers[0].id, network['id']] verifylist = [ ('server', servers[0].id), ('network', network['id']), - ('fixed_ip_address', '5.6.7.8'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - - self.sdk_client.add_fixed_ip_to_server.assert_called_once_with( - servers[0].id, - network['id'] + expected_columns = ( + 'Port ID', + 'Server ID', + 'Network ID', + 'MAC Address', + 'Port State', + 'Fixed IPs', + ) + expected_data = ( + interface.port_id, + interface.server_id, + interface.net_id, + interface.mac_addr, + interface.port_state, + format_columns.ListDictColumn(interface.fixed_ips), ) - # the legacy API operates asynchronously - self.assertEqual(((), ()), result) - - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_add_fixed_ip_pre_v244_with_tag(self, sm_mock): - sm_mock.return_value = False - - servers = self.setup_sdk_servers_mock(count=1) - network = compute_fakes.FakeNetwork.create_one_network() - with mock.patch.object( - self.app.client_manager, - 'is_network_endpoint_enabled', - return_value=False - ): - arglist = [ - servers[0].id, - network['id'], - '--fixed-ip-address', '5.6.7.8', - '--tag', 'tag1' - ] - verifylist = [ - ('server', servers[0].id), - ('network', network['id']), - ('fixed_ip_address', '5.6.7.8'), - ('tag', 'tag1') - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) - ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn( - '--os-compute-api-version 2.49 or greater is required', - str(ex)) + self.assertEqual(expected_columns, columns) + self.assertEqual(expected_data, tuple(data)) + self.compute_client.create_server_interface.assert_called_once_with( + servers[0].id, net_id=network['id'] + ) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_add_fixed_ip_pre_v249_with_tag(self, sm_mock): - sm_mock.side_effect = [False, True] + def test_server_add_fixed_ip_with_fixed_ip(self): + self.set_compute_api_version('2.49') servers = self.setup_sdk_servers_mock(count=1) - network = compute_fakes.FakeNetwork.create_one_network() + network = compute_fakes.create_one_network() + interface = compute_fakes.create_one_server_interface() + self.compute_client.create_server_interface.return_value = interface with mock.patch.object( self.app.client_manager, 'is_network_endpoint_enabled', - return_value=False + return_value=False, ): arglist = [ servers[0].id, network['id'], - '--fixed-ip-address', '5.6.7.8', - '--tag', 'tag1' + '--fixed-ip-address', + '5.6.7.8', ] verifylist = [ ('server', servers[0].id), ('network', network['id']), ('fixed_ip_address', '5.6.7.8'), - ('tag', 'tag1') - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn( - '--os-compute-api-version 2.49 or greater is required', - str(ex)) - - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_add_fixed_ip(self, sm_mock): - sm_mock.side_effect = [True, False] - - servers = self.setup_sdk_servers_mock(count=1) - network = compute_fakes.FakeNetwork.create_one_network() - interface = compute_fakes.create_one_server_interface() - 
self.sdk_client.create_server_interface.return_value = interface - - with mock.patch.object( - self.app.client_manager, - 'is_network_endpoint_enabled', - return_value=False - ): - arglist = [ - servers[0].id, - network['id'] - ] - verifylist = [ - ('server', servers[0].id), - ('network', network['id']) ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -421,34 +216,38 @@ def test_server_add_fixed_ip(self, sm_mock): self.assertEqual(expected_columns, columns) self.assertEqual(expected_data, tuple(data)) - self.sdk_client.create_server_interface.assert_called_once_with( + self.compute_client.create_server_interface.assert_called_once_with( servers[0].id, - net_id=network['id'] + net_id=network['id'], + fixed_ips=[{'ip_address': '5.6.7.8'}], ) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_add_fixed_ip_with_fixed_ip(self, sm_mock): - sm_mock.side_effect = [True, True] + def test_server_add_fixed_ip_with_tag(self): + self.set_compute_api_version('2.49') servers = self.setup_sdk_servers_mock(count=1) - network = compute_fakes.FakeNetwork.create_one_network() + network = compute_fakes.create_one_network() interface = compute_fakes.create_one_server_interface() - self.sdk_client.create_server_interface.return_value = interface + self.compute_client.create_server_interface.return_value = interface with mock.patch.object( self.app.client_manager, 'is_network_endpoint_enabled', - return_value=False + return_value=False, ): arglist = [ servers[0].id, network['id'], - '--fixed-ip-address', '5.6.7.8' + '--fixed-ip-address', + '5.6.7.8', + '--tag', + 'tag1', ] verifylist = [ ('server', servers[0].id), ('network', network['id']), - ('fixed_ip_address', '5.6.7.8') + ('fixed_ip_address', '5.6.7.8'), + ('tag', 'tag1'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -475,37 +274,39 @@ def test_server_add_fixed_ip_with_fixed_ip(self, sm_mock): self.assertEqual(expected_columns, columns) self.assertEqual(expected_data, tuple(data)) - self.sdk_client.create_server_interface.assert_called_once_with( + self.compute_client.create_server_interface.assert_called_once_with( servers[0].id, net_id=network['id'], - fixed_ips=[{'ip_address': '5.6.7.8'}] + fixed_ips=[{'ip_address': '5.6.7.8'}], + tag='tag1', ) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_add_fixed_ip_with_tag(self, sm_mock): - sm_mock.side_effect = [True, True, True] + def test_server_add_fixed_ip_with_fixed_ip_with_tag(self): + self.set_compute_api_version('2.49') servers = self.setup_sdk_servers_mock(count=1) - network = compute_fakes.FakeNetwork.create_one_network() + network = compute_fakes.create_one_network() interface = compute_fakes.create_one_server_interface() - self.sdk_client.create_server_interface.return_value = interface + self.compute_client.create_server_interface.return_value = interface with mock.patch.object( self.app.client_manager, 'is_network_endpoint_enabled', - return_value=False + return_value=False, ): arglist = [ servers[0].id, network['id'], - '--fixed-ip-address', '5.6.7.8', - '--tag', 'tag1' + '--fixed-ip-address', + '5.6.7.8', + '--tag', + 'tag1', ] verifylist = [ ('server', servers[0].id), ('network', network['id']), ('fixed_ip_address', '5.6.7.8'), - ('tag', 'tag1') + ('tag', 'tag1'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -532,7 +333,7 @@ def test_server_add_fixed_ip_with_tag(self, sm_mock): self.assertEqual(expected_columns, columns) self.assertEqual(expected_data, tuple(data)) - 
self.sdk_client.create_server_interface.assert_called_once_with( + self.compute_client.create_server_interface.assert_called_once_with( servers[0].id, net_id=network['id'], fixed_ips=[{'ip_address': '5.6.7.8'}], @@ -540,59 +341,57 @@ def test_server_add_fixed_ip_with_tag(self, sm_mock): ) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.floating_ip_add' -) class TestServerAddFloatingIPCompute(compute_fakes.TestComputev2): - def setUp(self): - super(TestServerAddFloatingIPCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server - # Get the command object to test self.cmd = server.AddFloatingIP(self.app, None) - def test_server_add_floating_ip_default(self, fip_mock): - _floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip() + def test_server_add_floating_ip_default(self): arglist = [ - 'server1', - _floating_ip['ip'], + self.server.name, + '1.2.3.4', ] verifylist = [ - ('server', 'server1'), - ('ip_address', _floating_ip['ip']), + ('server', self.server.name), + ('ip_address', '1.2.3.4'), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - fip_mock.assert_called_once_with( - 'server1', - _floating_ip['ip'], - fixed_address=None, + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False + ) + self.compute_client.add_floating_ip_to_server.assert_called_once_with( + self.server, '1.2.3.4', fixed_address=None ) - def test_server_add_floating_ip_fixed(self, fip_mock): - _floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip() + def test_server_add_floating_ip_fixed(self): arglist = [ - '--fixed-ip-address', _floating_ip['fixed_ip'], - 'server1', - _floating_ip['ip'], + '--fixed-ip-address', + '5.6.7.8', + self.server.name, + '1.2.3.4', ] verifylist = [ - ('fixed_ip_address', _floating_ip['fixed_ip']), - ('server', 'server1'), - ('ip_address', _floating_ip['ip']), + ('fixed_ip_address', '5.6.7.8'), + ('server', self.server.name), + ('ip_address', '1.2.3.4'), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - fip_mock.assert_called_once_with( - 'server1', - _floating_ip['ip'], - fixed_address=_floating_ip['fixed_ip'], + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False + ) + self.compute_client.add_floating_ip_to_server.assert_called_once_with( + self.server, '1.2.3.4', fixed_address='5.6.7.8' ) @@ -600,30 +399,28 @@ class TestServerAddFloatingIPNetwork( TestServer, network_fakes.TestNetworkV2, ): - def setUp(self): - super(TestServerAddFloatingIPNetwork, self).setUp() + super().setUp() - self.app.client_manager.network = mock.Mock() - self.network = self.app.client_manager.network - self.network.update_ip = mock.Mock(return_value=None) + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + + self.network_client.update_ip.return_value = None # Get the command object to test - self.cmd = server.AddFloatingIP(self.app, self.namespace) + self.cmd = server.AddFloatingIP(self.app, None) def test_server_add_floating_ip(self): - _server = compute_fakes.FakeServer.create_one_server() - self.servers_mock.get.return_value = _server _port = 
network_fakes.create_one_port() _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip() - self.network.find_ip = mock.Mock(return_value=_floating_ip) - self.network.ports = mock.Mock(return_value=[_port]) + self.network_client.find_ip.return_value = _floating_ip + self.network_client.ports.return_value = [_port] arglist = [ - _server.id, + self.server.id, _floating_ip['floating_ip_address'], ] verifylist = [ - ('server', _server.id), + ('server', self.server.id), ('ip_address', _floating_ip['floating_ip_address']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -634,74 +431,68 @@ def test_server_add_floating_ip(self): 'port_id': _port.id, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( _floating_ip['floating_ip_address'], ignore_missing=False, ) - self.network.ports.assert_called_once_with( - device_id=_server.id, + self.network_client.ports.assert_called_once_with( + device_id=self.server.id, ) - self.network.update_ip.assert_called_once_with( - _floating_ip, - **attrs + self.network_client.update_ip.assert_called_once_with( + _floating_ip, **attrs ) def test_server_add_floating_ip_no_ports(self): - server = compute_fakes.FakeServer.create_one_server() floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip() - self.servers_mock.get.return_value = server - self.network.find_ip = mock.Mock(return_value=floating_ip) - self.network.ports = mock.Mock(return_value=[]) + self.network_client.find_ip.return_value = floating_ip + self.network_client.ports.return_value = [] arglist = [ - server.id, + self.server.id, floating_ip['floating_ip_address'], ] verifylist = [ - ('server', server.id), + ('server', self.server.id), ('ip_address', floating_ip['floating_ip_address']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - 'No attached ports found to associate floating IP with', - str(ex)) + 'No attached ports found to associate floating IP with', str(ex) + ) - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( floating_ip['floating_ip_address'], ignore_missing=False, ) - self.network.ports.assert_called_once_with( - device_id=server.id, + self.network_client.ports.assert_called_once_with( + device_id=self.server.id, ) def test_server_add_floating_ip_no_external_gateway(self, success=False): - _server = compute_fakes.FakeServer.create_one_server() - self.servers_mock.get.return_value = _server _port = network_fakes.create_one_port() _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip() - self.network.find_ip = mock.Mock(return_value=_floating_ip) + self.network_client.find_ip.return_value = _floating_ip return_value = [_port] # In the success case, we'll have two ports, where the first port is # not attached to an external gateway but the second port is. 
if success: return_value.append(_port) - self.network.ports = mock.Mock(return_value=return_value) + self.network_client.ports.return_value = return_value side_effect = [sdk_exceptions.NotFoundException()] if success: side_effect.append(None) - self.network.update_ip = mock.Mock(side_effect=side_effect) + self.network_client.update_ip.side_effect = side_effect arglist = [ - _server.id, + self.server.id, _floating_ip['floating_ip_address'], ] verifylist = [ - ('server', _server.id), + ('server', self.server.id), ('ip_address', _floating_ip['floating_ip_address']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -709,50 +500,51 @@ def test_server_add_floating_ip_no_external_gateway(self, success=False): if success: self.cmd.take_action(parsed_args) else: - self.assertRaises(sdk_exceptions.NotFoundException, - self.cmd.take_action, parsed_args) + self.assertRaises( + sdk_exceptions.NotFoundException, + self.cmd.take_action, + parsed_args, + ) attrs = { 'port_id': _port.id, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( _floating_ip['floating_ip_address'], ignore_missing=False, ) - self.network.ports.assert_called_once_with( - device_id=_server.id, + self.network_client.ports.assert_called_once_with( + device_id=self.server.id, ) if success: - self.assertEqual(2, self.network.update_ip.call_count) + self.assertEqual(2, self.network_client.update_ip.call_count) calls = [mock.call(_floating_ip, **attrs)] * 2 - self.network.update_ip.assert_has_calls(calls) + self.network_client.update_ip.assert_has_calls(calls) else: - self.network.update_ip.assert_called_once_with( - _floating_ip, - **attrs + self.network_client.update_ip.assert_called_once_with( + _floating_ip, **attrs ) def test_server_add_floating_ip_one_external_gateway(self): self.test_server_add_floating_ip_no_external_gateway(success=True) def test_server_add_floating_ip_with_fixed_ip(self): - _server = compute_fakes.FakeServer.create_one_server() - self.servers_mock.get.return_value = _server _port = network_fakes.create_one_port() _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip() - self.network.find_ip = mock.Mock(return_value=_floating_ip) - self.network.ports = mock.Mock(return_value=[_port]) + self.network_client.find_ip.return_value = _floating_ip + self.network_client.ports.return_value = [_port] # The user has specified a fixed ip that matches one of the ports # already attached to the instance. 
arglist = [ - '--fixed-ip-address', _port.fixed_ips[0]['ip_address'], - _server.id, + '--fixed-ip-address', + _port.fixed_ips[0]['ip_address'], + self.server.id, _floating_ip['floating_ip_address'], ] verifylist = [ ('fixed_ip_address', _port.fixed_ips[0]['ip_address']), - ('server', _server.id), + ('server', self.server.id), ('ip_address', _floating_ip['floating_ip_address']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -766,69 +558,59 @@ def test_server_add_floating_ip_with_fixed_ip(self): 'fixed_ip_address': _port.fixed_ips[0]['ip_address'], } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( _floating_ip['floating_ip_address'], ignore_missing=False, ) - self.network.ports.assert_called_once_with( - device_id=_server.id, + self.network_client.ports.assert_called_once_with( + device_id=self.server.id, ) - self.network.update_ip.assert_called_once_with( - _floating_ip, - **attrs + self.network_client.update_ip.assert_called_once_with( + _floating_ip, **attrs ) def test_server_add_floating_ip_with_fixed_ip_no_port_found(self): - _server = compute_fakes.FakeServer.create_one_server() - self.servers_mock.get.return_value = _server _port = network_fakes.create_one_port() _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip() - self.network.find_ip = mock.Mock(return_value=_floating_ip) - self.network.ports = mock.Mock(return_value=[_port]) + self.network_client.find_ip.return_value = _floating_ip + self.network_client.ports.return_value = [_port] # The user has specified a fixed ip that does not match any of the # ports already attached to the instance. nonexistent_ip = '10.0.0.9' arglist = [ - '--fixed-ip-address', nonexistent_ip, - _server.id, + '--fixed-ip-address', + nonexistent_ip, + self.server.id, _floating_ip['floating_ip_address'], ] verifylist = [ ('fixed_ip_address', nonexistent_ip), - ('server', _server.id), + ('server', self.server.id), ('ip_address', _floating_ip['floating_ip_address']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( _floating_ip['floating_ip_address'], ignore_missing=False, ) - self.network.ports.assert_called_once_with( - device_id=_server.id, + self.network_client.ports.assert_called_once_with( + device_id=self.server.id, ) - self.network.update_ip.assert_not_called() + self.network_client.update_ip.assert_not_called() class TestServerAddPort(TestServer): - def setUp(self): - super(TestServerAddPort, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.AddPort(self.app, None) - # Set add_fixed_ip method to be tested. 
- self.methods = { - 'interface_attach': None, - } - - self.find_port = mock.Mock() - self.app.client_manager.network.find_port = self.find_port - def _test_server_add_port(self, port_id): servers = self.setup_sdk_servers_mock(count=1) port = 'fake-port' @@ -837,39 +619,39 @@ def _test_server_add_port(self, port_id): servers[0].id, port, ] - verifylist = [ - ('server', servers[0].id), - ('port', port) - ] + verifylist = [('server', servers[0].id), ('port', port)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.create_server_interface.assert_called_once_with( - servers[0], port_id=port_id) + self.compute_client.create_server_interface.assert_called_once_with( + servers[0], port_id=port_id + ) self.assertIsNone(result) def test_server_add_port(self): - self._test_server_add_port(self.find_port.return_value.id) - self.find_port.assert_called_once_with( - 'fake-port', ignore_missing=False) + self._test_server_add_port( + self.network_client.find_port.return_value.id + ) + self.network_client.find_port.assert_called_once_with( + 'fake-port', ignore_missing=False + ) def test_server_add_port_no_neutron(self): self.app.client_manager.network_endpoint_enabled = False self._test_server_add_port('fake-port') - self.find_port.assert_not_called() + self.network_client.find_port.assert_not_called() - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_add_port_with_tag(self, sm_mock): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.49') + def test_server_add_port_with_tag(self): + self.set_compute_api_version('2.49') servers = self.setup_sdk_servers_mock(count=1) - self.find_port.return_value.id = 'fake-port' + self.network_client.find_port.return_value.id = 'fake-port' arglist = [ servers[0].id, 'fake-port', - '--tag', 'tag1', + '--tag', + 'tag1', ] verifylist = [ ('server', servers[0].id), @@ -881,22 +663,20 @@ def test_server_add_port_with_tag(self, sm_mock): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.sdk_client.create_server_interface.assert_called_once_with( - servers[0], - port_id='fake-port', - tag='tag1') + self.compute_client.create_server_interface.assert_called_once_with( + servers[0], port_id='fake-port', tag='tag1' + ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_server_add_port_with_tag_pre_v249(self, sm_mock): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.48') + def test_server_add_port_with_tag_pre_v249(self): + self.set_compute_api_version('2.48') - servers = self.setup_servers_mock(count=1) - self.find_port.return_value.id = 'fake-port' + servers = self.setup_sdk_servers_mock(count=1) + self.network_client.find_port.return_value.id = 'fake-port' arglist = [ servers[0].id, 'fake-port', - '--tag', 'tag1', + '--tag', + 'tag1', ] verifylist = [ ('server', servers[0].id), @@ -906,56 +686,54 @@ def test_server_add_port_with_tag_pre_v249(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.49 or greater is required', - str(ex)) + '--os-compute-api-version 2.49 or greater is required', str(ex) + ) class TestServerVolume(TestServer): - def setUp(self): - super(TestServerVolume, self).setUp() + super().setUp() - 
self.methods = { - 'create_volume_attachment': None, - } + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server - self.servers = self.setup_sdk_servers_mock(count=1) - self.volumes = self.setup_sdk_volumes_mock(count=1) + self.volume = volume_fakes.create_one_sdk_volume() + self.volume_sdk_client.find_volume.return_value = self.volume attrs = { - 'server_id': self.servers[0].id, - 'volume_id': self.volumes[0].id, + 'server_id': self.server.id, + 'volume_id': self.volume.id, } - self.volume_attachment = \ - compute_fakes.create_one_volume_attachment(attrs=attrs) + self.volume_attachment = compute_fakes.create_one_volume_attachment( + attrs=attrs + ) - self.sdk_client.create_volume_attachment.return_value = \ + self.compute_client.create_volume_attachment.return_value = ( self.volume_attachment + ) class TestServerAddVolume(TestServerVolume): - def setUp(self): - super(TestServerAddVolume, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.AddServerVolume(self.app, None) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_server_add_volume(self, sm_mock): - + def test_server_add_volume(self): + self.set_compute_api_version('2.48') arglist = [ - '--device', '/dev/sdb', - self.servers[0].id, - self.volumes[0].id, + '--device', + '/dev/sdb', + self.server.id, + self.volume.id, ] verifylist = [ - ('server', self.servers[0].id), - ('volume', self.volumes[0].id), + ('server', self.server.id), + ('volume', self.volume.id), ('device', '/dev/sdb'), ] @@ -973,26 +751,24 @@ def test_server_add_volume(self, sm_mock): self.assertEqual(expected_columns, columns) self.assertEqual(expected_data, data) - self.sdk_client.create_volume_attachment.assert_called_once_with( - self.servers[0], volumeId=self.volumes[0].id, device='/dev/sdb') + self.compute_client.create_volume_attachment.assert_called_once_with( + self.server, volumeId=self.volume.id, device='/dev/sdb' + ) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_add_volume_with_tag(self, sm_mock): - def side_effect(compute_client, version): - if version == '2.49': - return True - return False - sm_mock.side_effect = side_effect + def test_server_add_volume_with_tag(self): + self.set_compute_api_version('2.49') arglist = [ - '--device', '/dev/sdb', - '--tag', 'foo', - self.servers[0].id, - self.volumes[0].id, + '--device', + '/dev/sdb', + '--tag', + 'foo', + self.server.id, + self.volume.id, ] verifylist = [ - ('server', self.servers[0].id), - ('volume', self.volumes[0].id), + ('server', self.server.id), + ('volume', self.volume.id), ('device', '/dev/sdb'), ('tag', 'foo'), ] @@ -1012,50 +788,51 @@ def side_effect(compute_client, version): self.assertEqual(expected_columns, columns) self.assertEqual(expected_data, data) - self.sdk_client.create_volume_attachment.assert_called_once_with( - self.servers[0], - volumeId=self.volumes[0].id, + self.compute_client.create_volume_attachment.assert_called_once_with( + self.server, + volumeId=self.volume.id, device='/dev/sdb', - tag='foo') + tag='foo', + ) + + def test_server_add_volume_with_tag_pre_v249(self): + self.set_compute_api_version('2.48') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_server_add_volume_with_tag_pre_v249(self, sm_mock): arglist = [ - self.servers[0].id, - self.volumes[0].id, - '--tag', 'foo', + self.server.id, + self.volume.id, + '--tag', + 'foo', ] verifylist = [ - ('server', self.servers[0].id), - 
('volume', self.volumes[0].id), + ('server', self.server.id), + ('volume', self.volume.id), ('tag', 'foo'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.49 or greater is required', - str(ex)) + '--os-compute-api-version 2.49 or greater is required', str(ex) + ) + + def test_server_add_volume_with_enable_delete_on_termination(self): + self.set_compute_api_version('2.79') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_add_volume_with_enable_delete_on_termination( - self, - sm_mock, - ): self.volume_attachment.delete_on_termination = True arglist = [ '--enable-delete-on-termination', - '--device', '/dev/sdb', - self.servers[0].id, - self.volumes[0].id, + '--device', + '/dev/sdb', + self.server.id, + self.volume.id, ] verifylist = [ - ('server', self.servers[0].id), - ('volume', self.volumes[0].id), + ('server', self.server.id), + ('volume', self.volume.id), ('device', '/dev/sdb'), ('enable_delete_on_termination', True), ] @@ -1081,29 +858,29 @@ def test_server_add_volume_with_enable_delete_on_termination( columns, data = self.cmd.take_action(parsed_args) self.assertEqual(expected_columns, columns) self.assertEqual(expected_data, data) - self.sdk_client.create_volume_attachment.assert_called_once_with( - self.servers[0], - volumeId=self.volumes[0].id, + self.compute_client.create_volume_attachment.assert_called_once_with( + self.server, + volumeId=self.volume.id, device='/dev/sdb', - delete_on_termination=True) + delete_on_termination=True, + ) + + def test_server_add_volume_with_disable_delete_on_termination(self): + self.set_compute_api_version('2.79') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_add_volume_with_disable_delete_on_termination( - self, - sm_mock, - ): self.volume_attachment.delete_on_termination = False arglist = [ '--disable-delete-on-termination', - '--device', '/dev/sdb', - self.servers[0].id, - self.volumes[0].id, + '--device', + '/dev/sdb', + self.server.id, + self.volume.id, ] verifylist = [ - ('server', self.servers[0].id), - ('volume', self.volumes[0].id), + ('server', self.server.id), + ('volume', self.volume.id), ('device', '/dev/sdb'), ('disable_delete_on_termination', True), ] @@ -1130,114 +907,112 @@ def test_server_add_volume_with_disable_delete_on_termination( self.assertEqual(expected_columns, columns) self.assertEqual(expected_data, data) - self.sdk_client.create_volume_attachment.assert_called_once_with( - self.servers[0], - volumeId=self.volumes[0].id, + self.compute_client.create_volume_attachment.assert_called_once_with( + self.server, + volumeId=self.volume.id, device='/dev/sdb', - delete_on_termination=False) + delete_on_termination=False, + ) - @mock.patch.object(sdk_utils, 'supports_microversion') def test_server_add_volume_with_enable_delete_on_termination_pre_v279( self, - sm_mock, ): - def side_effect(compute_client, version): - if version == '2.79': - return False - return True - sm_mock.side_effect = side_effect + self.set_compute_api_version('2.78') arglist = [ - self.servers[0].id, - self.volumes[0].id, + self.server.id, + self.volume.id, '--enable-delete-on-termination', ] verifylist = [ - ('server', self.servers[0].id), - ('volume', self.volumes[0].id), + ('server', self.server.id), + ('volume', self.volume.id), 
('enable_delete_on_termination', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - ex = self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn('--os-compute-api-version 2.79 or greater is required', - str(ex)) + ex = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--os-compute-api-version 2.79 or greater is required', str(ex) + ) - @mock.patch.object(sdk_utils, 'supports_microversion') def test_server_add_volume_with_disable_delete_on_termination_pre_v279( self, - sm_mock, ): - def side_effect(compute_client, version): - if version == '2.79': - return False - return True - sm_mock.side_effect = side_effect + self.set_compute_api_version('2.78') arglist = [ - self.servers[0].id, - self.volumes[0].id, + self.server.id, + self.volume.id, '--disable-delete-on-termination', ] verifylist = [ - ('server', self.servers[0].id), - ('volume', self.volumes[0].id), + ('server', self.server.id), + ('volume', self.volume.id), ('disable_delete_on_termination', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - ex = self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn('--os-compute-api-version 2.79 or greater is required', - str(ex)) + ex = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--os-compute-api-version 2.79 or greater is required', str(ex) + ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) def test_server_add_volume_with_disable_and_enable_delete_on_termination( self, - sm_mock, ): + self.set_compute_api_version('2.78') + arglist = [ '--enable-delete-on-termination', '--disable-delete-on-termination', - '--device', '/dev/sdb', - self.servers[0].id, - self.volumes[0].id, + '--device', + '/dev/sdb', + self.server.id, + self.volume.id, ] verifylist = [ - ('server', self.servers[0].id), - ('volume', self.volumes[0].id), + ('server', self.server.id), + ('volume', self.volume.id), ('device', '/dev/sdb'), ('enable_delete_on_termination', True), ('disable_delete_on_termination', True), ] - ex = self.assertRaises(utils.ParserException, - self.check_parser, - self.cmd, arglist, verifylist) - self.assertIn('argument --disable-delete-on-termination: not allowed ' - 'with argument --enable-delete-on-termination', str(ex)) + ex = self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + self.assertIn( + 'argument --disable-delete-on-termination: not allowed ' + 'with argument --enable-delete-on-termination', + str(ex), + ) class TestServerRemoveVolume(TestServerVolume): - def setUp(self): - super(TestServerRemoveVolume, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.RemoveServerVolume(self.app, None) def test_server_remove_volume(self): arglist = [ - self.servers[0].id, - self.volumes[0].id, + self.server.id, + self.volume.id, ] verifylist = [ - ('server', self.servers[0].id), - ('volume', self.volumes[0].id), + ('server', self.server.id), + ('volume', self.volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1245,29 +1020,20 @@ def test_server_remove_volume(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.sdk_client.delete_volume_attachment.assert_called_once_with( - self.volumes[0], - self.servers[0], + self.compute_client.delete_volume_attachment.assert_called_once_with( + self.volume, 
+ self.server, ignore_missing=False, ) class TestServerAddNetwork(TestServer): - def setUp(self): - super(TestServerAddNetwork, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.AddNetwork(self.app, None) - # Set add_fixed_ip method to be tested. - self.methods = { - 'interface_attach': None, - } - - self.find_network = mock.Mock() - self.app.client_manager.network.find_network = self.find_network - def _test_server_add_network(self, net_id): servers = self.setup_sdk_servers_mock(count=1) network = 'fake-network' @@ -1276,40 +1042,40 @@ def _test_server_add_network(self, net_id): servers[0].id, network, ] - verifylist = [ - ('server', servers[0].id), - ('network', network) - ] + verifylist = [('server', servers[0].id), ('network', network)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.create_server_interface.assert_called_once_with( - servers[0], net_id=net_id) + self.compute_client.create_server_interface.assert_called_once_with( + servers[0], net_id=net_id + ) self.assertIsNone(result) def test_server_add_network(self): - self._test_server_add_network(self.find_network.return_value.id) - self.find_network.assert_called_once_with( - 'fake-network', ignore_missing=False) + self._test_server_add_network( + self.network_client.find_network.return_value.id + ) + self.network_client.find_network.assert_called_once_with( + 'fake-network', ignore_missing=False + ) def test_server_add_network_no_neutron(self): self.app.client_manager.network_endpoint_enabled = False self._test_server_add_network('fake-network') - self.find_network.assert_not_called() + self.network_client.find_network.assert_not_called() - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_add_network_with_tag(self, sm_mock): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.49') + def test_server_add_network_with_tag(self): + self.set_compute_api_version('2.49') servers = self.setup_sdk_servers_mock(count=1) - self.find_network.return_value.id = 'fake-network' + self.network_client.find_network.return_value.id = 'fake-network' arglist = [ servers[0].id, 'fake-network', - '--tag', 'tag1', + '--tag', + 'tag1', ] verifylist = [ ('server', servers[0].id), @@ -1321,24 +1087,21 @@ def test_server_add_network_with_tag(self, sm_mock): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.sdk_client.create_server_interface.assert_called_once_with( - servers[0], - net_id='fake-network', - tag='tag1' + self.compute_client.create_server_interface.assert_called_once_with( + servers[0], net_id='fake-network', tag='tag1' ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_server_add_network_with_tag_pre_v249(self, sm_mock): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.48') + def test_server_add_network_with_tag_pre_v249(self): + self.set_compute_api_version('2.48') servers = self.setup_sdk_servers_mock(count=1) - self.find_network.return_value.id = 'fake-network' + self.network_client.find_network.return_value.id = 'fake-network' arglist = [ servers[0].id, 'fake-network', - '--tag', 'tag1', + '--tag', + 'tag1', ] verifylist = [ ('server', servers[0].id), @@ -1348,637 +1111,755 @@ def test_server_add_network_with_tag_pre_v249(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - 
self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.49 or greater is required', - str(ex)) - + '--os-compute-api-version 2.49 or greater is required', str(ex) + ) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.security_group_find' -) -class TestServerAddSecurityGroup(TestServer): +class TestServerAddSecurityGroup(compute_fakes.TestComputev2): def setUp(self): - super(TestServerAddSecurityGroup, self).setUp() - - self.security_group = \ - compute_fakes.FakeSecurityGroup.create_one_security_group() - - attrs = { - 'security_groups': [{'name': self.security_group['id']}] - } - methods = { - 'add_security_group': None, - } + super().setUp() - self.server = compute_fakes.FakeServer.create_one_server( - attrs=attrs, - methods=methods - ) - # This is the return value for utils.find_resource() for server - self.servers_mock.get.return_value = self.server + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.add_security_group_to_server.return_value = None # Get the command object to test self.cmd = server.AddServerSecurityGroup(self.app, None) - def test_server_add_security_group(self, sg_find_mock): - sg_find_mock.return_value = self.security_group - arglist = [ - self.server.id, - self.security_group['id'] + def test_server_add_security_group__nova_network(self): + arglist = [self.server.id, 'fake_sg'] + verifylist = [ + ('server', self.server.id), + ('security_groups', ['fake_sg']), ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + with mock.patch.object( + self.app.client_manager, + 'is_network_endpoint_enabled', + return_value=False, + ): + with mock.patch.object( + compute_v2, + 'find_security_group', + return_value={'name': 'fake_sg'}, + ) as mock_find_nova_net_sg: + result = self.cmd.take_action(parsed_args) + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.add_security_group_to_server.assert_called_once_with( + self.server, {'name': 'fake_sg'} + ) + mock_find_nova_net_sg.assert_called_once_with( + self.compute_client, 'fake_sg' + ) + self.assertIsNone(result) + + def test_server_add_security_group(self): + arglist = [self.server.id, 'fake_sg'] verifylist = [ ('server', self.server.id), - ('group', self.security_group['id']), + ('security_groups', ['fake_sg']), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - sg_find_mock.assert_called_with( - self.security_group['id'], + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False ) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.add_security_group.assert_called_with( - self.security_group['id'], + self.compute_client.add_security_group_to_server.assert_called_once_with( + self.server, {'name': 'fake_sg'} ) self.assertIsNone(result) class TestServerCreate(TestServer): - columns = ( + 'OS-DCF:diskConfig', + 'OS-EXT-AZ:availability_zone', + 'OS-EXT-SRV-ATTR:host', + 'OS-EXT-SRV-ATTR:hostname', + 'OS-EXT-SRV-ATTR:hypervisor_hostname', + 'OS-EXT-SRV-ATTR:instance_name', + 'OS-EXT-SRV-ATTR:kernel_id', + 'OS-EXT-SRV-ATTR:launch_index', + 'OS-EXT-SRV-ATTR:ramdisk_id', + 'OS-EXT-SRV-ATTR:reservation_id', + 'OS-EXT-SRV-ATTR:root_device_name', + 'OS-EXT-SRV-ATTR:user_data', 'OS-EXT-STS:power_state', + 'OS-EXT-STS:task_state', + 'OS-EXT-STS:vm_state', + 
'OS-SRV-USG:launched_at', + 'OS-SRV-USG:terminated_at', + 'accessIPv4', + 'accessIPv6', 'addresses', + 'config_drive', + 'created', + 'description', 'flavor', + 'hostId', + 'host_status', 'id', 'image', + 'key_name', + 'locked', + 'locked_reason', 'name', - 'networks', + 'progress', + 'project_id', 'properties', + 'server_groups', + 'status', + 'tags', + 'trusted_image_certificates', + 'updated', + 'user_id', + 'volumes_attached', ) def datalist(self): - datalist = ( + return ( + None, # OS-DCF:diskConfig + None, # OS-EXT-AZ:availability_zone + None, # OS-EXT-SRV-ATTR:host + None, # OS-EXT-SRV-ATTR:hostname + None, # OS-EXT-SRV-ATTR:hypervisor_hostname + None, # OS-EXT-SRV-ATTR:instance_name + None, # OS-EXT-SRV-ATTR:kernel_id + None, # OS-EXT-SRV-ATTR:launch_index + None, # OS-EXT-SRV-ATTR:ramdisk_id + None, # OS-EXT-SRV-ATTR:reservation_id + None, # OS-EXT-SRV-ATTR:root_device_name + None, # OS-EXT-SRV-ATTR:user_data server.PowerStateColumn( - getattr(self.new_server, 'OS-EXT-STS:power_state')), - format_columns.DictListColumn({}), - self.flavor.name + ' (' + self.new_server.flavor.get('id') + ')', - self.new_server.id, - self.image.name + ' (' + self.new_server.image.get('id') + ')', - self.new_server.name, - self.new_server.networks, - format_columns.DictColumn(self.new_server.metadata), + self.server.power_state + ), # OS-EXT-STS:power_state + None, # OS-EXT-STS:task_state + None, # OS-EXT-STS:vm_state + None, # OS-SRV-USG:launched_at + None, # OS-SRV-USG:terminated_at + None, # accessIPv4 + None, # accessIPv6 + server.AddressesColumn({}), # addresses + None, # config_drive + None, # created + None, # description + self.flavor.name + " (" + self.flavor.id + ")", # flavor + None, # hostId + None, # host_status + self.server.id, # id + self.image.name + " (" + self.image.id + ")", # image + None, # key_name + None, # locked + None, # locked_reason + self.server.name, + None, # progress + None, # project_id + format_columns.DictColumn({}), # properties + None, # server_groups + None, # status + format_columns.ListColumn([]), # tags + None, # trusted_image_certificates + None, # updated + None, # user_id + format_columns.ListDictColumn([]), # volumes_attached ) - return datalist def setUp(self): - super(TestServerCreate, self).setUp() + super().setUp() + + self.image = image_fakes.create_one_image() + self.image_client.find_image.return_value = self.image + self.image_client.get_image.return_value = self.image + + self.flavor = compute_fakes.create_one_flavor() + self.compute_client.find_flavor.return_value = self.flavor attrs = { + 'addresses': {}, 'networks': {}, + 'image': self.image, + 'flavor': self.flavor, } - self.new_server = compute_fakes.FakeServer.create_one_server( - attrs=attrs) - - # This is the return value for utils.find_resource(). - # This is for testing --wait option. 
- self.servers_mock.get.return_value = self.new_server - - self.servers_mock.create.return_value = self.new_server - - self.image = image_fakes.create_one_image() - self.find_image_mock.return_value = self.image - self.get_image_mock.return_value = self.image + self.server = compute_fakes.create_one_server(attrs=attrs) - self.flavor = compute_fakes.FakeFlavor.create_one_flavor() - self.flavors_mock.get.return_value = self.flavor + self.compute_client.create_server.return_value = self.server + self.compute_client.get_server.return_value = self.server self.volume = volume_fakes.create_one_volume() - self.volume_alt = volume_fakes.create_one_volume() - self.volumes_mock.get.return_value = self.volume - self.snapshot = volume_fakes.create_one_snapshot() - self.snapshots_mock.get.return_value = self.snapshot # Get the command object to test self.cmd = server.CreateServer(self.app, None) def test_server_create_no_options(self): arglist = [ - self.new_server.name, + self.server.name, ] verifylist = [ - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - self.assertRaises(utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_server_create_minimal(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.compute_client.find_flavor.assert_has_calls( + [mock.call(self.flavor.id, ignore_missing=False)] * 2 + ) + self.image_client.find_image.assert_called_once_with( + self.image.id, ignore_missing=False + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + networks=[], + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) - self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) - self.assertFalse(self.images_mock.called) - self.assertFalse(self.flavors_mock.called) def test_server_create_with_options(self): + server_group = sdk_fakes.generate_fake_resource( + _server_group.ServerGroup + ) + self.compute_client.find_server_group.return_value = server_group + + security_group = network_fakes.create_one_security_group() + self.network_client.find_security_group.return_value = security_group + arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--key-name', 'keyname', - '--property', 'Beta=b', - '--security-group', 'securitygroup', + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--key-name', + 'keyname', + '--property', + 'Beta=b', + '--security-group', + security_group.id, '--use-config-drive', - '--password', 'passw0rd', - '--hint', 'a=b', - '--hint', 'a=c', - self.new_server.name, + '--password', + 'passw0rd', + '--hint', + 'a=b', + '--hint', + 'a=c', + '--server-group', + server_group.id, + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('key_name', 'keyname'), ('properties', {'Beta': 'b'}), - ('security_group', ['securitygroup']), - ('hint', {'a': ['b', 'c']}), + ('security_groups', [security_group.id]), + ('hints', {'a': ['b', 'c']}), + ('server_group', server_group.id), ('config_drive', True), ('password', 'passw0rd'), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
- fake_sg = network_fakes.FakeSecurityGroup.create_security_groups() - mock_find_sg = ( - network_fakes.FakeSecurityGroup.get_security_groups(fake_sg) - ) - self.app.client_manager.network.find_security_group = mock_find_sg + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - mock_find_sg.assert_called_once_with('securitygroup', - ignore_missing=False) - # Set expected values - kwargs = dict( - meta={'Beta': 'b'}, - files={}, - reservation_id=None, + self.compute_client.find_flavor.assert_has_calls( + [mock.call(self.flavor.id, ignore_missing=False)] * 2 + ) + self.network_client.find_security_group.assert_called_once_with( + security_group.id, ignore_missing=False + ) + self.image_client.find_image.assert_called_once_with( + self.image.id, ignore_missing=False + ) + self.compute_client.find_server_group.assert_called_once_with( + server_group.id, ignore_missing=False + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + metadata={'Beta': 'b'}, min_count=1, max_count=1, - security_groups=[fake_sg[0].id], - userdata=None, + security_groups=[{'name': security_group.id}], key_name='keyname', - availability_zone=None, - admin_pass='passw0rd', - block_device_mapping_v2=[], - nics=[], - scheduler_hints={'a': ['b', 'c']}, + admin_password='passw0rd', + networks=[], + scheduler_hints={'a': ['b', 'c'], 'group': server_group.id}, config_drive=True, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_not_exist_security_group(self): + self.network_client.find_security_group.side_effect = ( + sdk_exceptions.NotFoundException() + ) + arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--key-name', 'keyname', - '--security-group', 'securitygroup', - '--security-group', 'not_exist_sg', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--key-name', + 'keyname', + '--security-group', + 'not_exist_sg', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('key_name', 'keyname'), - ('security_group', ['securitygroup', 'not_exist_sg']), - ('server_name', self.new_server.name), + ('security_groups', ['not_exist_sg']), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - fake_sg = network_fakes.FakeSecurityGroup.create_security_groups( - count=1) - fake_sg.append(exceptions.NotFound(code=404)) - mock_find_sg = ( - network_fakes.FakeSecurityGroup.get_security_groups(fake_sg) + self.assertRaises( + sdk_exceptions.NotFoundException, self.cmd.take_action, parsed_args + ) + self.network_client.find_security_group.assert_called_once_with( + 'not_exist_sg', ignore_missing=False ) - self.app.client_manager.network.find_security_group = mock_find_sg - - self.assertRaises(exceptions.NotFound, - self.cmd.take_action, - parsed_args) - mock_find_sg.assert_called_with('not_exist_sg', - ignore_missing=False) def test_server_create_with_security_group_in_nova_network(self): - arglist = [ - 
'--image', 'image1', - '--flavor', 'flavor1', - '--key-name', 'keyname', - '--security-group', 'securitygroup', - self.new_server.name, + sg_name = 'nova-net-sec-group' + arglist = [ + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--security-group', + sg_name, + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), - ('key_name', 'keyname'), - ('security_group', ['securitygroup']), - ('server_name', self.new_server.name), + ('image', self.image.id), + ('flavor', self.flavor.id), + ('security_groups', [sg_name]), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch.object(self.app.client_manager, - 'is_network_endpoint_enabled', - return_value=False): - with mock.patch.object(self.app.client_manager.compute.api, - 'security_group_find', - return_value={'name': 'fake_sg'} - ) as mock_find: + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + with mock.patch.object( + self.app.client_manager, + 'is_network_endpoint_enabled', + return_value=False, + ): + with mock.patch.object( + compute_v2, + 'find_security_group', + return_value={'name': sg_name}, + ) as mock_find: columns, data = self.cmd.take_action(parsed_args) - mock_find.assert_called_once_with('securitygroup') - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + mock_find.assert_called_once_with(self.compute_client, sg_name) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=['fake_sg'], - userdata=None, - key_name='keyname', - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + security_groups=[{'name': sg_name}], + networks=[], + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) - def test_server_create_with_network(self): + def test_server_create_with_no_security_group(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--network', 'net1', - '--nic', 'net-id=net1,v4-fixed-ip=10.0.0.2', - '--port', 'port1', - '--network', 'net1', - '--network', 'auto', # this is a network called 'auto' - '--nic', 'port-id=port2', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--no-security-group', + self.server.name, + ] + verifylist = [ + ('image', self.image.id), + ('flavor', self.flavor.id), + ('key_name', None), + ('properties', None), + ('security_groups', []), + ('hints', {}), + ('server_group', None), + ('config_drive', False), + ('password', None), + ('server_name', self.server.name), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + self.compute_client.find_flavor.assert_has_calls( + [mock.call(self.flavor.id, ignore_missing=False)] * 2 + ) + self.network_client.find_security_group.assert_not_called() + self.image_client.find_image.assert_called_once_with( + self.image.id, ignore_missing=False + ) + self.compute_client.create_server.assert_called_once_with( + 
name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + security_groups=[], + networks=[], + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist(), data) + + def test_server_create_with_network(self): + network_net1 = network_fakes.create_one_network() + network_net2 = network_fakes.create_one_network() + network_auto = network_fakes.create_one_network({'name': 'auto'}) + port_port1 = network_fakes.create_one_port() + port_port2 = network_fakes.create_one_port() + + def find_network(name_or_id, ignore_missing): + assert ignore_missing is False + return { + network_net1.id: network_net1, + network_net2.id: network_net2, + network_auto.name: network_auto, + }[name_or_id] + + def find_port(name_or_id, ignore_missing): + assert ignore_missing is False + return { + port_port1.name: port_port1, + port_port2.id: port_port2, + }[name_or_id] + + self.app.client_manager.network.find_network.side_effect = find_network + self.app.client_manager.network.find_port.side_effect = find_port + + arglist = [ + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--network', + network_net1.id, + '--nic', + f'net-id={network_net2.id},v4-fixed-ip=10.0.0.2', + '--port', + port_port1.name, + '--network', + network_auto.name, + '--nic', + f'port-id={port_port2.id}', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), - ('nics', [ + ('image', self.image.id), + ('flavor', self.flavor.id), + ( + 'nics', + [ + { + 'net-id': network_net1.id, + 'port-id': '', + 'v4-fixed-ip': '', + 'v6-fixed-ip': '', + }, + { + 'net-id': network_net2.id, + 'port-id': '', + 'v4-fixed-ip': '10.0.0.2', + 'v6-fixed-ip': '', + }, + { + 'net-id': '', + 'port-id': port_port1.name, + 'v4-fixed-ip': '', + 'v6-fixed-ip': '', + }, + { + 'net-id': network_auto.name, + 'port-id': '', + 'v4-fixed-ip': '', + 'v6-fixed-ip': '', + }, + { + 'net-id': '', + 'port-id': port_port2.id, + 'v4-fixed-ip': '', + 'v6-fixed-ip': '', + }, + ], + ), + ('config_drive', False), + ('server_name', self.server.name), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.find_network.assert_has_calls( + [ + mock.call(network_net1.id, ignore_missing=False), + mock.call(network_net2.id, ignore_missing=False), + mock.call(network_auto.name, ignore_missing=False), + ] + ) + self.network_client.find_port.assert_has_calls( + [ + mock.call(port_port1.name, ignore_missing=False), + mock.call(port_port2.id, ignore_missing=False), + ] + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + networks=[ { - 'net-id': 'net1', - 'port-id': '', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', + 'uuid': network_net1.id, }, { - 'net-id': 'net1', - 'port-id': '', - 'v4-fixed-ip': '10.0.0.2', - 'v6-fixed-ip': '', + 'uuid': network_net2.id, + 'fixed_ip': '10.0.0.2', }, { - 'net-id': '', - 'port-id': 'port1', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', + 'port': port_port1.id, }, { - 'net-id': 'net1', - 'port-id': '', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', + 'uuid': network_auto.id, }, { - 'net-id': 'auto', - 'port-id': '', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', + 'port': port_port2.id, }, + ], + 
block_device_mapping=[ { - 'net-id': '', - 'port-id': 'port2', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, }, - ]), - ('config_drive', False), - ('server_name', self.new_server.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - get_endpoints = mock.Mock() - get_endpoints.return_value = {'network': []} - self.app.client_manager.auth_ref = mock.Mock() - self.app.client_manager.auth_ref.service_catalog = mock.Mock() - self.app.client_manager.auth_ref.service_catalog.get_endpoints = ( - get_endpoints) - - find_network = mock.Mock() - find_port = mock.Mock() - network_client = self.app.client_manager.network - network_client.find_network = find_network - network_client.find_port = find_port - network_resource = mock.Mock(id='net1_uuid') - port1_resource = mock.Mock(id='port1_uuid') - port2_resource = mock.Mock(id='port2_uuid') - find_network.return_value = network_resource - find_port.side_effect = (lambda port_id, ignore_missing: - {"port1": port1_resource, - "port2": port2_resource}[port_id]) - - # Mock sdk APIs. - _network_1 = mock.Mock(id='net1_uuid') - _network_auto = mock.Mock(id='auto_uuid') - _port1 = mock.Mock(id='port1_uuid') - _port2 = mock.Mock(id='port2_uuid') - find_network = mock.Mock() - find_port = mock.Mock() - find_network.side_effect = lambda net_id, ignore_missing: { - "net1": _network_1, - "auto": _network_auto, - }[net_id] - find_port.side_effect = (lambda port_id, ignore_missing: - {"port1": _port1, - "port2": _port2}[port_id]) - self.app.client_manager.network.find_network = find_network - self.app.client_manager.network.find_port = find_port - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
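# Illustrative sketch, not part of the diff: the rewritten network test stubs
# find_network/find_port with side_effect callables so each name or ID maps to
# its own fake resource and ignore_missing=False is enforced.  Standalone
# version with made-up IDs:
from unittest import mock

net1 = mock.Mock(id='net1-uuid')
port1 = mock.Mock(id='port1-uuid')

def fake_find_network(name_or_id, ignore_missing):
    assert ignore_missing is False
    return {'net1-uuid': net1}[name_or_id]

network_client = mock.Mock()
network_client.find_network.side_effect = fake_find_network
network_client.find_port.return_value = port1

# name/ID resolution happens before the create request is built
assert network_client.find_network('net1-uuid', ignore_missing=False) is net1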
- columns, data = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, - min_count=1, - max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[{'net-id': 'net1_uuid', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', - 'port-id': ''}, - {'net-id': 'net1_uuid', - 'v4-fixed-ip': '10.0.0.2', - 'v6-fixed-ip': '', - 'port-id': ''}, - {'net-id': '', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', - 'port-id': 'port1_uuid'}, - {'net-id': 'net1_uuid', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', - 'port-id': ''}, - {'net-id': 'auto_uuid', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', - 'port-id': ''}, - {'net-id': '', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', - 'port-id': 'port2_uuid'}], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_network_tag(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.43') + self.set_compute_api_version('2.43') + + network = network_fakes.create_one_network() + self.app.client_manager.network.find_network.return_value = network arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'net-id=net1,tag=foo', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + f'net-id={network.id},tag=foo', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), - ('nics', [ - { - 'net-id': 'net1', 'port-id': '', - 'v4-fixed-ip': '', 'v6-fixed-ip': '', - 'tag': 'foo', - }, - ]), - ('server_name', self.new_server.name), + ('image', self.image.id), + ('flavor', self.flavor.id), + ( + 'nics', + [ + { + 'net-id': network.id, + 'port-id': '', + 'v4-fixed-ip': '', + 'v6-fixed-ip': '', + 'tag': 'foo', + }, + ], + ), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - find_network = mock.Mock() - network_client = self.app.client_manager.network - network_client.find_network = find_network - network_resource = mock.Mock(id='net1_uuid') - find_network.return_value = network_resource - - # Mock sdk APIs. - _network = mock.Mock(id='net1_uuid') - find_network = mock.Mock() - find_network.return_value = _network - self.app.client_manager.network.find_network = find_network - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
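# Illustrative sketch, not part of the diff: each parsed --nic dict
# (net-id/port-id/v4-fixed-ip/v6-fixed-ip[/tag]) is translated into one entry
# of the "networks" payload asserted above.  A simplified mapping, not the
# actual OSC helper:
nic = {
    'net-id': 'net2-uuid',
    'port-id': '',
    'v4-fixed-ip': '10.0.0.2',
    'v6-fixed-ip': '',
    'tag': 'foo',
}

entry = {}
if nic['port-id']:
    entry['port'] = nic['port-id']
else:
    entry['uuid'] = nic['net-id']
if nic['v4-fixed-ip']:
    entry['fixed_ip'] = nic['v4-fixed-ip']
elif nic['v6-fixed-ip']:
    entry['fixed_ip'] = nic['v6-fixed-ip']
if nic.get('tag'):
    entry['tag'] = nic['tag']  # only valid with compute API microversion >= 2.43

print(entry)  # {'uuid': 'net2-uuid', 'fixed_ip': '10.0.0.2', 'tag': 'foo'}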
columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.network_client.find_network.assert_called_once_with( + network.id, ignore_missing=False + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[ + networks=[ { - 'net-id': 'net1_uuid', - 'v4-fixed-ip': '', - 'v6-fixed-ip': '', - 'port-id': '', + 'uuid': network.id, 'tag': 'foo', }, ], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) - network_client.find_network.assert_called_once() - self.app.client_manager.network.find_network.assert_called_once() - def test_server_create_with_network_tag_pre_v243(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.42') + self.set_compute_api_version('2.42') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'net-id=net1,tag=foo', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'net-id=net1,tag=foo', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), - ('nics', [ - { - 'net-id': 'net1', 'port-id': '', - 'v4-fixed-ip': '', 'v6-fixed-ip': '', - 'tag': 'foo', - }, - ]), - ('server_name', self.new_server.name), + ('image', self.image.id), + ('flavor', self.flavor.id), + ( + 'nics', + [ + { + 'net-id': 'net1', + 'port-id': '', + 'v4-fixed-ip': '', + 'v6-fixed-ip': '', + 'tag': 'foo', + }, + ], + ), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.network_client.find_network.assert_not_called() + self.compute_client.create_server.assert_not_called() def _test_server_create_with_auto_network(self, arglist): # requires API microversion 2.37 or later - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.37') + self.set_compute_api_version('2.37') verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('nics', ['auto']), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.network_client.find_network.assert_not_called() + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - 
nics='auto', - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs - ) - - self.assertEqual(self.columns, columns) + networks='auto', + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], + ) + + self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) # NOTE(stephenfin): '--auto-network' is an alias for '--nic auto' so the @@ -1986,39 +1867,46 @@ def _test_server_create_with_auto_network(self, arglist): def test_server_create_with_auto_network_legacy(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'auto', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'auto', + self.server.name, ] self._test_server_create_with_auto_network(arglist) def test_server_create_with_auto_network(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', + '--image', + self.image.id, + '--flavor', + self.flavor.id, '--auto-network', - self.new_server.name, + self.server.name, ] self._test_server_create_with_auto_network(arglist) def test_server_create_with_auto_network_pre_v237(self): # use an API microversion that's too old - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.36') + self.set_compute_api_version('2.36') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'auto', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'auto', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('nics', ['auto']), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -2033,52 +1921,48 @@ def test_server_create_with_auto_network_pre_v237(self): 'allocation', str(exc), ) - self.assertNotCalled(self.servers_mock.create) + self.compute_client.create_server.assert_not_called() - def test_server_create_with_auto_network_default_v2_37(self): + def test_server_create_with_auto_network_default(self): """Tests creating a server without specifying --nic using 2.37.""" # requires API microversion 2.37 or later - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.37') + self.set_compute_api_version('2.37') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), + ('nics', []), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.network_client.find_network.assert_not_called() + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - 
key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics='auto', - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + networks='auto', + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) @@ -2086,43 +1970,36 @@ def test_server_create_with_auto_network_default_v2_37(self): def _test_server_create_with_none_network(self, arglist): # requires API microversion 2.37 or later - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.37') + self.set_compute_api_version('2.37') verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('nics', ['none']), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.network_client.find_network.assert_not_called() + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics='none', - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + networks='none', + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) @@ -2133,43 +2010,49 @@ def _test_server_create_with_none_network(self, arglist): def test_server_create_with_none_network_legacy(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'none', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'none', + self.server.name, ] self._test_server_create_with_none_network(arglist) def test_server_create_with_none_network(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', + '--image', + self.image.id, + '--flavor', + self.flavor.id, '--no-network', - self.new_server.name, + self.server.name, ] self._test_server_create_with_none_network(arglist) def test_server_create_with_none_network_pre_v237(self): # use an API microversion that's too old - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.36') + self.set_compute_api_version('2.36') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'none', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'none', + self.server.name, ] - verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('nics', ['none']), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - 
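# Illustrative sketch, not part of the diff: 'auto'/'none' network allocation
# needs compute API microversion 2.37 or later, which is why the *_pre_v237
# tests expect a CommandError and assert create_server was never called.  A
# simplified guard using openstacksdk's microversion helper (RuntimeError
# stands in for the real CommandError):
from openstack import utils as sdk_utils


def network_argument(compute_client, nics):
    if nics in ('auto', 'none'):
        if not sdk_utils.supports_microversion(compute_client, '2.37'):
            raise RuntimeError(
                '--os-compute-api-version 2.37 or greater is required to '
                'support explicit auto-allocation of a network or to '
                'disable network allocation'
            )
        return nics  # passed through as the literal string
    # otherwise a list of per-NIC dicts, e.g. [{'uuid': <net id>}, ...]
    return [{'uuid': net_id} for net_id in nics]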
parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( exceptions.CommandError, self.cmd.take_action, @@ -2181,360 +2064,478 @@ def test_server_create_with_none_network_pre_v237(self): 'allocation', str(exc), ) - self.assertNotCalled(self.servers_mock.create) + self.compute_client.create_server.assert_not_called() - def test_server_create_with_conflict_network_options(self): + def test_server_create_with_conflicting_network_options(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'none', - '--nic', 'auto', - '--nic', 'port-id=port1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'none', + '--nic', + 'auto', + '--nic', + 'port-id=port1', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), - ('nics', [ - 'none', - 'auto', - { - 'net-id': '', 'port-id': 'port1', - 'v4-fixed-ip': '', 'v6-fixed-ip': '', - }, - ]), + ('image', self.image.id), + ('flavor', self.flavor.id), + ( + 'nics', + [ + 'none', + 'auto', + { + 'net-id': '', + 'port-id': 'port1', + 'v4-fixed-ip': '', + 'v6-fixed-ip': '', + }, + ], + ), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - get_endpoints = mock.Mock() - get_endpoints.return_value = {'network': []} - self.app.client_manager.auth_ref = mock.Mock() - self.app.client_manager.auth_ref.service_catalog = mock.Mock() - self.app.client_manager.auth_ref.service_catalog.get_endpoints = ( - get_endpoints) - - find_port = mock.Mock() - network_client = self.app.client_manager.network - network_client.find_port = find_port - port_resource = mock.Mock(id='port1_uuid') - find_port.return_value = port_resource - - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) - self.assertNotCalled(self.servers_mock.create) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + 'Specifying a --nic of auto or none cannot be used with any ' + 'other --nic, --network or --port value.', + str(exc), + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_invalid_network_options(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'abcdefgh', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'abcdefgh', + self.server.name, ] - self.assertRaises( - argparse.ArgumentTypeError, + exc = self.assertRaises( + test_utils.ParserException, self.check_parser, - self.cmd, arglist, []) - self.assertNotCalled(self.servers_mock.create) + self.cmd, + arglist, + [], + ) + self.assertIn( + 'Invalid argument abcdefgh; argument must be of form ', + str(exc), + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_invalid_network_key(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'abcdefgh=12324', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'abcdefgh=12324', + self.server.name, ] - self.assertRaises( - argparse.ArgumentTypeError, + exc = self.assertRaises( + test_utils.ParserException, self.check_parser, - self.cmd, arglist, []) - self.assertNotCalled(self.servers_mock.create) + self.cmd, + arglist, + [], + ) + self.assertIn( + 'Invalid argument abcdefgh=12324; argument must be of form ', 
+ str(exc), + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_empty_network_key_value(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'net-id=', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'net-id=', + self.server.name, ] - self.assertRaises( - argparse.ArgumentTypeError, + exc = self.assertRaises( + test_utils.ParserException, self.check_parser, - self.cmd, arglist, []) - self.assertNotCalled(self.servers_mock.create) + self.cmd, + arglist, + [], + ) + self.assertIn( + 'Invalid argument net-id=; argument must be of form ', + str(exc), + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_only_network_key(self): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--nic', 'net-id', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--nic', + 'net-id', + self.server.name, + ] + exc = self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) + self.assertIn( + 'Invalid argument net-id; argument must be of form ', + str(exc), + ) + self.compute_client.create_server.assert_not_called() + + def test_server_create_with_network_in_nova_network(self): + net_name = 'nova-net-net' + net_id = uuid.uuid4().hex + + arglist = [ + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--network', + net_name, + self.server.name, ] - self.assertRaises( - argparse.ArgumentTypeError, + verifylist = [ + ('image', self.image.id), + ('flavor', self.flavor.id), + ( + 'nics', + [ + { + 'net-id': net_name, + 'port-id': '', + 'v4-fixed-ip': '', + 'v6-fixed-ip': '', + }, + ], + ), + ('config_drive', False), + ('server_name', self.server.name), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + with mock.patch.object( + self.app.client_manager, + 'is_network_endpoint_enabled', + return_value=False, + ): + with mock.patch.object( + compute_v2, + 'find_network', + return_value={'id': net_id, 'name': net_name}, + ) as mock_find: + columns, data = self.cmd.take_action(parsed_args) + + mock_find.assert_called_once_with(self.compute_client, net_name) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + networks=[ + { + 'uuid': net_id, + }, + ], + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist(), data) + + def test_server_create_with_conflicting_net_port_filters(self): + arglist = [ + '--image', + 'image1', + '--flavor', + 'flavor1', + '--nic', + 'net-id=abc,port-id=xyz', + self.server.name, + ] + exc = self.assertRaises( + test_utils.ParserException, self.check_parser, - self.cmd, arglist, []) + self.cmd, + arglist, + [], + ) + self.assertIn("either 'network' or 'port'", str(exc)) + self.compute_client.create_server.assert_not_called() - self.assertNotCalled(self.servers_mock.create) + def test_server_create_with_conflicting_fixed_ip_filters(self): + arglist = [ + '--image', + 'image1', + '--flavor', + 'flavor1', + '--nic', + 'net-id=abc,v4-fixed-ip=1.2.3.4,v6-fixed-ip=2001:db8:abcd', + self.server.name, + ] + exc = self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) + 
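# Illustrative sketch, not part of the diff: the --wait tests hand osc-lib's
# polling helper the SDK getter (compute_client.get_server) rather than the
# old novaclient servers.get.  Approximate usage; the cloud name and server ID
# are assumptions:
import openstack
from osc_lib import utils as osc_utils

conn = openstack.connect(cloud='devstack')
server_id = 'srv-1234'

# polls get_server(server_id) until its status becomes 'active' (or 'error')
if not osc_utils.wait_for_status(
    conn.compute.get_server, server_id, callback=lambda progress: None
):
    raise SystemExit('server failed to reach ACTIVE')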
self.assertIn("either 'v4-fixed-ip' or 'v6-fixed-ip'", str(exc)) + self.compute_client.create_server.assert_not_called() @mock.patch.object(common_utils, 'wait_for_status', return_value=True) def test_server_create_with_wait_ok(self, mock_wait_for_status): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', + '--image', + self.image.id, + '--flavor', + self.flavor.id, '--wait', - self.new_server.name, + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('config_drive', False), ('wait', True), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - mock_wait_for_status.assert_called_once_with( - self.servers_mock.get, - self.new_server.id, - callback=mock.ANY, - ) - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + networks=[], + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], + ) + mock_wait_for_status.assert_called_once_with( + self.compute_client.get_server, + self.server.id, + callback=mock.ANY, ) + self.assertEqual(self.columns, columns) - self.assertEqual(self.datalist(), data) + self.assertTupleEqual(self.datalist(), data) @mock.patch.object(common_utils, 'wait_for_status', return_value=False) def test_server_create_with_wait_fails(self, mock_wait_for_status): arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', + '--image', + self.image.id, + '--flavor', + self.flavor.id, '--wait', - self.new_server.name, + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('config_drive', False), ('wait', True), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(SystemExit, self.cmd.take_action, parsed_args) - - mock_wait_for_status.assert_called_once_with( - self.servers_mock.get, - self.new_server.id, - callback=mock.ANY, + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args ) - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + networks=[], + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], + ) + mock_wait_for_status.assert_called_once_with( + 
self.compute_client.get_server, + self.server.id, + callback=mock.ANY, ) - @mock.patch('openstackclient.compute.v2.server.io.open') - def test_server_create_userdata(self, mock_open): - mock_file = mock.Mock(name='File') - mock_open.return_value = mock_file - mock_open.read.return_value = '#!/bin/sh' - + def test_server_create_userdata(self): + user_data = b'#!/bin/sh' arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--user-data', 'userdata.sh', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--user-data', + 'userdata.sh', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('user_data', 'userdata.sh'), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. - columns, data = self.cmd.take_action(parsed_args) - - # Ensure the userdata file is opened - mock_open.assert_called_with('userdata.sh') - - # Ensure the userdata file is closed - mock_file.close.assert_called_with() + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + with mock.patch( + 'openstackclient.compute.v2.server.open', + mock.mock_open(read_data=user_data), + ) as mock_file: + columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + mock_file.assert_called_with('userdata.sh', 'rb') + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=mock_file, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + networks=[], + user_data=base64.b64encode(user_data).decode('utf-8'), + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_volume(self): + self.volume_client.volumes.get.return_value = self.volume + arglist = [ - '--flavor', self.flavor.id, - '--volume', self.volume.name, - self.new_server.name, + '--flavor', + self.flavor.id, + '--volume', + self.volume.name, + self.server.name, ] verifylist = [ ('flavor', self.flavor.id), ('volume', self.volume.name), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'meta': None, - 'files': {}, - 'reservation_id': None, - 'min_count': 1, - 'max_count': 1, - 'security_groups': [], - 'userdata': None, - 'key_name': None, - 'availability_zone': None, - 'admin_pass': None, - 'block_device_mapping_v2': [{ - 'uuid': self.volume.id, - 'boot_index': 0, - 
'source_type': 'volume', - 'destination_type': 'volume', - }], - 'nics': [], - 'scheduler_hints': {}, - 'config_drive': None, - } - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - None, - self.flavor, - **kwargs + self.volume_client.volumes.get.assert_called_once_with( + self.volume.name + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id='', + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + block_device_mapping=[ + { + 'uuid': self.volume.id, + 'boot_index': 0, + 'source_type': 'volume', + 'destination_type': 'volume', + } + ], + networks=[], ) - self.volumes_mock.get.assert_called_once_with( - self.volume.name) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_snapshot(self): + self.volume_client.volume_snapshots.get.return_value = self.snapshot + arglist = [ - '--flavor', self.flavor.id, - '--snapshot', self.snapshot.name, - self.new_server.name, + '--flavor', + self.flavor.id, + '--snapshot', + self.snapshot.name, + self.server.name, ] verifylist = [ ('flavor', self.flavor.id), ('snapshot', self.snapshot.name), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'meta': None, - 'files': {}, - 'reservation_id': None, - 'min_count': 1, - 'max_count': 1, - 'security_groups': [], - 'userdata': None, - 'key_name': None, - 'availability_zone': None, - 'admin_pass': None, - 'block_device_mapping_v2': [{ - 'uuid': self.snapshot.id, - 'boot_index': 0, - 'source_type': 'snapshot', - 'destination_type': 'volume', - 'delete_on_termination': False, - }], - 'nics': [], - 'scheduler_hints': {}, - 'config_drive': None, - } - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - None, - self.flavor, - **kwargs + self.volume_client.volume_snapshots.get.assert_called_once_with( + self.snapshot.name + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id='', + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + block_device_mapping=[ + { + 'uuid': self.snapshot.id, + 'boot_index': 0, + 'source_type': 'snapshot', + 'destination_type': 'volume', + 'delete_on_termination': False, + } + ], + networks=[], ) - self.snapshots_mock.get.assert_called_once_with( - self.snapshot.name) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) @@ -2542,66 +2543,57 @@ def test_server_create_with_snapshot(self): def test_server_create_with_block_device(self): block_device = f'uuid={self.volume.id},source_type=volume,boot_index=0' arglist = [ - '--flavor', self.flavor.id, - '--block-device', block_device, - self.new_server.name, + '--flavor', + self.flavor.id, + '--block-device', + block_device, + self.server.name, ] verifylist = [ ('image', None), ('flavor', self.flavor.id), - ('block_devices', [ - { - 'uuid': self.volume.id, - 'source_type': 'volume', - 'boot_index': '0', - }, - ]), - ('server_name', self.new_server.name), + ( + 'block_devices', + [ + { + 'uuid': self.volume.id, + 'source_type': 'volume', + 'boot_index': '0', + }, + ], + ), + ('server_name', self.server.name), ] - parsed_args 
= self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'meta': None, - 'files': {}, - 'reservation_id': None, - 'min_count': 1, - 'max_count': 1, - 'security_groups': [], - 'userdata': None, - 'key_name': None, - 'availability_zone': None, - 'admin_pass': None, - 'block_device_mapping_v2': [ + # we don't do any validation of IDs when using the legacy option + self.volume_client.volumes.get.assert_not_called() + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id='', + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + block_device_mapping=[ { 'uuid': self.volume.id, + 'boot_index': 0, 'source_type': 'volume', 'destination_type': 'volume', - 'boot_index': 0, - }, + } ], - 'nics': [], - 'scheduler_hints': {}, - 'config_drive': None, - } - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - None, - self.flavor, - **kwargs + networks=[], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_block_device_full(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.67') + self.set_compute_api_version('2.67') + self.volume_alt = volume_fakes.create_one_volume() block_device = ( f'uuid={self.volume.id},source_type=volume,' f'destination_type=volume,disk_bus=ide,device_type=disk,' @@ -2612,55 +2604,64 @@ def test_server_create_with_block_device_full(self): block_device_alt = f'uuid={self.volume_alt.id},source_type=volume' arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device', block_device, - '--block-device', block_device_alt, - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device', + block_device, + '--block-device', + block_device_alt, + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), - ('block_devices', [ - { - 'uuid': self.volume.id, - 'source_type': 'volume', - 'destination_type': 'volume', - 'disk_bus': 'ide', - 'device_type': 'disk', - 'device_name': 'sdb', - 'guest_format': 'ext4', - 'volume_size': '64', - 'volume_type': 'foo', - 'boot_index': '1', - 'delete_on_termination': 'true', - 'tag': 'foo', - }, - { - 'uuid': self.volume_alt.id, - 'source_type': 'volume', - }, - ]), - ('server_name', self.new_server.name), + ( + 'block_devices', + [ + { + 'uuid': self.volume.id, + 'source_type': 'volume', + 'destination_type': 'volume', + 'disk_bus': 'ide', + 'device_type': 'disk', + 'device_name': 'sdb', + 'guest_format': 'ext4', + 'volume_size': '64', + 'volume_type': 'foo', + 'boot_index': '1', + 'delete_on_termination': 'true', + 'tag': 'foo', + }, + { + 'uuid': self.volume_alt.id, + 'source_type': 'volume', + }, + ], + ), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'meta': None, - 'files': {}, - 'reservation_id': None, - 'min_count': 1, - 'max_count': 1, - 'security_groups': [], - 'userdata': None, - 'key_name': None, - 'availability_zone': None, - 'admin_pass': None, - 
'block_device_mapping_v2': [ + # we don't do any validation of IDs when using the legacy option + self.volume_client.volumes.get.assert_not_called() + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, { 'uuid': self.volume.id, 'source_type': 'volume', @@ -2681,24 +2682,14 @@ def test_server_create_with_block_device_full(self): 'destination_type': 'volume', }, ], - 'nics': 'auto', - 'scheduler_hints': {}, - 'config_drive': None, - } - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + networks='auto', ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_block_device_from_file(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.67') + self.set_compute_api_version('2.67') block_device = { 'uuid': self.volume.id, @@ -2720,517 +2711,566 @@ def test_server_create_with_block_device_from_file(self): fp.flush() arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device', fp.name, - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device', + fp.name, + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), ('block_devices', [block_device]), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # CreateServer.take_action() returns two tuples columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'meta': None, - 'files': {}, - 'reservation_id': None, - 'min_count': 1, - 'max_count': 1, - 'security_groups': [], - 'userdata': None, - 'key_name': None, - 'availability_zone': None, - 'admin_pass': None, - 'block_device_mapping_v2': [{ - 'uuid': self.volume.id, - 'source_type': 'volume', - 'destination_type': 'volume', - 'disk_bus': 'ide', - 'device_name': 'sdb', - 'volume_size': 64, - 'guest_format': 'ext4', - 'boot_index': 1, - 'device_type': 'disk', - 'delete_on_termination': True, - 'tag': 'foo', - 'volume_type': 'foo', - }], - 'nics': 'auto', - 'scheduler_hints': {}, - 'config_drive': None, - } - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + # we don't do any validation of IDs when using the legacy option + self.volume_client.volumes.get.assert_not_called() + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + { + 'uuid': self.volume.id, + 'source_type': 'volume', + 'destination_type': 'volume', + 'disk_bus': 'ide', + 'device_name': 'sdb', + 'volume_size': 64, + 'guest_format': 'ext4', + 'boot_index': 1, + 'device_type': 'disk', + 'delete_on_termination': True, + 'tag': 'foo', + 'volume_type': 'foo', + }, + ], + networks='auto', ) self.assertEqual(self.columns, 
columns) self.assertEqual(self.datalist(), data) def test_server_create_with_block_device_invalid_boot_index(self): - block_device = \ + block_device = ( f'uuid={self.volume.name},source_type=volume,boot_index=foo' + ) arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device', block_device, - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device', + block_device, + self.server.name, ] parsed_args = self.check_parser(self.cmd, arglist, []) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn('The boot_index key of --block-device ', str(ex)) + self.compute_client.create_server.assert_not_called() def test_server_create_with_block_device_invalid_source_type(self): block_device = f'uuid={self.volume.name},source_type=foo' arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device', block_device, - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device', + block_device, + self.server.name, ] parsed_args = self.check_parser(self.cmd, arglist, []) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn('The source_type key of --block-device ', str(ex)) + self.compute_client.create_server.assert_not_called() def test_server_create_with_block_device_invalid_destination_type(self): - block_device = \ - f'uuid={self.volume.name},destination_type=foo' - arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device', block_device, - self.new_server.name, + block_device = f'uuid={self.volume.name},destination_type=foo' + arglist = [ + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device', + block_device, + self.server.name, ] parsed_args = self.check_parser(self.cmd, arglist, []) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn('The destination_type key of --block-device ', str(ex)) + self.compute_client.create_server.assert_not_called() def test_server_create_with_block_device_invalid_shutdown(self): - block_device = \ - f'uuid={self.volume.name},delete_on_termination=foo' - arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device', block_device, - self.new_server.name, + block_device = f'uuid={self.volume.name},delete_on_termination=foo' + arglist = [ + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device', + block_device, + self.server.name, ] parsed_args = self.check_parser(self.cmd, arglist, []) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - 'The delete_on_termination key of --block-device ', str(ex)) + 'The delete_on_termination key of --block-device ', str(ex) + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_block_device_tag_pre_v242(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.41') + self.set_compute_api_version('2.41') - block_device = \ - f'uuid={self.volume.name},tag=foo' + block_device = f'uuid={self.volume.name},tag=foo' arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device', block_device, 
- self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device', + block_device, + self.server.name, ] parsed_args = self.check_parser(self.cmd, arglist, []) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.42 or greater is required', - str(ex)) + '--os-compute-api-version 2.42 or greater is required', str(ex) + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_block_device_volume_type_pre_v267(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.66') + self.set_compute_api_version('2.66') block_device = f'uuid={self.volume.name},volume_type=foo' arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device', block_device, - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device', + block_device, + self.server.name, ] parsed_args = self.check_parser(self.cmd, arglist, []) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.67 or greater is required', - str(ex)) + '--os-compute-api-version 2.67 or greater is required', str(ex) + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_block_device_mapping(self): + self.volume_client.volumes.get.return_value = self.volume + arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device-mapping', 'vda=' + self.volume.name + ':::false', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device-mapping', + 'vda=' + self.volume.name + ':::false', + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), - ('block_device_mapping', [ - { - 'device_name': 'vda', - 'uuid': self.volume.name, - 'source_type': 'volume', - 'destination_type': 'volume', - 'delete_on_termination': 'false', - } - ]), + ( + 'block_device_mapping', + [ + { + 'device_name': 'vda', + 'uuid': self.volume.name, + 'source_type': 'volume', + 'destination_type': 'volume', + 'delete_on_termination': 'false', + } + ], + ), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.volume_client.volumes.get.assert_called_once_with( + self.volume.name + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[{ - 'device_name': 'vda', - 'uuid': self.volume.id, - 'destination_type': 'volume', - 'source_type': 'volume', - 'delete_on_termination': 'false', - }], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - 
**kwargs + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + { + 'device_name': 'vda', + 'uuid': self.volume.id, + 'destination_type': 'volume', + 'source_type': 'volume', + 'delete_on_termination': 'false', + }, + ], + networks=[], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_block_device_mapping_min_input(self): + self.volume_client.volumes.get.return_value = self.volume + arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device-mapping', 'vdf=' + self.volume.name, - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device-mapping', + 'vdf=' + self.volume.name, + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), - ('block_device_mapping', [ - { - 'device_name': 'vdf', - 'uuid': self.volume.name, - 'source_type': 'volume', - 'destination_type': 'volume', - } - ]), + ( + 'block_device_mapping', + [ + { + 'device_name': 'vdf', + 'uuid': self.volume.name, + 'source_type': 'volume', + 'destination_type': 'volume', + } + ], + ), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.volume_client.volumes.get.assert_called_once_with( + self.volume.name + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[{ - 'device_name': 'vdf', - 'uuid': self.volume.id, - 'destination_type': 'volume', - 'source_type': 'volume', - }], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + { + 'device_name': 'vdf', + 'uuid': self.volume.id, + 'destination_type': 'volume', + 'source_type': 'volume', + }, + ], + networks=[], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_block_device_mapping_default_input(self): + self.volume_client.volumes.get.return_value = self.volume + arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device-mapping', 'vdf=' + self.volume.name + ':::', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device-mapping', + 'vdf=' + self.volume.name + ':::', + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), - ('block_device_mapping', [ - { - 'device_name': 'vdf', - 'uuid': self.volume.name, - 'source_type': 'volume', - 'destination_type': 'volume', - } - ]), + ( + 'block_device_mapping', + [ + { + 'device_name': 'vdf', + 'uuid': 
self.volume.name, + 'source_type': 'volume', + 'destination_type': 'volume', + } + ], + ), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.volume_client.volumes.get.assert_called_once_with( + self.volume.name + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[{ - 'device_name': 'vdf', - 'uuid': self.volume.id, - 'destination_type': 'volume', - 'source_type': 'volume', - }], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + { + 'device_name': 'vdf', + 'uuid': self.volume.id, + 'destination_type': 'volume', + 'source_type': 'volume', + }, + ], + networks=[], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_block_device_mapping_full_input(self): + self.volume_client.volumes.get.return_value = self.volume + arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, + '--image', + self.image.id, + '--flavor', + self.flavor.id, '--block-device-mapping', 'vde=' + self.volume.name + ':volume:3:true', - self.new_server.name, + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), - ('block_device_mapping', [ - { - 'device_name': 'vde', - 'uuid': self.volume.name, - 'source_type': 'volume', - 'destination_type': 'volume', - 'volume_size': '3', - 'delete_on_termination': 'true', - } - ]), + ( + 'block_device_mapping', + [ + { + 'device_name': 'vde', + 'uuid': self.volume.name, + 'source_type': 'volume', + 'destination_type': 'volume', + 'volume_size': '3', + 'delete_on_termination': 'true', + } + ], + ), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.volume_client.volumes.get.assert_called_once_with( + self.volume.name + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[{ - 'device_name': 'vde', - 'uuid': self.volume.id, - 'destination_type': 'volume', - 'source_type': 'volume', - 'delete_on_termination': 'true', - 'volume_size': '3' - }], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - # 
ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + { + 'device_name': 'vde', + 'uuid': self.volume.id, + 'destination_type': 'volume', + 'source_type': 'volume', + 'delete_on_termination': 'true', + 'volume_size': '3', + }, + ], + networks=[], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_block_device_mapping_snapshot(self): + self.snapshot = volume_fakes.create_one_snapshot() + self.volume_client.volume_snapshots.get.return_value = self.snapshot + arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, + '--image', + self.image.id, + '--flavor', + self.flavor.id, '--block-device-mapping', - 'vds=' + self.volume.name + ':snapshot:5:true', - self.new_server.name, + 'vds=' + self.snapshot.name + ':snapshot:5:true', + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), - ('block_device_mapping', [ - { - 'device_name': 'vds', - 'uuid': self.volume.name, - 'source_type': 'snapshot', - 'volume_size': '5', - 'destination_type': 'volume', - 'delete_on_termination': 'true', - } - ]), + ( + 'block_device_mapping', + [ + { + 'device_name': 'vds', + 'uuid': self.snapshot.name, + 'source_type': 'snapshot', + 'volume_size': '5', + 'destination_type': 'volume', + 'delete_on_termination': 'true', + } + ], + ), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.volume_client.volume_snapshots.get.assert_called_once_with( + self.snapshot.name + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[{ - 'device_name': 'vds', - 'uuid': self.snapshot.id, - 'destination_type': 'volume', - 'source_type': 'snapshot', - 'delete_on_termination': 'true', - 'volume_size': '5' - }], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + { + 'device_name': 'vds', + 'uuid': self.snapshot.id, + 'destination_type': 'volume', + 'source_type': 'snapshot', + 'delete_on_termination': 'true', + 'volume_size': '5', + }, + ], + networks=[], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_with_block_device_mapping_multiple(self): + self.volume_client.volumes.get.return_value = self.volume + arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device-mapping', 'vdb=' + self.volume.name + ':::false', - 
'--block-device-mapping', 'vdc=' + self.volume.name + ':::true', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device-mapping', + 'vdb=' + self.volume.name + ':::false', + '--block-device-mapping', + 'vdc=' + self.volume.name + ':::true', + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), - ('block_device_mapping', [ + ( + 'block_device_mapping', + [ + { + 'device_name': 'vdb', + 'uuid': self.volume.name, + 'source_type': 'volume', + 'destination_type': 'volume', + 'delete_on_termination': 'false', + }, + { + 'device_name': 'vdc', + 'uuid': self.volume.name, + 'source_type': 'volume', + 'destination_type': 'volume', + 'delete_on_termination': 'true', + }, + ], + ), + ('config_drive', False), + ('server_name', self.server.name), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + self.volume_client.volumes.get.assert_has_calls( + [mock.call(self.volume.name)] * 2 + ) + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + block_device_mapping=[ { - 'device_name': 'vdb', - 'uuid': self.volume.name, - 'source_type': 'volume', - 'destination_type': 'volume', - 'delete_on_termination': 'false', - }, - { - 'device_name': 'vdc', - 'uuid': self.volume.name, - 'source_type': 'volume', - 'destination_type': 'volume', - 'delete_on_termination': 'true', + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, }, - ]), - ('config_drive', False), - ('server_name', self.new_server.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # CreateServer.take_action() returns two tuples - columns, data = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, - min_count=1, - max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[ { 'device_name': 'vdb', 'uuid': self.volume.id, @@ -3244,18 +3284,9 @@ def test_server_create_with_block_device_mapping_multiple(self): 'destination_type': 'volume', 'source_type': 'volume', 'delete_on_termination': 'true', - } + }, ], - nics=[], - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + networks=[], ) self.assertEqual(self.columns, columns) @@ -3264,39 +3295,69 @@ def test_server_create_with_block_device_mapping_multiple(self): def test_server_create_with_block_device_mapping_invalid_format(self): # block device mapping don't contain equal sign "=" arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device-mapping', 'not_contain_equal_sign', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device-mapping', + 'not_contain_equal_sign', + self.server.name, ] - self.assertRaises( - argparse.ArgumentTypeError, + exc = self.assertRaises( + test_utils.ParserException, self.check_parser, - self.cmd, arglist, []) + self.cmd, + arglist, + [], + ) + self.assertIn( + 'argument --block-device-mapping: Invalid argument ', str(exc) + ) + self.compute_client.create_server.assert_not_called() # block 
device mapping don't contain device name "=uuid:::true" arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device-mapping', '=uuid:::true', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device-mapping', + '=uuid:::true', + self.server.name, ] - self.assertRaises( - argparse.ArgumentTypeError, + exc = self.assertRaises( + test_utils.ParserException, self.check_parser, - self.cmd, arglist, []) + self.cmd, + arglist, + [], + ) + self.assertIn( + 'argument --block-device-mapping: Invalid argument ', str(exc) + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_block_device_mapping_no_uuid(self): arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--block-device-mapping', 'vdb=', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--block-device-mapping', + 'vdb=', + self.server.name, ] - self.assertRaises( - argparse.ArgumentTypeError, + exc = self.assertRaises( + test_utils.ParserException, self.check_parser, - self.cmd, arglist, []) + self.cmd, + arglist, + [], + ) + self.assertIn( + 'argument --block-device-mapping: Invalid argument ', str(exc) + ) + self.compute_client.create_server.assert_not_called() def test_server_create_volume_boot_from_volume_conflict(self): # Tests that specifying --volume and --boot-from-volume results in @@ -3305,214 +3366,236 @@ def test_server_create_volume_boot_from_volume_conflict(self): # only specify --volume and --boot-from-volume for this test since # the validation is not handled with argparse. arglist = [ - '--flavor', self.flavor.id, - '--volume', 'volume1', - '--boot-from-volume', '1', - self.new_server.name, + '--flavor', + self.flavor.id, + '--volume', + 'volume1', + '--boot-from-volume', + '1', + self.server.name, ] verifylist = [ ('flavor', self.flavor.id), ('volume', 'volume1'), ('boot_from_volume', 1), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - ex = self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + ex = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) # Assert it is the error we expect. - self.assertIn('--volume is not allowed with --boot-from-volume', - str(ex)) + self.assertIn( + '--volume is not allowed with --boot-from-volume', str(ex) + ) + self.compute_client.create_server.assert_not_called() + + def test_server_create_boot_from_volume_no_image(self): + # Test --boot-from-volume option without --image or + # --image-property. 
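# A minimal illustrative sketch, not taken from this change: the test added
# here (test_server_create_boot_from_volume_no_image) exercises the guard
# that rejects --boot-from-volume when neither --image nor --image-property
# was given, before any compute API call is made. The helper name below is
# an assumption; the error text and the use of osc-lib's CommandError mirror
# the assertions in the test.
from osc_lib import exceptions as osc_exceptions


def _require_image_for_boot_from_volume(boot_from_volume, image, image_properties):
    """Reject --boot-from-volume unless an image source was supplied."""
    if boot_from_volume and not (image or image_properties):
        raise osc_exceptions.CommandError(
            'An image (--image or --image-property) is required '
            'to support --boot-from-volume option'
        )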
+ arglist = [ + '--flavor', + self.flavor.id, + '--boot-from-volume', + '1', + self.server.name, + ] + verifylist = [ + ('flavor', self.flavor.id), + ('boot_from_volume', 1), + ('config_drive', False), + ('server_name', self.server.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + ex = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + 'An image (--image or --image-property) is required ' + 'to support --boot-from-volume option', + str(ex), + ) + self.compute_client.create_server.assert_not_called() def test_server_create_image_property(self): + image = image_fakes.create_one_image({'hypervisor_type': 'qemu'}) + self.image_client.images.return_value = [image] + arglist = [ - '--image-property', 'hypervisor_type=qemu', - '--flavor', 'flavor1', - self.new_server.name, + '--image-property', + 'hypervisor_type=qemu', + '--flavor', + self.flavor.id, + self.server.name, ] verifylist = [ ('image_properties', {'hypervisor_type': 'qemu'}), - ('flavor', 'flavor1'), + ('flavor', self.flavor.id), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - # create a image_info as the side_effect of the fake image_list() - image_info = { - 'hypervisor_type': 'qemu', - } - - _image = image_fakes.create_one_image(image_info) - self.images_mock.return_value = [_image] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - files={}, - reservation_id=None, + self.image_client.images.assert_called_once_with() + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[], - meta=None, - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - _image, - self.flavor, - **kwargs + networks=[], + block_device_mapping=[ + { + 'uuid': image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_image_property_multi(self): + image = image_fakes.create_one_image( + {'hypervisor_type': 'qemu', 'hw_disk_bus': 'ide'} + ) + self.image_client.images.return_value = [image] + arglist = [ - '--image-property', 'hypervisor_type=qemu', - '--image-property', 'hw_disk_bus=ide', - '--flavor', 'flavor1', - self.new_server.name, + '--image-property', + 'hypervisor_type=qemu', + '--image-property', + 'hw_disk_bus=ide', + '--flavor', + self.flavor.id, + self.server.name, ] verifylist = [ - ('image_properties', {'hypervisor_type': 'qemu', - 'hw_disk_bus': 'ide'}), - ('flavor', 'flavor1'), + ( + 'image_properties', + {'hypervisor_type': 'qemu', 'hw_disk_bus': 'ide'}, + ), + ('flavor', self.flavor.id), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - # create a image_info as the side_effect of the fake image_list() - image_info = { - 'hypervisor_type': 'qemu', - 'hw_disk_bus': 'ide', - } - _image = image_fakes.create_one_image(image_info) - self.images_mock.return_value = [_image] parsed_args = self.check_parser(self.cmd, arglist, 
verifylist) - columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - files={}, - reservation_id=None, + self.image_client.images.assert_called_once_with() + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[], - meta=None, - scheduler_hints={}, - config_drive=None, - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - _image, - self.flavor, - **kwargs + networks=[], + block_device_mapping=[ + { + 'uuid': image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) def test_server_create_image_property_missed(self): + image = image_fakes.create_one_image( + {'hypervisor_type': 'qemu', 'hw_disk_bus': 'ide'} + ) + self.image_client.images.return_value = [image] + arglist = [ - '--image-property', 'hypervisor_type=qemu', - '--image-property', 'hw_disk_bus=virtio', - '--flavor', 'flavor1', - self.new_server.name, + '--image-property', + 'hypervisor_type=qemu', + # note the mismatch in the 'hw_disk_bus' property + '--image-property', + 'hw_disk_bus=virtio', + '--flavor', + self.flavor.id, + self.server.name, ] verifylist = [ - ('image_properties', {'hypervisor_type': 'qemu', - 'hw_disk_bus': 'virtio'}), - ('flavor', 'flavor1'), + ( + 'image_properties', + {'hypervisor_type': 'qemu', 'hw_disk_bus': 'virtio'}, + ), + ('flavor', self.flavor.id), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - # create a image_info as the side_effect of the fake image_list() - image_info = { - 'hypervisor_type': 'qemu', - 'hw_disk_bus': 'ide', - } - - _image = image_fakes.create_one_image(image_info) - self.images_mock.return_value = [_image] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + 'No images match the property expected by --image-property', + str(exc), + ) + self.compute_client.create_server.assert_not_called() def test_server_create_image_property_with_image_list(self): + target_image = image_fakes.create_one_image( + { + 'properties': { + 'owner_specified.openstack.object': 'image/cirros' + } + } + ) + another_image = image_fakes.create_one_image() + self.image_client.images.return_value = [target_image, another_image] + arglist = [ '--image-property', 'owner_specified.openstack.object=image/cirros', - '--flavor', 'flavor1', - self.new_server.name, + '--flavor', + self.flavor.id, + self.server.name, ] - verifylist = [ - ('image_properties', - {'owner_specified.openstack.object': 'image/cirros'}), - ('flavor', 'flavor1'), - ('server_name', self.new_server.name), + ( + 'image_properties', + {'owner_specified.openstack.object': 'image/cirros'}, + ), + ('flavor', self.flavor.id), + ('server_name', self.server.name), ] - # create a image_info as the side_effect of the fake image_list() - image_info = { - 'properties': { - 'owner_specified.openstack.object': 'image/cirros' - } - } - - target_image = image_fakes.create_one_image(image_info) 
- another_image = image_fakes.create_one_image({}) - self.images_mock.return_value = [target_image, another_image] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - files={}, - reservation_id=None, + self.image_client.images.assert_called_once_with() + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=target_image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics=[], - meta=None, - scheduler_hints={}, - config_drive=None, - ) - - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - target_image, - self.flavor, - **kwargs + networks=[], + block_device_mapping=[ + { + 'uuid': target_image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) @@ -3521,21 +3604,26 @@ def test_server_create_image_property_with_image_list(self): def test_server_create_no_boot_device(self): block_device = f'uuid={self.volume.id},source_type=volume,boot_index=1' arglist = [ - '--block-device', block_device, - '--flavor', self.flavor.id, - self.new_server.name, + '--block-device', + block_device, + '--flavor', + self.flavor.id, + self.server.name, ] verifylist = [ ('image', None), ('flavor', self.flavor.id), - ('block_devices', [ - { - 'uuid': self.volume.id, - 'source_type': 'volume', - 'boot_index': '1', - }, - ]), - ('server_name', self.new_server.name), + ( + 'block_devices', + [ + { + 'uuid': self.volume.id, + 'source_type': 'volume', + 'boot_index': '1', + }, + ], + ), + ('server_name', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( @@ -3548,55 +3636,52 @@ def test_server_create_no_boot_device(self): '(--volume, --snapshot, --block-device) is required', str(exc), ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_swap(self): arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--swap', '1024', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--swap', + '1024', + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), ('swap', 1024), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'meta': None, - 'files': {}, - 'reservation_id': None, - 'min_count': 1, - 'max_count': 1, - 'security_groups': [], - 'userdata': None, - 'key_name': None, - 'availability_zone': None, - 'admin_pass': None, - 'block_device_mapping_v2': [{ - 'boot_index': -1, - 'source_type': 'blank', - 'destination_type': 'local', - 'guest_format': 'swap', - 'volume_size': 1024, - 'delete_on_termination': True, - }], - 'nics': [], - 'scheduler_hints': {}, - 'config_drive': None, - } - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + 
self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + { + 'boot_index': -1, + 'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': 'swap', + 'volume_size': 1024, + 'delete_on_termination': True, + }, + ], + networks=[], ) self.assertEqual(self.columns, columns) @@ -3604,52 +3689,48 @@ def test_server_create_with_swap(self): def test_server_create_with_ephemeral(self): arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--ephemeral', 'size=1024,format=ext4', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--ephemeral', + 'size=1024,format=ext4', + self.server.name, ] verifylist = [ - ('image', 'image1'), + ('image', self.image.id), ('flavor', self.flavor.id), ('ephemerals', [{'size': '1024', 'format': 'ext4'}]), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # CreateServer.take_action() returns two tuples + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'meta': None, - 'files': {}, - 'reservation_id': None, - 'min_count': 1, - 'max_count': 1, - 'security_groups': [], - 'userdata': None, - 'key_name': None, - 'availability_zone': None, - 'admin_pass': None, - 'block_device_mapping_v2': [{ - 'boot_index': -1, - 'source_type': 'blank', - 'destination_type': 'local', - 'guest_format': 'ext4', - 'volume_size': '1024', - 'delete_on_termination': True, - }], - 'nics': [], - 'scheduler_hints': {}, - 'config_drive': None, - } - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + { + 'boot_index': -1, + 'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': 'ext4', + 'volume_size': '1024', + 'delete_on_termination': True, + }, + ], + networks=[], ) self.assertEqual(self.columns, columns) @@ -3657,808 +3738,844 @@ def test_server_create_with_ephemeral(self): def test_server_create_with_ephemeral_missing_key(self): arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--ephemeral', 'format=ext3', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--ephemeral', + 'format=ext3', + self.server.name, ] - self.assertRaises( - argparse.ArgumentTypeError, + exc = self.assertRaises( + test_utils.ParserException, self.check_parser, - self.cmd, arglist, []) + self.cmd, + arglist, + [], + ) + self.assertIn('Argument parse failed', str(exc)) + self.compute_client.create_server.assert_not_called() def test_server_create_with_ephemeral_invalid_key(self): arglist = [ - '--image', 'image1', - '--flavor', self.flavor.id, - '--ephemeral', 'size=1024,foo=bar', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + 
'--ephemeral', + 'size=1024,foo=bar', + self.server.name, ] - self.assertRaises( - argparse.ArgumentTypeError, + exc = self.assertRaises( + test_utils.ParserException, self.check_parser, - self.cmd, arglist, []) + self.cmd, + arglist, + [], + ) + self.assertIn('Argument parse failed', str(exc)) + self.compute_client.create_server.assert_not_called() def test_server_create_invalid_hint(self): # Not a key-value pair arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--hint', 'a0cf03a5-d921-4877-bb5c-86d26cf818e1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--hint', + 'a0cf03a5-d921-4877-bb5c-86d26cf818e1', + self.server.name, ] - self.assertRaises(argparse.ArgumentTypeError, - self.check_parser, - self.cmd, arglist, []) + exc = self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) + self.assertIn('Argument parse failed', str(exc)) + self.compute_client.create_server.assert_not_called() # Empty key arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--hint', '=a0cf03a5-d921-4877-bb5c-86d26cf818e1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--hint', + '=a0cf03a5-d921-4877-bb5c-86d26cf818e1', + self.server.name, ] - self.assertRaises(argparse.ArgumentTypeError, - self.check_parser, - self.cmd, arglist, []) - - def test_server_create_with_description_api_newer(self): + exc = self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) + self.assertIn('Argument parse failed', str(exc)) + self.compute_client.create_server.assert_not_called() + def test_server_create_with_description(self): # Description is supported for nova api version 2.19 or above - self.app.client_manager.compute.api_version = 2.19 + self.set_compute_api_version('2.19') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--description', 'description1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--description', + 'description1', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('description', 'description1'), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch.object(api_versions, - 'APIVersion', - return_value=2.19): - # In base command class ShowOne in cliff, abstract method - # take_action() returns a two-part tuple with a tuple of - # column names and a tuple of data to be shown. 
- columns, data = self.cmd.take_action(parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics='auto', - scheduler_hints={}, - config_drive=None, + networks=[], description='description1', - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) - self.assertFalse(self.images_mock.called) - self.assertFalse(self.flavors_mock.called) - - def test_server_create_with_description_api_older(self): + def test_server_create_with_description_pre_v219(self): # Description is not supported for nova api version below 2.19 - self.app.client_manager.compute.api_version = 2.18 + self.set_compute_api_version('2.18') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--description', 'description1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--description', + 'description1', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('description', 'description1'), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch.object(api_versions, - 'APIVersion', - return_value=2.19): - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_tag(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.52') + self.set_compute_api_version('2.52') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--tag', 'tag1', - '--tag', 'tag2', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--tag', + 'tag1', + '--tag', + 'tag2', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('tags', ['tag1', 'tag2']), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'meta': None, - 'files': {}, - 'reservation_id': None, - 'min_count': 1, - 'max_count': 1, - 'security_groups': [], - 'userdata': None, - 'key_name': None, - 'availability_zone': None, - 'block_device_mapping_v2': [], - 'admin_pass': None, - 'nics': 'auto', - 'scheduler_hints': {}, - 
'config_drive': None, - 'tags': ['tag1', 'tag2'], - } - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, + min_count=1, + max_count=1, + networks='auto', + tags=['tag1', 'tag2'], + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) - self.assertFalse(self.images_mock.called) - self.assertFalse(self.flavors_mock.called) def test_server_create_with_tag_pre_v252(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.51') + self.set_compute_api_version('2.51') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--tag', 'tag1', - '--tag', 'tag2', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--tag', + 'tag1', + '--tag', + 'tag2', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('tags', ['tag1', 'tag2']), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.52 or greater is required', - str(ex)) - - def test_server_create_with_host_v274(self): + '--os-compute-api-version 2.52 or greater is required', str(exc) + ) + self.compute_client.create_server.assert_not_called() + def test_server_create_with_host(self): # Explicit host is supported for nova api version 2.74 or above - self.app.client_manager.compute.api_version = 2.74 + self.set_compute_api_version('2.74') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--host', 'host1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--host', + 'host1', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('host', 'host1'), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) - with mock.patch.object(api_versions, - 'APIVersion', - return_value=2.74): - # In base command class ShowOne in cliff, abstract method - # take_action() returns a two-part tuple with a tuple of - # column names and a tuple of data to be shown. 
- columns, data = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics='auto', - scheduler_hints={}, - config_drive=None, + networks='auto', host='host1', + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs - ) - self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) - self.assertFalse(self.images_mock.called) - self.assertFalse(self.flavors_mock.called) def test_server_create_with_host_pre_v274(self): - # Host is not supported for nova api version below 2.74 - self.app.client_manager.compute.api_version = 2.73 + self.set_compute_api_version('2.73') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--host', 'host1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--host', + 'host1', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('host', 'host1'), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - with mock.patch.object(api_versions, - 'APIVersion', - return_value=2.74): - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) - def test_server_create_with_hypervisor_hostname_v274(self): + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--os-compute-api-version 2.74 or greater is required', str(exc) + ) + self.compute_client.create_server.assert_not_called() + def test_server_create_with_hypervisor_hostname(self): # Explicit hypervisor_hostname is supported for nova api version # 2.74 or above - self.app.client_manager.compute.api_version = 2.74 + self.set_compute_api_version('2.74') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--hypervisor-hostname', 'node1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--hypervisor-hostname', + 'node1', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('hypervisor_hostname', 'node1'), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch.object(api_versions, - 'APIVersion', - return_value=2.74): - # In base command class ShowOne in cliff, abstract method - # take_action() returns a two-part tuple with a tuple of - # column names and a tuple of data to be shown. 
- columns, data = self.cmd.take_action(parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics='auto', - scheduler_hints={}, - config_drive=None, + networks='auto', hypervisor_hostname='node1', + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs - ) - self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) - self.assertFalse(self.images_mock.called) - self.assertFalse(self.flavors_mock.called) def test_server_create_with_hypervisor_hostname_pre_v274(self): - # Hypervisor_hostname is not supported for nova api version below 2.74 - self.app.client_manager.compute.api_version = 2.73 + self.set_compute_api_version('2.73') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--hypervisor-hostname', 'node1', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--hypervisor-hostname', + 'node1', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('hypervisor_hostname', 'node1'), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - with mock.patch.object(api_versions, - 'APIVersion', - return_value=2.74): - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) - - def test_server_create_with_host_and_hypervisor_hostname_v274(self): - - # Explicit host and hypervisor_hostname is supported for nova api - # version 2.74 or above - self.app.client_manager.compute.api_version = 2.74 - arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--host', 'host1', - '--hypervisor-hostname', 'node1', - self.new_server.name, - ] - verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), - ('host', 'host1'), - ('hypervisor_hostname', 'node1'), - ('config_drive', False), - ('server_name', self.new_server.name), - ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - with mock.patch.object(api_versions, - 'APIVersion', - return_value=2.74): - # In base command class ShowOne in cliff, abstract method - # take_action() returns a two-part tuple with a tuple of - # column names and a tuple of data to be shown. 
- columns, data = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, - min_count=1, - max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics='auto', - scheduler_hints={}, - config_drive=None, - host='host1', - hypervisor_hostname='node1', + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + self.assertIn( + '--os-compute-api-version 2.74 or greater is required', str(exc) ) + self.compute_client.create_server.assert_not_called() - self.assertEqual(self.columns, columns) - self.assertEqual(self.datalist(), data) - self.assertFalse(self.images_mock.called) - self.assertFalse(self.flavors_mock.called) - - def test_server_create_with_hostname_v290(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.90') + def test_server_create_with_hostname(self): + self.set_compute_api_version('2.90') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--hostname', 'hostname', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--hostname', + 'hostname', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('hostname', 'hostname'), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - meta=None, - files={}, - reservation_id=None, + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics='auto', - scheduler_hints={}, - config_drive=None, + networks='auto', hostname='hostname', + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) - self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) - self.assertFalse(self.images_mock.called) - self.assertFalse(self.flavors_mock.called) def test_server_create_with_hostname_pre_v290(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.89') + self.set_compute_api_version('2.89') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--hostname', 'hostname', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--hostname', + 'hostname', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('hostname', 'hostname'), ('config_drive', False), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises( - exceptions.CommandError, self.cmd.take_action, - parsed_args) + 
parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--os-compute-api-version 2.90 or greater is required', str(exc) + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_trusted_image_cert(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.63') + self.set_compute_api_version('2.63') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--trusted-image-cert', 'foo', - '--trusted-image-cert', 'bar', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--trusted-image-cert', + 'foo', + '--trusted-image-cert', + 'bar', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('config_drive', False), ('trusted_image_certs', ['foo', 'bar']), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = dict( - meta=None, - files={}, - reservation_id=None, + self.compute_client.create_server.assert_called_once_with( + name=self.server.name, + image_id=self.image.id, + flavor_id=self.flavor.id, min_count=1, max_count=1, - security_groups=[], - userdata=None, - key_name=None, - availability_zone=None, - admin_pass=None, - block_device_mapping_v2=[], - nics='auto', - scheduler_hints={}, - config_drive=None, + networks='auto', trusted_image_certificates=['foo', 'bar'], - ) - # ServerManager.create(name, image, flavor, **kwargs) - self.servers_mock.create.assert_called_with( - self.new_server.name, - self.image, - self.flavor, - **kwargs + block_device_mapping=[ + { + 'uuid': self.image.id, + 'boot_index': 0, + 'source_type': 'image', + 'destination_type': 'local', + 'delete_on_termination': True, + }, + ], ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist(), data) - self.assertFalse(self.images_mock.called) - self.assertFalse(self.flavors_mock.called) - def test_server_create_with_trusted_image_cert_prev263(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.62') + def test_server_create_with_trusted_image_cert_pre_v263(self): + self.set_compute_api_version('2.62') arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--trusted-image-cert', 'foo', - '--trusted-image-cert', 'bar', - self.new_server.name, + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--trusted-image-cert', + 'foo', + '--trusted-image-cert', + 'bar', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('config_drive', False), ('trusted_image_certs', ['foo', 'bar']), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--os-compute-api-version 2.63 or greater is required', str(exc) + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_trusted_image_cert_from_volume(self): - 
self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.63') + self.set_compute_api_version('2.63') + arglist = [ - '--volume', 'volume1', - '--flavor', 'flavor1', - '--trusted-image-cert', 'foo', - '--trusted-image-cert', 'bar', - self.new_server.name, + '--volume', + 'volume1', + '--flavor', + self.flavor.id, + '--trusted-image-cert', + 'foo', + '--trusted-image-cert', + 'bar', + self.server.name, ] verifylist = [ ('volume', 'volume1'), - ('flavor', 'flavor1'), + ('flavor', self.flavor.id), ('config_drive', False), ('trusted_image_certs', ['foo', 'bar']), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--trusted-image-cert option is only supported for servers booted ' + 'directly from images', + str(exc), + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_trusted_image_cert_from_snapshot(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.63') + self.set_compute_api_version('2.63') + arglist = [ - '--snapshot', 'snapshot1', - '--flavor', 'flavor1', - '--trusted-image-cert', 'foo', - '--trusted-image-cert', 'bar', - self.new_server.name, + '--snapshot', + 'snapshot1', + '--flavor', + self.flavor.id, + '--trusted-image-cert', + 'foo', + '--trusted-image-cert', + 'bar', + self.server.name, ] verifylist = [ ('snapshot', 'snapshot1'), - ('flavor', 'flavor1'), + ('flavor', self.flavor.id), ('config_drive', False), ('trusted_image_certs', ['foo', 'bar']), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--trusted-image-cert option is only supported for servers booted ' + 'directly from images', + str(exc), + ) + self.compute_client.create_server.assert_not_called() def test_server_create_with_trusted_image_cert_boot_from_volume(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.63') - arglist = [ - '--image', 'image1', - '--flavor', 'flavor1', - '--boot-from-volume', '1', - '--trusted-image-cert', 'foo', - '--trusted-image-cert', 'bar', - self.new_server.name, + self.set_compute_api_version('2.63') + + arglist = [ + '--image', + self.image.id, + '--flavor', + self.flavor.id, + '--boot-from-volume', + '1', + '--trusted-image-cert', + 'foo', + '--trusted-image-cert', + 'bar', + self.server.name, ] verifylist = [ - ('image', 'image1'), - ('flavor', 'flavor1'), + ('image', self.image.id), + ('flavor', self.flavor.id), ('boot_from_volume', 1), ('config_drive', False), ('trusted_image_certs', ['foo', 'bar']), - ('server_name', self.new_server.name), + ('server_name', self.server.name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, 
self.cmd.take_action, parsed_args + ) + self.assertIn( + '--trusted-image-cert option is only supported for servers booted ' + 'directly from images', + str(exc), + ) + self.compute_client.create_server.assert_not_called() -class TestServerDelete(TestServer): +class TestServerDelete(compute_fakes.TestComputev2): def setUp(self): - super(TestServerDelete, self).setUp() + super().setUp() - self.servers_mock.delete.return_value = None - self.servers_mock.force_delete.return_value = None + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.delete_server.return_value = None # Get the command object to test self.cmd = server.DeleteServer(self.app, None) def test_server_delete_no_options(self): - servers = self.setup_servers_mock(count=1) - arglist = [ - servers[0].id, + self.server.id, ] verifylist = [ - ('server', [servers[0].id]), + ('server', [self.server.id]), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.delete.assert_called_with(servers[0].id) - self.servers_mock.force_delete.assert_not_called() + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False, all_projects=False + ) + self.compute_client.delete_server.assert_called_once_with( + self.server, force=False + ) self.assertIsNone(result) def test_server_delete_with_force(self): - servers = self.setup_servers_mock(count=1) - arglist = [ - servers[0].id, + self.server.id, '--force', ] verifylist = [ - ('server', [servers[0].id]), + ('server', [self.server.id]), ('force', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.servers_mock.force_delete.assert_called_with(servers[0].id) - self.servers_mock.delete.assert_not_called() + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False, all_projects=False + ) + self.compute_client.delete_server.assert_called_once_with( + self.server, force=True + ) self.assertIsNone(result) def test_server_delete_multi_servers(self): - servers = self.setup_servers_mock(count=3) + servers = compute_fakes.create_servers(count=3) + self.compute_client.find_server.return_value = None + self.compute_client.find_server.side_effect = servers arglist = [] verifylist = [] - for s in servers: arglist.append(s.id) verifylist = [ ('server', arglist), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - calls = [] - for s in servers: - calls.append(call(s.id)) - self.servers_mock.delete.assert_has_calls(calls) + self.compute_client.find_server.assert_has_calls( + [ + mock.call(s.id, ignore_missing=False, all_projects=False) + for s in servers + ] + ) + self.compute_client.delete_server.assert_has_calls( + [mock.call(s, force=False) for s in servers] + ) self.assertIsNone(result) - @mock.patch.object(common_utils, 'find_resource') - def test_server_delete_with_all_projects(self, mock_find_resource): - servers = self.setup_servers_mock(count=1) - mock_find_resource.side_effect = compute_fakes.FakeServer.get_servers( - servers, 0, + def test_server_delete_multi_servers_with_exceptions(self): + servers = compute_fakes.create_servers(count=2) + self.compute_client.find_server.side_effect = [ + servers[0], + 
sdk_exceptions.ResourceNotFound(), + servers[1], + ] + + arglist = [servers[0].id, 'unexist_server', servers[1].id] + + verifylist = [ + ('force', False), + ('all_projects', False), + ('wait', False), + ( + 'server', + [servers[0].id, 'unexist_server', servers[1].id], + ), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertEqual('1 of 3 servers failed to delete.', str(exc)) + + self.compute_client.find_server.assert_has_calls( + [ + mock.call( + servers[0].id, ignore_missing=False, all_projects=False + ), + mock.call( + 'unexist_server', ignore_missing=False, all_projects=False + ), + mock.call( + servers[1].id, ignore_missing=False, all_projects=False + ), + ] + ) + + self.compute_client.delete_server.assert_has_calls( + [ + mock.call(servers[0], force=False), + mock.call(servers[1], force=False), + ] ) + def test_server_delete_with_all_projects(self): arglist = [ - servers[0].id, + self.server.id, '--all-projects', ] verifylist = [ - ('server', [servers[0].id]), + ('server', [self.server.id]), + ('all_projects', True), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) - mock_find_resource.assert_called_once_with( - mock.ANY, servers[0].id, all_tenants=True, + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False, all_projects=True ) + self.compute_client.delete_server.assert_called_once_with( + self.server, force=False + ) + self.assertIsNone(result) - @mock.patch.object(common_utils, 'wait_for_delete', return_value=True) - def test_server_delete_wait_ok(self, mock_wait_for_delete): - servers = self.setup_servers_mock(count=1) - + def test_server_delete_wait_ok(self): arglist = [ - servers[0].id, '--wait' + self.server.id, + '--wait', ] verifylist = [ - ('server', [servers[0].id]), + ('server', [self.server.id]), + ('wait', True), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.delete.assert_called_with(servers[0].id) - mock_wait_for_delete.assert_called_once_with( - self.servers_mock, - servers[0].id, + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False, all_projects=False + ) + self.compute_client.delete_server.assert_called_once_with( + self.server, force=False + ) + self.compute_client.wait_for_delete.assert_called_once_with( + self.server, callback=mock.ANY, ) self.assertIsNone(result) - @mock.patch.object(common_utils, 'wait_for_delete', return_value=False) - def test_server_delete_wait_fails(self, mock_wait_for_delete): - servers = self.setup_servers_mock(count=1) + def test_server_delete_wait_fails(self): + self.compute_client.wait_for_delete.side_effect = ( + sdk_exceptions.ResourceTimeout() + ) arglist = [ - servers[0].id, '--wait' + self.server.id, + '--wait', ] verifylist = [ - ('server', [servers[0].id]), + ('server', [self.server.id]), + ('wait', True), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(SystemExit, self.cmd.take_action, parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) - 
self.servers_mock.delete.assert_called_with(servers[0].id) - mock_wait_for_delete.assert_called_once_with( - self.servers_mock, - servers[0].id, + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False, all_projects=False + ) + self.compute_client.delete_server.assert_called_once_with( + self.server, force=False + ) + self.compute_client.wait_for_delete.assert_called_once_with( + self.server, callback=mock.ANY, ) class TestServerDumpCreate(TestServer): - def setUp(self): super().setUp() @@ -4483,7 +4600,7 @@ def run_test_server_dump(self, server_count): self.assertIsNone(result) for s in servers: - s.trigger_crash_dump.assert_called_once_with(self.sdk_client) + s.trigger_crash_dump.assert_called_once_with(self.compute_client) def test_server_dump_one_server(self): self.run_test_server_dump(1) @@ -4493,7 +4610,6 @@ def test_server_dump_multi_servers(self): class _TestServerList(TestServer): - # Columns to be listed up. columns = ( 'ID', @@ -4518,9 +4634,18 @@ class _TestServerList(TestServer): 'Host', 'Properties', ) + columns_all_projects = ( + 'ID', + 'Name', + 'Status', + 'Networks', + 'Image', + 'Flavor', + 'Project ID', + ) def setUp(self): - super(_TestServerList, self).setUp() + super().setUp() # Default params of the core function of the command in the case of no # commandline option specified. @@ -4532,7 +4657,7 @@ def setUp(self): 'status': None, 'flavor': None, 'image': None, - 'host': None, + 'compute_host': None, 'project_id': None, 'all_projects': False, 'user_id': None, @@ -4546,10 +4671,8 @@ def setUp(self): self.attrs = { 'status': 'ACTIVE', 'OS-EXT-STS:task_state': 'None', - 'OS-EXT-STS:power_state': 0x01, # Running - 'networks': { - u'public': [u'10.20.30.40', u'2001:db8::5'] - }, + 'OS-EXT-STS:power_state': 0x01, # Running + 'networks': {'public': ['10.20.30.40', '2001:db8::5']}, 'OS-EXT-AZ:availability_zone': 'availability-zone-xxx', 'OS-EXT-SRV-ATTR:host': 'host-name-xxx', 'Metadata': format_columns.DictColumn({}), @@ -4557,37 +4680,38 @@ def setUp(self): self.image = image_fakes.create_one_image() - # self.images_mock.return_value = [self.image] - self.find_image_mock.return_value = self.image - self.get_image_mock.return_value = self.image + self.image_client.find_image.return_value = self.image + self.image_client.get_image.return_value = self.image - self.flavor = compute_fakes.FakeFlavor.create_one_flavor() - self.sdk_client.find_flavor.return_value = self.flavor + self.flavor = compute_fakes.create_one_flavor() + self.compute_client.find_flavor.return_value = self.flavor self.attrs['flavor'] = {'original_name': self.flavor.name} # The servers to be listed. 
self.servers = self.setup_sdk_servers_mock(3) - self.sdk_client.servers.return_value = self.servers + self.compute_client.servers.return_value = self.servers # Get the command object to test self.cmd = server.ListServer(self.app, None) class TestServerList(_TestServerList): - def setUp(self): - super(TestServerList, self).setUp() + super().setUp() - Image = collections.namedtuple('Image', 'id name') - self.images_mock.return_value = [ - Image(id=s.image['id'], name=self.image.name) + self.image_client.images.return_value = [ + sdk_fakes.generate_fake_resource( + _image.Image, id=s.image['id'], name=self.image.name + ) # Image will be an empty string if boot-from-volume - for s in self.servers if s.image + for s in self.servers + if s.image ] - Flavor = collections.namedtuple('Flavor', 'id name') - self.sdk_client.flavors.return_value = [ - Flavor(id=s.flavor['id'], name=self.flavor.name) + self.compute_client.flavors.return_value = [ + sdk_fakes.generate_fake_resource( + _flavor.Flavor, id=s.flavor['id'], name=self.flavor.name + ) for s in self.servers ] @@ -4600,7 +4724,8 @@ def setUp(self): # Image will be an empty string if boot-from-volume self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, self.flavor.name, - ) for s in self.servers + ) + for s in self.servers ) def test_server_list_no_option(self): @@ -4615,12 +4740,9 @@ def test_server_list_no_option(self): columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.servers.assert_called_with(**self.kwargs) - self.images_mock.assert_called() - self.sdk_client.flavors.assert_called() - # we did not pass image or flavor, so gets on those must be absent - self.assertFalse(self.flavors_mock.get.call_count) - self.assertFalse(self.get_image_mock.call_count) + self.compute_client.servers.assert_called_with(**self.kwargs) + self.image_client.images.assert_called() + self.compute_client.flavors.assert_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) @@ -4632,14 +4754,14 @@ def test_server_list_no_servers(self): ('deleted', False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.sdk_client.servers.return_value = [] + self.compute_client.servers.return_value = [] self.data = () columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.servers.assert_called_with(**self.kwargs) - self.images_mock.assert_not_called() - self.sdk_client.flavors.assert_not_called() + self.compute_client.servers.assert_called_with(**self.kwargs) + self.image_client.images.assert_not_called() + self.compute_client.flavors.assert_not_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) @@ -4650,9 +4772,7 @@ def test_server_list_long_option(self): s.name, s.status, getattr(s, 'task_state'), - server.PowerStateColumn( - getattr(s, 'power_state') - ), + server.PowerStateColumn(getattr(s, 'power_state')), server.AddressesColumn(s.addresses), # Image will be an empty string if boot-from-volume self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, @@ -4662,7 +4782,8 @@ def test_server_list_long_option(self): getattr(s, 'availability_zone'), server.HostColumn(getattr(s, 'hypervisor_hostname')), format_columns.DictColumn(s.metadata), - ) for s in self.servers + ) + for s in self.servers ) arglist = [ '--long', @@ -4674,38 +4795,79 @@ def test_server_list_long_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.servers.assert_called_with(**self.kwargs) 
+ self.compute_client.servers.assert_called_with(**self.kwargs) image_ids = {s.image['id'] for s in self.servers if s.image} - self.images_mock.assert_called_once_with( + self.image_client.images.assert_called_once_with( id=f'in:{",".join(image_ids)}', ) - self.sdk_client.flavors.assert_called_once_with(is_public=None) + self.compute_client.flavors.assert_called_once_with(is_public=None) self.assertEqual(self.columns_long, columns) self.assertEqual(self.data, tuple(data)) - def test_server_list_column_option(self): - arglist = [ - '-c', 'Project ID', - '-c', 'User ID', - '-c', 'Created At', - '-c', 'Security Groups', - '-c', 'Task State', - '-c', 'Power State', - '-c', 'Image ID', - '-c', 'Flavor ID', - '-c', 'Availability Zone', - '-c', 'Host', - '-c', 'Properties', - '--long' - ] - verifylist = [ - ('long', True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) + def test_server_list_all_projects_option(self): + self.data = tuple( + ( + s.id, + s.name, + s.status, + server.AddressesColumn(s.addresses), + # Image will be an empty string if boot-from-volume + self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, + self.flavor.name, + s.project_id, + ) + for s in self.servers + ) + arglist = [ + '--all-projects', + ] + verifylist = [ + ('all_projects', True), + ('long', False), + ('deleted', False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.image_client.images.assert_called() + self.compute_client.flavors.assert_called() + self.assertEqual(self.columns_all_projects, columns) + self.assertEqual(self.data, tuple(data)) + + def test_server_list_column_option(self): + arglist = [ + '-c', + 'Project ID', + '-c', + 'User ID', + '-c', + 'Created At', + '-c', + 'Security Groups', + '-c', + 'Task State', + '-c', + 'Power State', + '-c', + 'Image ID', + '-c', + 'Flavor ID', + '-c', + 'Availability Zone', + '-c', + 'Host', + '-c', + 'Properties', + '--long', + ] + verifylist = [ + ('long', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertIn('Project ID', columns) self.assertIn('User ID', columns) self.assertIn('Created At', columns) @@ -4728,8 +4890,9 @@ def test_server_list_no_name_lookup_option(self): server.AddressesColumn(s.addresses), # Image will be an empty string if boot-from-volume s.image['id'] if s.image else server.IMAGE_STRING_FOR_BFV, - s.flavor['id'] - ) for s in self.servers + s.flavor['id'], + ) + for s in self.servers ) arglist = [ @@ -4743,9 +4906,9 @@ def test_server_list_no_name_lookup_option(self): columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.servers.assert_called_with(**self.kwargs) - self.images_mock.assert_not_called() - self.sdk_client.flavors.assert_not_called() + self.compute_client.servers.assert_called_with(**self.kwargs) + self.image_client.images.assert_not_called() + self.compute_client.flavors.assert_not_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) @@ -4758,8 +4921,9 @@ def test_server_list_n_option(self): server.AddressesColumn(s.addresses), # Image will be an empty string if boot-from-volume s.image['id'] if s.image else server.IMAGE_STRING_FOR_BFV, - s.flavor['id'] - ) for s in self.servers + s.flavor['id'], + ) + for s 
in self.servers ) arglist = [ @@ -4773,16 +4937,14 @@ def test_server_list_n_option(self): columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.servers.assert_called_with(**self.kwargs) - self.images_mock.assert_not_called() - self.sdk_client.flavors.assert_not_called() + self.compute_client.servers.assert_called_with(**self.kwargs) + self.image_client.images.assert_not_called() + self.compute_client.flavors.assert_not_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) def test_server_list_name_lookup_one_by_one(self): - arglist = [ - '--name-lookup-one-by-one' - ] + arglist = ['--name-lookup-one-by-one'] verifylist = [ ('all_projects', False), ('no_name_lookup', False), @@ -4792,67 +4954,55 @@ def test_server_list_name_lookup_one_by_one(self): columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.servers.assert_called_with(**self.kwargs) - self.images_mock.assert_not_called() - self.sdk_client.flavors.assert_not_called() - self.get_image_mock.assert_called() - self.sdk_client.find_flavor.assert_called() + self.compute_client.servers.assert_called_with(**self.kwargs) + self.image_client.images.assert_not_called() + self.compute_client.flavors.assert_not_called() + self.image_client.get_image.assert_called() + self.compute_client.find_flavor.assert_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) def test_server_list_with_image(self): - - arglist = [ - '--image', self.image.id - ] - verifylist = [ - ('image', self.image.id) - ] + arglist = ['--image', self.image.id] + verifylist = [('image', self.image.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.find_image_mock.assert_called_with(self.image.id, - ignore_missing=False) + self.image_client.find_image.assert_called_with( + self.image.id, ignore_missing=False + ) self.kwargs['image'] = self.image.id - self.sdk_client.servers.assert_called_with(**self.kwargs) - self.images_mock.assert_not_called() - self.sdk_client.flavors.assert_called_once() + self.compute_client.servers.assert_called_with(**self.kwargs) + self.image_client.images.assert_not_called() + self.compute_client.flavors.assert_called_once() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) def test_server_list_with_flavor(self): - - arglist = [ - '--flavor', self.flavor.id - ] - verifylist = [ - ('flavor', self.flavor.id) - ] + arglist = ['--flavor', self.flavor.id] + verifylist = [('flavor', self.flavor.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.find_flavor.assert_has_calls( - [mock.call(self.flavor.id)]) + self.compute_client.find_flavor.assert_has_calls( + [mock.call(self.flavor.id, ignore_missing=False)] + ) self.kwargs['flavor'] = self.flavor.id - self.sdk_client.servers.assert_called_with(**self.kwargs) - self.images_mock.assert_called_once() - self.sdk_client.flavors.assert_not_called() + self.compute_client.servers.assert_called_with(**self.kwargs) + self.image_client.images.assert_called_once() + self.compute_client.flavors.assert_not_called() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) def test_server_list_with_changes_since(self): - - arglist = [ - '--changes-since', '2016-03-04T06:27:59Z', - '--deleted' - ] + arglist = ['--changes-since', '2016-03-04T06:27:59Z', '--deleted'] verifylist = [ ('changes_since', 
'2016-03-04T06:27:59Z'), ('deleted', True), @@ -4863,16 +5013,16 @@ def test_server_list_with_changes_since(self): self.kwargs['changes-since'] = '2016-03-04T06:27:59Z' self.kwargs['deleted'] = True - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) @mock.patch.object(iso8601, 'parse_date', side_effect=iso8601.ParseError) def test_server_list_with_invalid_changes_since(self, mock_parse_isotime): - arglist = [ - '--changes-since', 'Invalid time value', + '--changes-since', + 'Invalid time value', ] verifylist = [ ('changes_since', 'Invalid time value'), @@ -4883,18 +5033,19 @@ def test_server_list_with_invalid_changes_since(self, mock_parse_isotime): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('Invalid changes-since value: Invalid time ' - 'value', str(e)) - mock_parse_isotime.assert_called_once_with( - 'Invalid time value' - ) + self.assertEqual( + 'Invalid changes-since value: Invalid time value', str(e) + ) + mock_parse_isotime.assert_called_once_with('Invalid time value') def test_server_list_with_tag(self): - self._set_mock_microversion('2.26') + self.set_compute_api_version('2.26') arglist = [ - '--tag', 'tag1', - '--tag', 'tag2', + '--tag', + 'tag1', + '--tag', + 'tag2', ] verifylist = [ ('tags', ['tag1', 'tag2']), @@ -4905,17 +5056,19 @@ def test_server_list_with_tag(self): self.kwargs['tags'] = 'tag1,tag2' - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) def test_server_list_with_tag_pre_v225(self): - self._set_mock_microversion('2.25') + self.set_compute_api_version('2.25') arglist = [ - '--tag', 'tag1', - '--tag', 'tag2', + '--tag', + 'tag1', + '--tag', + 'tag2', ] verifylist = [ ('tags', ['tag1', 'tag2']), @@ -4923,18 +5076,19 @@ def test_server_list_with_tag_pre_v225(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.26 or greater is required', - str(ex)) + '--os-compute-api-version 2.26 or greater is required', str(ex) + ) def test_server_list_with_not_tag(self): - self._set_mock_microversion('2.26') + self.set_compute_api_version('2.26') arglist = [ - '--not-tag', 'tag1', - '--not-tag', 'tag2', + '--not-tag', + 'tag1', + '--not-tag', + 'tag2', ] verifylist = [ ('not_tags', ['tag1', 'tag2']), @@ -4945,17 +5099,19 @@ def test_server_list_with_not_tag(self): self.kwargs['not-tags'] = 'tag1,tag2' - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) def test_server_list_with_not_tag_pre_v226(self): - self._set_mock_microversion('2.25') + self.set_compute_api_version('2.25') arglist = [ - '--not-tag', 'tag1', - '--not-tag', 'tag2', + '--not-tag', + 'tag1', + '--not-tag', + 'tag2', ] verifylist = [ ('not_tags', ['tag1', 'tag2']), @@ -4963,16 +5119,16 @@ def test_server_list_with_not_tag_pre_v226(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - 
self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.26 or greater is required', - str(ex)) + '--os-compute-api-version 2.26 or greater is required', str(ex) + ) def test_server_list_with_availability_zone(self): arglist = [ - '--availability-zone', 'test-az', + '--availability-zone', + 'test-az', ] verifylist = [ ('availability_zone', 'test-az'), @@ -4982,13 +5138,14 @@ def test_server_list_with_availability_zone(self): columns, data = self.cmd.take_action(parsed_args) self.kwargs['availability_zone'] = 'test-az' - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) def test_server_list_with_key_name(self): arglist = [ - '--key-name', 'test-key', + '--key-name', + 'test-key', ] verifylist = [ ('key_name', 'test-key'), @@ -4998,7 +5155,7 @@ def test_server_list_with_key_name(self): columns, data = self.cmd.take_action(parsed_args) self.kwargs['key_name'] = 'test-key' - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) @@ -5014,7 +5171,7 @@ def test_server_list_with_config_drive(self): columns, data = self.cmd.take_action(parsed_args) self.kwargs['config_drive'] = True - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) @@ -5030,13 +5187,14 @@ def test_server_list_with_no_config_drive(self): columns, data = self.cmd.take_action(parsed_args) self.kwargs['config_drive'] = False - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) def test_server_list_with_progress(self): arglist = [ - '--progress', '100', + '--progress', + '100', ] verifylist = [ ('progress', 100), @@ -5046,22 +5204,28 @@ def test_server_list_with_progress(self): columns, data = self.cmd.take_action(parsed_args) self.kwargs['progress'] = '100' - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) def test_server_list_with_progress_invalid(self): arglist = [ - '--progress', '101', + '--progress', + '101', ] self.assertRaises( - utils.ParserException, - self.check_parser, self.cmd, arglist, verify_args=[]) + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verify_args=[], + ) def test_server_list_with_vm_state(self): arglist = [ - '--vm-state', 'active', + '--vm-state', + 'active', ] verifylist = [ ('vm_state', 'active'), @@ -5071,13 +5235,14 @@ def test_server_list_with_vm_state(self): columns, data = self.cmd.take_action(parsed_args) self.kwargs['vm_state'] = 'active' - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) def test_server_list_with_task_state(self): arglist = [ - '--task-state', 'deleting', + '--task-state', + 'deleting', ] verifylist = [ ('task_state', 
'deleting'), @@ -5087,13 +5252,14 @@ def test_server_list_with_task_state(self): columns, data = self.cmd.take_action(parsed_args) self.kwargs['task_state'] = 'deleting' - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) def test_server_list_with_power_state(self): arglist = [ - '--power-state', 'running', + '--power-state', + 'running', ] verifylist = [ ('power_state', 'running'), @@ -5103,21 +5269,19 @@ def test_server_list_with_power_state(self): columns, data = self.cmd.take_action(parsed_args) self.kwargs['power_state'] = 1 - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) def test_server_list_long_with_host_status_v216(self): - self._set_mock_microversion('2.16') + self.set_compute_api_version('2.16') self.data1 = tuple( ( s.id, s.name, s.status, getattr(s, 'task_state'), - server.PowerStateColumn( - getattr(s, 'power_state') - ), + server.PowerStateColumn(getattr(s, 'power_state')), server.AddressesColumn(s.addresses), # Image will be an empty string if boot-from-volume self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, @@ -5127,11 +5291,11 @@ def test_server_list_long_with_host_status_v216(self): getattr(s, 'availability_zone'), server.HostColumn(getattr(s, 'hypervisor_hostname')), format_columns.DictColumn(s.metadata), - ) for s in self.servers) + ) + for s in self.servers + ) - arglist = [ - '--long' - ] + arglist = ['--long'] verifylist = [ ('long', True), ] @@ -5141,25 +5305,27 @@ def test_server_list_long_with_host_status_v216(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(self.columns_long, columns) self.assertEqual(tuple(self.data1), tuple(data)) # Next test with host_status in the data -- the column should be # present in this case. - self.sdk_client.servers.reset_mock() + self.compute_client.servers.reset_mock() self.attrs['host_status'] = 'UP' servers = self.setup_sdk_servers_mock(3) - self.sdk_client.servers.return_value = servers + self.compute_client.servers.return_value = servers # Make sure the returned image and flavor IDs match the servers. - Image = collections.namedtuple('Image', 'id name') - self.images_mock.return_value = [ - Image(id=s.image['id'], name=self.image.name) + self.image_client.images.return_value = [ + sdk_fakes.generate_fake_resource( + _image.Image, id=s.image['id'], name=self.image.name + ) # Image will be an empty string if boot-from-volume - for s in servers if s.image + for s in servers + if s.image ] # Add the expected host_status column and data. 
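The hunks before and after this point repeat one testing pattern: pin a fake compute microversion with set_compute_api_version(), parse the CLI arguments with check_parser(), and then either assert the kwargs passed to the mocked SDK proxy (self.compute_client.servers) or assert that the raised CommandError names the microversion that would be required. The condensed sketch below is not part of the patch; it is a minimal illustration that assumes it sits inside one of the _TestServerList subclasses defined in this diff (so self.cmd, self.kwargs, self.compute_client and the --tag threshold of 2.26 come from the surrounding hunks), and the argument values are hypothetical.

    # Illustrative sketch only -- reuses helpers visible in the diff above.
    def test_sketch_microversion_gated_filter(self):
        # Pin a version that is too old for the --tag filter, which the
        # tests above gate on microversion 2.26.
        self.set_compute_api_version('2.25')

        arglist = ['--tag', 'tag1']  # hypothetical argument values
        verifylist = [('tags', ['tag1'])]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        exc = self.assertRaises(
            exceptions.CommandError, self.cmd.take_action, parsed_args
        )
        self.assertIn(
            '--os-compute-api-version 2.26 or greater is required', str(exc)
        )
        # On the failure path the SDK proxy is never asked to list servers.
        self.compute_client.servers.assert_not_called()

        # With a new enough microversion the filter is instead translated
        # into a keyword argument on the proxy call.
        self.set_compute_api_version('2.26')
        self.cmd.take_action(parsed_args)
        self.kwargs['tags'] = 'tag1'
        self.compute_client.servers.assert_called_with(**self.kwargs)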
@@ -5170,9 +5336,7 @@ def test_server_list_long_with_host_status_v216(self): s.name, s.status, getattr(s, 'task_state'), - server.PowerStateColumn( - getattr(s, 'power_state') - ), + server.PowerStateColumn(getattr(s, 'power_state')), server.AddressesColumn(s.addresses), # Image will be an empty string if boot-from-volume self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, @@ -5183,18 +5347,19 @@ def test_server_list_long_with_host_status_v216(self): server.HostColumn(getattr(s, 'hypervisor_hostname')), format_columns.DictColumn(s.metadata), s.host_status, - ) for s in servers) + ) + for s in servers + ) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertEqual(columns_long, columns) self.assertEqual(tuple(self.data2), tuple(data)) class TestServerListV273(_TestServerList): - # Columns to be listed up. columns = ( 'ID', @@ -5217,10 +5382,11 @@ class TestServerListV273(_TestServerList): 'Availability Zone', 'Host', 'Properties', + 'Scheduler Hints', ) def setUp(self): - super(TestServerListV273, self).setUp() + super().setUp() # The fake servers' attributes. Use the original attributes names in # nova, not the ones printed by "server list" command. @@ -5236,18 +5402,20 @@ def setUp(self): # The servers to be listed. self.servers = self.setup_sdk_servers_mock(3) - self.sdk_client.servers.return_value = self.servers + self.compute_client.servers.return_value = self.servers - Image = collections.namedtuple('Image', 'id name') - self.images_mock.return_value = [ - Image(id=s.image['id'], name=self.image.name) + self.image_client.images.return_value = [ + sdk_fakes.generate_fake_resource( + _image.Image, id=s.image['id'], name=self.image.name + ) # Image will be an empty string if boot-from-volume - for s in self.servers if s.image + for s in self.servers + if s.image ] # The flavor information is embedded, so now reason for this to be # called - self.sdk_client.flavors = mock.NonCallableMock() + self.compute_client.flavors = mock.NonCallableMock() self.data = tuple( ( @@ -5258,85 +5426,68 @@ def setUp(self): # Image will be an empty string if boot-from-volume self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, self.flavor.name, - ) for s in self.servers) + ) + for s in self.servers + ) def test_server_list_with_locked_pre_v273(self): - - arglist = [ - '--locked' - ] - verifylist = [ - ('locked', True) - ] + arglist = ['--locked'] + verifylist = [('locked', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - ex = self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + ex = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.73 or greater is required', str(ex)) + '--os-compute-api-version 2.73 or greater is required', str(ex) + ) def test_server_list_with_locked(self): - - self._set_mock_microversion('2.73') - arglist = [ - '--locked' - ] - verifylist = [ - ('locked', True) - ] + self.set_compute_api_version('2.73') + arglist = ['--locked'] + verifylist = [('locked', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.kwargs['locked'] = True - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, tuple(data)) 
def test_server_list_with_unlocked_v273(self): - self._set_mock_microversion('2.73') + self.set_compute_api_version('2.73') - arglist = [ - '--unlocked' - ] - verifylist = [ - ('unlocked', True) - ] + arglist = ['--unlocked'] + verifylist = [('unlocked', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.kwargs['locked'] = False - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, tuple(data)) def test_server_list_with_locked_and_unlocked(self): - - self._set_mock_microversion('2.73') - arglist = [ - '--locked', - '--unlocked' - ] - verifylist = [ - ('locked', True), - ('unlocked', True) - ] + self.set_compute_api_version('2.73') + arglist = ['--locked', '--unlocked'] + verifylist = [('locked', True), ('unlocked', True)] ex = self.assertRaises( - utils.ParserException, - self.check_parser, self.cmd, arglist, verifylist) + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) self.assertIn('Argument parse failed', str(ex)) def test_server_list_with_changes_before(self): - self._set_mock_microversion('2.66') - arglist = [ - '--changes-before', '2016-03-05T06:27:59Z', - '--deleted' - ] + self.set_compute_api_version('2.66') + arglist = ['--changes-before', '2016-03-05T06:27:59Z', '--deleted'] verifylist = [ ('changes_before', '2016-03-05T06:27:59Z'), ('deleted', True), @@ -5348,17 +5499,17 @@ def test_server_list_with_changes_before(self): self.kwargs['changes-before'] = '2016-03-05T06:27:59Z' self.kwargs['deleted'] = True - self.sdk_client.servers.assert_called_with(**self.kwargs) + self.compute_client.servers.assert_called_with(**self.kwargs) self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, tuple(data)) @mock.patch.object(iso8601, 'parse_date', side_effect=iso8601.ParseError) - def test_server_list_with_invalid_changes_before( - self, mock_parse_isotime): - self._set_mock_microversion('2.66') + def test_server_list_with_invalid_changes_before(self, mock_parse_isotime): + self.set_compute_api_version('2.66') arglist = [ - '--changes-before', 'Invalid time value', + '--changes-before', + 'Invalid time value', ] verifylist = [ ('changes_before', 'Invalid time value'), @@ -5369,19 +5520,15 @@ def test_server_list_with_invalid_changes_before( self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('Invalid changes-before value: Invalid time ' - 'value', str(e)) - mock_parse_isotime.assert_called_once_with( - 'Invalid time value' - ) + self.assertEqual( + 'Invalid changes-before value: Invalid time value', str(e) + ) + mock_parse_isotime.assert_called_once_with('Invalid time value') def test_server_with_changes_before_pre_v266(self): - self._set_mock_microversion('2.65') + self.set_compute_api_version('2.65') - arglist = [ - '--changes-before', '2016-03-05T06:27:59Z', - '--deleted' - ] + arglist = ['--changes-before', '2016-03-05T06:27:59Z', '--deleted'] verifylist = [ ('changes_before', '2016-03-05T06:27:59Z'), ('deleted', True), @@ -5389,157 +5536,475 @@ def test_server_with_changes_before_pre_v266(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) - - def test_server_list_v269_with_partial_constructs(self): - 
self._set_mock_microversion('2.69') - arglist = [] - verifylist = [] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # include "partial results" from non-responsive part of - # infrastructure. - server_dict = { - "id": "server-id-95a56bfc4xxxxxx28d7e418bfd97813a", - "status": "UNKNOWN", - "tenant_id": "6f70656e737461636b20342065766572", - "created": "2018-12-03T21:06:18Z", - "links": [ - { - "href": "http://fake/v2.1/", - "rel": "self" - }, - { - "href": "http://fake", - "rel": "bookmark" - } - ], - # We need to pass networks as {} because its defined as a property - # of the novaclient Server class which gives {} by default. If not - # it will fail at formatting the networks info later on. - "networks": {} - } - fake_server = compute_fakes.fakes.FakeResource( - info=server_dict, - ) - self.servers.append(fake_server) - columns, data = self.cmd.take_action(parsed_args) - # get the first three servers out since our interest is in the partial - # server. - next(data) - next(data) - next(data) - partial_server = next(data) - expected_row = ( - 'server-id-95a56bfc4xxxxxx28d7e418bfd97813a', '', - 'UNKNOWN', server.AddressesColumn(''), '', '') - self.assertEqual(expected_row, partial_server) - + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + def test_server_list_v269_with_partial_constructs(self): + self.set_compute_api_version('2.69') + arglist = [] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + # include "partial results" from non-responsive part of + # infrastructure. + server_dict = { + "id": "server-id-95a56bfc4xxxxxx28d7e418bfd97813a", + "status": "UNKNOWN", + "tenant_id": "6f70656e737461636b20342065766572", + "created": "2018-12-03T21:06:18Z", + "links": [ + {"href": "http://fake/v2.1/", "rel": "self"}, + {"href": "http://fake", "rel": "bookmark"}, + ], + "networks": {}, + } + fake_server = _server.Server(**server_dict) + self.servers.append(fake_server) + columns, data = self.cmd.take_action(parsed_args) + # get the first three servers out since our interest is in the partial + # server. 
+ next(data) + next(data) + next(data) + partial_server = next(data) + expected_row = ( + 'server-id-95a56bfc4xxxxxx28d7e418bfd97813a', + None, + 'UNKNOWN', + server.AddressesColumn(None), + '', + '', + ) + self.assertEqual(expected_row, partial_server) + + +class TestServerListV296(_TestServerList): + columns = ( + 'ID', + 'Name', + 'Status', + 'Networks', + 'Image', + 'Flavor', + ) + columns_long = ( + 'ID', + 'Name', + 'Status', + 'Task State', + 'Power State', + 'Networks', + 'Image Name', + 'Image ID', + 'Flavor', + 'Availability Zone', + 'Host', + 'Properties', + 'Pinned Availability Zone', + ) + + def setUp(self): + super().setUp() + self.set_compute_api_version('2.96') + + self.image_client.images.return_value = [ + sdk_fakes.generate_fake_resource( + _image.Image, id=s.image['id'], name=self.image.name + ) + # Image will be an empty string if boot-from-volume + for s in self.servers + if s.image + ] + + self.compute_client.flavors.return_value = [ + sdk_fakes.generate_fake_resource( + _flavor.Flavor, id=s.flavor['id'], name=self.flavor.name + ) + for s in self.servers + ] + + self.data = tuple( + ( + s.id, + s.name, + s.status, + server.AddressesColumn(s.addresses), + # Image will be an empty string if boot-from-volume + self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, + self.flavor.name, + ) + for s in self.servers + ) + + def test_server_list_long_option(self): + self.data = tuple( + ( + s.id, + s.name, + s.status, + getattr(s, 'task_state'), + server.PowerStateColumn(getattr(s, 'power_state')), + server.AddressesColumn(s.addresses), + # Image will be an empty string if boot-from-volume + self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, + s.image['id'] if s.image else server.IMAGE_STRING_FOR_BFV, + self.flavor.name, + getattr(s, 'availability_zone'), + server.HostColumn(getattr(s, 'hypervisor_hostname')), + format_columns.DictColumn(s.metadata), + getattr(s, 'pinned_availability_zone', ''), + ) + for s in self.servers + ) + arglist = [ + '--long', + ] + verifylist = [ + ('all_projects', False), + ('long', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.compute_client.servers.assert_called_with(**self.kwargs) + image_ids = {s.image['id'] for s in self.servers if s.image} + self.image_client.images.assert_called_once_with( + id=f'in:{",".join(image_ids)}', + ) + self.compute_client.flavors.assert_called_once_with(is_public=None) + self.assertEqual(self.columns_long, columns) + self.assertEqual(self.data, tuple(data)) + + def test_server_list_column_option(self): + arglist = [ + '-c', + 'Project ID', + '-c', + 'User ID', + '-c', + 'Created At', + '-c', + 'Security Groups', + '-c', + 'Task State', + '-c', + 'Power State', + '-c', + 'Image ID', + '-c', + 'Flavor ID', + '-c', + 'Availability Zone', + '-c', + 'Host', + '-c', + 'Properties', + '-c', + 'Pinned Availability Zone', + '--long', + ] + verifylist = [ + ('long', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.compute_client.servers.assert_called_with(**self.kwargs) + self.assertIn('Project ID', columns) + self.assertIn('User ID', columns) + self.assertIn('Created At', columns) + self.assertIn('Security Groups', columns) + self.assertIn('Task State', columns) + self.assertIn('Power State', columns) + self.assertIn('Image ID', columns) + self.assertIn('Flavor ID', columns) + self.assertIn('Availability Zone', columns) + 
self.assertIn('Pinned Availability Zone', columns) + self.assertIn('Host', columns) + self.assertIn('Properties', columns) + self.assertCountEqual(columns, set(columns)) + + +class TestServerListV2100(_TestServerList): + columns = ( + 'ID', + 'Name', + 'Status', + 'Networks', + 'Image', + 'Flavor', + ) + columns_long = ( + 'ID', + 'Name', + 'Status', + 'Task State', + 'Power State', + 'Networks', + 'Image Name', + 'Image ID', + 'Flavor', + 'Availability Zone', + 'Host', + 'Properties', + 'Pinned Availability Zone', + 'Scheduler Hints', + ) + + def setUp(self): + super().setUp() + self.set_compute_api_version('2.100') + + self.image_client.images.return_value = [ + sdk_fakes.generate_fake_resource( + _image.Image, id=s.image['id'], name=self.image.name + ) + # Image will be an empty string if boot-from-volume + for s in self.servers + if s.image + ] + + self.compute_client.flavors.return_value = [ + sdk_fakes.generate_fake_resource( + _flavor.Flavor, id=s.flavor['id'], name=self.flavor.name + ) + for s in self.servers + ] + + self.data = tuple( + ( + s.id, + s.name, + s.status, + server.AddressesColumn(s.addresses), + # Image will be an empty string if boot-from-volume + self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, + self.flavor.name, + ) + for s in self.servers + ) + + def test_server_list_long_option(self): + self.data = tuple( + ( + s.id, + s.name, + s.status, + getattr(s, 'task_state'), + server.PowerStateColumn(getattr(s, 'power_state')), + server.AddressesColumn(s.addresses), + # Image will be an empty string if boot-from-volume + self.image.name if s.image else server.IMAGE_STRING_FOR_BFV, + s.image['id'] if s.image else server.IMAGE_STRING_FOR_BFV, + self.flavor.name, + getattr(s, 'availability_zone'), + server.HostColumn(getattr(s, 'hypervisor_hostname')), + format_columns.DictColumn(s.metadata), + getattr(s, 'pinned_availability_zone', ''), + format_columns.DictListColumn(None), + ) + for s in self.servers + ) + arglist = [ + '--long', + ] + verifylist = [ + ('all_projects', False), + ('long', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.compute_client.servers.assert_called_with(**self.kwargs) + image_ids = {s.image['id'] for s in self.servers if s.image} + self.image_client.images.assert_called_once_with( + id=f'in:{",".join(image_ids)}', + ) + self.compute_client.flavors.assert_called_once_with(is_public=None) + self.assertEqual(self.columns_long, columns) + self.assertEqual(self.data, tuple(data)) + + def test_server_list_column_option(self): + arglist = [ + '-c', + 'Project ID', + '-c', + 'User ID', + '-c', + 'Created At', + '-c', + 'Security Groups', + '-c', + 'Task State', + '-c', + 'Power State', + '-c', + 'Image ID', + '-c', + 'Flavor ID', + '-c', + 'Availability Zone', + '-c', + 'Host', + '-c', + 'Properties', + '-c', + 'Pinned Availability Zone', + '-c', + 'Scheduler Hints', + '--long', + ] + verifylist = [ + ('long', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.compute_client.servers.assert_called_with(**self.kwargs) + self.assertIn('Project ID', columns) + self.assertIn('User ID', columns) + self.assertIn('Created At', columns) + self.assertIn('Security Groups', columns) + self.assertIn('Task State', columns) + self.assertIn('Power State', columns) + self.assertIn('Image ID', columns) + self.assertIn('Flavor ID', columns) + self.assertIn('Availability Zone', columns) + 
self.assertIn('Pinned Availability Zone', columns) + self.assertIn('Host', columns) + self.assertIn('Properties', columns) + self.assertIn('Scheduler Hints', columns) + self.assertCountEqual(columns, set(columns)) + + +class TestServerAction(compute_fakes.TestComputev2): + def run_method_with_sdk_servers(self, method_name, server_count): + servers = compute_fakes.create_servers(count=server_count) + self.compute_client.find_server.side_effect = servers + + arglist = [s.id for s in servers] + verifylist = [ + ('server', arglist), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + calls = [mock.call(s.id) for s in servers] + method = getattr(self.compute_client, method_name) + method.assert_has_calls(calls) + self.assertIsNone(result) -class TestServerLock(TestServer): +class TestServerLock(TestServerAction): def setUp(self): - super(TestServerLock, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.LockServer(self.app, None) - # Set methods to be tested. - self.methods = { - 'lock': None, - } - - def test_server_lock_one_server(self): - self.run_method_with_servers('lock', 1) + def test_server_lock(self): + self.run_method_with_sdk_servers('lock_server', 1) def test_server_lock_multi_servers(self): - self.run_method_with_servers('lock', 3) + self.run_method_with_sdk_servers('lock_server', 3) def test_server_lock_with_reason(self): - server = compute_fakes.FakeServer.create_one_server() + self.set_compute_api_version('2.73') + + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.lock_server.return_value = None + arglist = [ - server.id, - '--reason', "blah", + self.server.id, + '--reason', + 'blah', ] verifylist = [ - ('reason', "blah"), - ('server', [server.id]) + ('server', [self.server.id]), + ('reason', 'blah'), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - ex = self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn( - '--os-compute-api-version 2.73 or greater is required', str(ex)) - - -class TestServerLockV273(TestServerLock): - - def setUp(self): - super(TestServerLockV273, self).setUp() - self.server = compute_fakes.FakeServer.create_one_server( - methods=self.methods) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.cmd.take_action(parsed_args) - # This is the return value for utils.find_resource() - self.servers_mock.get.return_value = self.server + self.compute_client.find_server.assert_called_with( + self.server.id, + ignore_missing=False, + ) + self.compute_client.lock_server.assert_called_with( + self.server.id, + locked_reason="blah", + ) - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.73') + def test_server_lock_with_reason_multi_servers(self): + self.set_compute_api_version('2.73') - # Get the command object to test - self.cmd = server.LockServer(self.app, None) + server_a = compute_fakes.create_one_server() + server_b = compute_fakes.create_one_server() - def test_server_lock_with_reason(self): + self.compute_client.find_server.side_effect = [server_a, server_b] + self.compute_client.lock_server.return_value = None arglist = [ - self.server.id, - '--reason', "blah", + server_a.id, + server_b.id, + '--reason', + 'choo..choo', ] verifylist = [ - ('reason', "blah"), - ('server', [self.server.id]) + ('server', [server_a.id, server_b.id]), + ('reason', 'choo..choo'), ] + parsed_args 
= self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.lock.assert_called_with(reason="blah") - def test_server_lock_multi_servers_with_reason(self): - server2 = compute_fakes.FakeServer.create_one_server( - methods=self.methods) + self.assertEqual(2, self.compute_client.find_server.call_count) + self.compute_client.lock_server.assert_has_calls( + [ + mock.call(server_a.id, locked_reason="choo..choo"), + mock.call(server_b.id, locked_reason="choo..choo"), + ] + ) + + def test_server_lock_with_reason_pre_v273(self): + self.set_compute_api_version('2.72') + + server = compute_fakes.create_one_server() + arglist = [ - self.server.id, server2.id, - '--reason', "choo..choo", + server.id, + '--reason', + "blah", ] verifylist = [ - ('reason', "choo..choo"), - ('server', [self.server.id, server2.id]) + ('server', [server.id]), + ('reason', "blah"), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - self.assertEqual(2, self.servers_mock.get.call_count) - self.server.lock.assert_called_with(reason="choo..choo") - self.assertEqual(2, self.server.lock.call_count) + ex = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertIn( + '--os-compute-api-version 2.73 or greater is required', + str(ex), + ) class TestServerMigrate(TestServer): - def setUp(self): - super(TestServerMigrate, self).setUp() - - methods = { - 'migrate': None, - 'live_migrate': None, - } - self.server = compute_fakes.FakeServer.create_one_server( - methods=methods) - - # This is the return value for utils.find_resource() - self.servers_mock.get.return_value = self.server + super().setUp() - self.servers_mock.migrate.return_value = None - self.servers_mock.live_migrate.return_value = None + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.migrate_server.return_value = None + self.compute_client.live_migrate_server.return_value = None # Get the command object to test self.cmd = server.MigrateServer(self.app, None) @@ -5554,20 +6019,28 @@ def test_server_migrate_no_options(self): ('disk_overcommit', None), ('wait', False), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.migrate.assert_called_with() - self.assertNotCalled(self.servers_mock.live_migrate) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.migrate_server.assert_called_once_with( + self.server, + ) + self.compute_client.live_migrate_server.assert_not_called() self.assertIsNone(result) - def test_server_migrate_with_host_2_56(self): + def test_server_migrate_with_host(self): # Tests that --host is allowed for a cold migration # for microversion 2.56 and greater. 
+ self.set_compute_api_version('2.56') + arglist = [ - '--host', 'fakehost', self.server.id, + '--host', + 'fakehost', + self.server.id, ] verifylist = [ ('live_migration', False), @@ -5576,21 +6049,23 @@ def test_server_migrate_with_host_2_56(self): ('disk_overcommit', None), ('wait', False), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.56') + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.migrate.assert_called_with(host='fakehost') - self.assertNotCalled(self.servers_mock.live_migrate) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.migrate_server.assert_called_once_with( + self.server, host='fakehost' + ) + self.compute_client.live_migrate_server.assert_not_called() self.assertIsNone(result) def test_server_migrate_with_block_migration(self): arglist = [ - '--block-migration', self.server.id, + '--block-migration', + self.server.id, ] verifylist = [ ('live_migration', False), @@ -5598,18 +6073,22 @@ def test_server_migrate_with_block_migration(self): ('disk_overcommit', None), ('wait', False), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) - self.servers_mock.get.assert_called_with(self.server.id) - self.assertNotCalled(self.servers_mock.live_migrate) - self.assertNotCalled(self.servers_mock.migrate) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.migrate_server.assert_not_called() + self.compute_client.live_migrate_server.assert_not_called() def test_server_migrate_with_disk_overcommit(self): arglist = [ - '--disk-overcommit', self.server.id, + '--disk-overcommit', + self.server.id, ] verifylist = [ ('live_migration', False), @@ -5617,20 +6096,27 @@ def test_server_migrate_with_disk_overcommit(self): ('disk_overcommit', True), ('wait', False), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) - self.servers_mock.get.assert_called_with(self.server.id) - self.assertNotCalled(self.servers_mock.live_migrate) - self.assertNotCalled(self.servers_mock.migrate) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.migrate_server.assert_not_called() + self.compute_client.live_migrate_server.assert_not_called() def test_server_migrate_with_host_pre_v256(self): # Tests that --host is not allowed for a cold migration # before microversion 2.56 (the test defaults to 2.1). 
+ self.set_compute_api_version('2.55') + arglist = [ - '--host', 'fakehost', self.server.id, + '--host', + 'fakehost', + self.server.id, ] verifylist = [ ('live_migration', False), @@ -5641,22 +6127,28 @@ def test_server_migrate_with_host_pre_v256(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - ex = self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + ex = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) # Make sure it's the error we expect. - self.assertIn('--os-compute-api-version 2.56 or greater is required ' - 'to use --host without --live-migration.', - str(ex)) + self.assertIn( + '--os-compute-api-version 2.56 or greater is required ' + 'to use --host without --live-migration.', + str(ex), + ) - self.servers_mock.get.assert_called_with(self.server.id) - self.assertNotCalled(self.servers_mock.live_migrate) - self.assertNotCalled(self.servers_mock.migrate) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.migrate_server.assert_not_called() + self.compute_client.live_migrate_server.assert_not_called() def test_server_live_migrate(self): # Tests the --live-migration option without --host or --live. arglist = [ - '--live-migration', self.server.id, + '--live-migration', + self.server.id, ] verifylist = [ ('live_migration', True), @@ -5665,22 +6157,31 @@ def test_server_live_migrate(self): ('disk_overcommit', None), ('wait', False), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.live_migrate.assert_called_with( + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.live_migrate_server.assert_called_once_with( + self.server, block_migration=False, - disk_over_commit=False, - host=None) - self.assertNotCalled(self.servers_mock.migrate) + host=None, + disk_overcommit=False, + ) + self.compute_client.migrate_server.assert_not_called() self.assertIsNone(result) def test_server_live_migrate_with_host(self): # This requires --os-compute-api-version >= 2.30 so the test uses 2.30. 
+ self.set_compute_api_version('2.30') + arglist = [ - '--live-migration', '--host', 'fakehost', self.server.id, + '--live-migration', + '--host', + 'fakehost', + self.server.id, ] verifylist = [ ('live_migration', True), @@ -5689,26 +6190,33 @@ def test_server_live_migrate_with_host(self): ('disk_overcommit', None), ('wait', False), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.30') + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) # No disk_overcommit and block_migration defaults to auto with # microversion >= 2.25 - self.server.live_migrate.assert_called_with( - block_migration='auto', host='fakehost') - self.assertNotCalled(self.servers_mock.migrate) + self.compute_client.live_migrate_server.assert_called_once_with( + self.server, + block_migration='auto', + host='fakehost', + ) + self.compute_client.migrate_server.assert_not_called() self.assertIsNone(result) def test_server_live_migrate_with_host_pre_v230(self): # Tests that the --host option is not supported for --live-migration # before microversion 2.30 (the test defaults to 2.1). + self.set_compute_api_version('2.29') + arglist = [ - '--live-migration', '--host', 'fakehost', self.server.id, + '--live-migration', + '--host', + 'fakehost', + self.server.id, ] verifylist = [ ('live_migration', True), @@ -5717,22 +6225,27 @@ def test_server_live_migrate_with_host_pre_v230(self): ('disk_overcommit', None), ('wait', False), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, self.cmd.take_action, - parsed_args) - + exceptions.CommandError, self.cmd.take_action, parsed_args + ) # Make sure it's the error we expect. 
        self.assertIn(
            '--os-compute-api-version 2.30 or greater is required '
-            'when using --host', str(ex))
+            'when using --host',
+            str(ex),
+        )

-        self.servers_mock.get.assert_called_with(self.server.id)
-        self.assertNotCalled(self.servers_mock.live_migrate)
-        self.assertNotCalled(self.servers_mock.migrate)
+        self.compute_client.find_server.assert_called_once_with(
+            self.server.id, ignore_missing=False
+        )
+        self.compute_client.migrate_server.assert_not_called()
+        self.compute_client.live_migrate_server.assert_not_called()

    def test_server_block_live_migrate(self):
+        self.set_compute_api_version('2.24')
+
        arglist = [
            '--live-migration',
            '--block-migration',
@@ -5746,20 +6259,25 @@ def test_server_block_live_migrate(self):
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

-        self.app.client_manager.compute.api_version = \
-            api_versions.APIVersion('2.24')
-
        result = self.cmd.take_action(parsed_args)

-        self.servers_mock.get.assert_called_with(self.server.id)
-        self.server.live_migrate.assert_called_with(
+        self.compute_client.find_server.assert_called_once_with(
+            self.server.id, ignore_missing=False
+        )
+        # Pre-2.25 microversions pass block_migration and disk_overcommit
+        # explicitly rather than using the 'auto' default
+        self.compute_client.live_migrate_server.assert_called_once_with(
+            self.server,
            block_migration=True,
-            disk_over_commit=False,
-            host=None)
-        self.assertNotCalled(self.servers_mock.migrate)
+            disk_overcommit=False,
+            host=None,
+        )
+        self.compute_client.migrate_server.assert_not_called()
        self.assertIsNone(result)

    def test_server_live_migrate_with_disk_overcommit(self):
+        self.set_compute_api_version('2.24')
+
        arglist = [
            '--live-migration',
            '--disk-overcommit',
@@ -5771,22 +6289,25 @@ def test_server_live_migrate_with_disk_overcommit(self):
            ('disk_overcommit', True),
            ('wait', False),
        ]
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
-        self.app.client_manager.compute.api_version = \
-            api_versions.APIVersion('2.24')
+        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        result = self.cmd.take_action(parsed_args)

-        self.servers_mock.get.assert_called_with(self.server.id)
-        self.server.live_migrate.assert_called_with(
+        self.compute_client.find_server.assert_called_once_with(
+            self.server.id, ignore_missing=False
+        )
+        self.compute_client.live_migrate_server.assert_called_once_with(
+            self.server,
            block_migration=False,
-            disk_over_commit=True,
-            host=None)
-        self.assertNotCalled(self.servers_mock.migrate)
+            disk_overcommit=True,
+            host=None,
+        )
+        self.compute_client.migrate_server.assert_not_called()
        self.assertIsNone(result)

    def test_server_live_migrate_with_disk_overcommit_post_v224(self):
+        self.set_compute_api_version('2.25')
+
        arglist = [
            '--live-migration',
            '--disk-overcommit',
@@ -5798,80 +6319,215 @@ def test_server_live_migrate_with_disk_overcommit_post_v224(self):
            ('disk_overcommit', True),
            ('wait', False),
        ]
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
-        self.app.client_manager.compute.api_version = \
-            api_versions.APIVersion('2.25')
+        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        with mock.patch.object(self.cmd.log, 'warning') as mock_warning:
            result = self.cmd.take_action(parsed_args)

-        self.servers_mock.get.assert_called_with(self.server.id)
-        # There should be no 'disk_over_commit' value present
-        self.server.live_migrate.assert_called_with(
-            block_migration='auto',
-            host=None)
-        self.assertNotCalled(self.servers_mock.migrate)
+        self.compute_client.find_server.assert_called_once_with(
+            self.server.id,
ignore_missing=False + ) + # There should be no 'disk_over_commit' value present + self.compute_client.live_migrate_server.assert_called_once_with( + self.server, + block_migration='auto', + host=None, + ) + self.compute_client.migrate_server.assert_not_called() + self.assertIsNone(result) + + # A warning should have been logged for using --disk-overcommit. + mock_warning.assert_called_once() + self.assertIn( + 'The --disk-overcommit and --no-disk-overcommit options ', + str(mock_warning.call_args[0][0]), + ) + + @mock.patch.object(common_utils, 'wait_for_status', return_value=True) + def test_server_migrate_with_wait(self, mock_wait_for_status): + arglist = [ + '--wait', + self.server.id, + ] + verifylist = [ + ('live_migration', False), + ('block_migration', None), + ('disk_overcommit', None), + ('wait', True), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.migrate_server.assert_called_once_with( + self.server, + ) + self.compute_client.live_migrate_server.assert_not_called() + mock_wait_for_status.assert_called_once_with( + self.compute_client.get_server, + self.server.id, + success_status=('active', 'verify_resize'), + callback=mock.ANY, + ) + self.assertIsNone(result) + + @mock.patch.object(common_utils, 'wait_for_status', return_value=False) + def test_server_migrate_with_wait_fails(self, mock_wait_for_status): + arglist = [ + '--wait', + self.server.id, + ] + verifylist = [ + ('live_migration', False), + ('block_migration', None), + ('disk_overcommit', None), + ('wait', True), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.migrate_server.assert_called_once_with( + self.server, + ) + self.compute_client.live_migrate_server.assert_not_called() + mock_wait_for_status.assert_called_once_with( + self.compute_client.get_server, + self.server.id, + success_status=('active', 'verify_resize'), + callback=mock.ANY, + ) + + +class TestServerReboot(TestServer): + def setUp(self): + super().setUp() + + self.compute_client.reboot_server.return_value = None + + self.cmd = server.RebootServer(self.app, None) + + def test_server_reboot(self): + servers = self.setup_sdk_servers_mock(count=1) + + arglist = [ + servers[0].id, + ] + verifylist = [ + ('server', servers[0].id), + ('reboot_type', 'SOFT'), + ('wait', False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.compute_client.reboot_server.assert_called_once_with( + servers[0].id, + 'SOFT', + ) + self.assertIsNone(result) + + def test_server_reboot_with_hard(self): + servers = self.setup_sdk_servers_mock(count=1) + + arglist = [ + '--hard', + servers[0].id, + ] + verifylist = [ + ('server', servers[0].id), + ('reboot_type', 'HARD'), + ('wait', False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.compute_client.reboot_server.assert_called_once_with( + servers[0].id, + 'HARD', + ) self.assertIsNone(result) - # A warning should have been logged for using --disk-overcommit. 
- mock_warning.assert_called_once() - self.assertIn( - 'The --disk-overcommit and --no-disk-overcommit options ', - str(mock_warning.call_args[0][0])) @mock.patch.object(common_utils, 'wait_for_status', return_value=True) - def test_server_migrate_with_wait(self, mock_wait_for_status): + def test_server_reboot_with_wait(self, mock_wait_for_status): + servers = self.setup_sdk_servers_mock(count=1) + arglist = [ - '--wait', self.server.id, + '--wait', + servers[0].id, ] verifylist = [ - ('live_migration', False), - ('block_migration', None), - ('disk_overcommit', None), + ('server', servers[0].id), + ('reboot_type', 'SOFT'), ('wait', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.migrate.assert_called_with() - self.assertNotCalled(self.servers_mock.live_migrate) self.assertIsNone(result) + self.compute_client.reboot_server.assert_called_once_with( + servers[0].id, + 'SOFT', + ) + mock_wait_for_status.assert_called_once_with( + self.compute_client.get_server, + servers[0].id, + callback=mock.ANY, + ) + @mock.patch.object(server.LOG, 'error') @mock.patch.object(common_utils, 'wait_for_status', return_value=False) - def test_server_migrate_with_wait_fails(self, mock_wait_for_status): + def test_server_reboot_with_wait_fails( + self, + mock_wait_for_status, + mock_log, + ): + servers = self.setup_sdk_servers_mock(count=1) + arglist = [ - '--wait', self.server.id, + '--wait', + servers[0].id, ] verifylist = [ - ('live_migration', False), - ('block_migration', None), - ('disk_overcommit', None), + ('server', servers[0].id), + ('reboot_type', 'SOFT'), ('wait', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(SystemExit, self.cmd.take_action, parsed_args) - - self.servers_mock.get.assert_called_with(self.server.id) - self.server.migrate.assert_called_with() - self.assertNotCalled(self.servers_mock.live_migrate) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.compute_client.reboot_server.assert_called_once_with( + servers[0].id, + 'SOFT', + ) + mock_wait_for_status.assert_called_once_with( + self.compute_client.get_server, + servers[0].id, + callback=mock.ANY, + ) -class TestServerPause(TestServer): +class TestServerPause(TestServerAction): def setUp(self): - super(TestServerPause, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.PauseServer(self.app, None) - # Set methods to be tested. - self.methods = { - 'pause': None, - } - def test_server_pause_one_server(self): self.run_method_with_sdk_servers('pause_server', 1) @@ -5880,90 +6536,71 @@ def test_server_pause_multi_servers(self): class TestServerRebuild(TestServer): - def setUp(self): - super(TestServerRebuild, self).setUp() + super().setUp() - # Return value for utils.find_resource for image self.image = image_fakes.create_one_image() - self.get_image_mock.return_value = self.image + self.image_client.get_image.return_value = self.image - # Fake the rebuilt new server. attrs = { - 'image': { - 'id': self.image.id - }, - 'networks': {}, - 'adminPass': 'passw0rd', - } - new_server = compute_fakes.FakeServer.create_one_server(attrs=attrs) - - # Fake the server to be rebuilt. The IDs of them should be the same. 
- attrs['id'] = new_server.id - methods = { - 'rebuild': new_server, + 'status': 'ACTIVE', + 'image': {'id': self.image.id}, } - self.server = compute_fakes.FakeServer.create_one_server( - attrs=attrs, - methods=methods - ) - - # Return value for utils.find_resource for server. - self.servers_mock.get.return_value = self.server + self.server = compute_fakes.create_one_server(attrs=attrs) + self.compute_client.find_server.return_value = self.server + self.compute_client.rebuild_server.return_value = self.server self.cmd = server.RebuildServer(self.app, None) def test_rebuild_with_image_name(self): image_name = 'my-custom-image' - user_image = image_fakes.create_one_image( - attrs={'name': image_name}) - self.find_image_mock.return_value = user_image - - attrs = { - 'image': { - 'id': user_image.id - }, - 'networks': {}, - 'adminPass': 'passw0rd', - } - new_server = compute_fakes.FakeServer.create_one_server(attrs=attrs) - self.server.rebuild.return_value = new_server + image = image_fakes.create_one_image(attrs={'name': image_name}) + self.image_client.find_image.return_value = image arglist = [ self.server.id, - '--image', image_name + '--image', + image_name, ] verifylist = [ ('server', self.server.id), - ('image', image_name) + ('image', image_name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # Get the command object to test. + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.find_image_mock.assert_called_with( - image_name, ignore_missing=False) - self.get_image_mock.assert_called_with(user_image.id) - self.server.rebuild.assert_called_with(user_image, None) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_called_with( + image_name, ignore_missing=False + ) + self.image_client.get_image.assert_called_with(self.image.id) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, image + ) def test_rebuild_with_current_image(self): arglist = [ self.server.id, ] - verifylist = [ - ('server', self.server.id) - ] + verifylist = [('server', self.server.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # Get the command object to test. 
self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.find_image_mock.assert_not_called() - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with(self.image, None) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image + ) def test_rebuild_with_volume_backed_server_no_image(self): # the volume-backed server will have the image attribute set to an @@ -5976,32 +6613,38 @@ def test_rebuild_with_volume_backed_server_no_image(self): verifylist = [ ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn('The --image option is required', str(exc)) def test_rebuild_with_name(self): name = 'test-server-xxx' arglist = [ self.server.id, - '--name', name, + '--name', + name, ] verifylist = [ ('server', self.server.id), ('name', name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # Get the command object to test + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with(self.image, None, name=name) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, name=name + ) def test_rebuild_with_preserve_ephemeral(self): arglist = [ @@ -6012,15 +6655,20 @@ def test_rebuild_with_preserve_ephemeral(self): ('server', self.server.id), ('preserve_ephemeral', True), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # Get the command object to test + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( - self.image, None, preserve_ephemeral=True) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, preserve_ephemeral=True + ) def test_rebuild_with_no_preserve_ephemeral(self): arglist = [ @@ -6036,74 +6684,138 @@ def test_rebuild_with_no_preserve_ephemeral(self): # Get the command object to test self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( - self.image, None, preserve_ephemeral=False) + 
self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, preserve_ephemeral=False + ) def test_rebuild_with_password(self): password = 'password-xxx' - arglist = [ - self.server.id, - '--password', password - ] - verifylist = [ - ('server', self.server.id), - ('password', password) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + arglist = [self.server.id, '--password', password] + verifylist = [('server', self.server.id), ('password', password)] - # Get the command object to test + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with(self.image, password) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, + self.image, + admin_password=password, + ) def test_rebuild_with_description(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.19') + self.set_compute_api_version('2.19') + + description = 'description1' + arglist = [self.server.id, '--description', description] + verifylist = [('server', self.server.id), ('description', description)] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.cmd.take_action(parsed_args) + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, description=description + ) + + def test_rebuild_with_description_pre_v219(self): + self.set_compute_api_version('2.18') description = 'description1' + arglist = [self.server.id, '--description', description] + verifylist = [('server', self.server.id), ('description', description)] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + @mock.patch.object(common_utils, 'wait_for_status', return_value=True) + def test_rebuild_with_wait_ok(self, mock_wait_for_status): arglist = [ + '--wait', self.server.id, - '--description', description ] verifylist = [ + ('wait', True), ('server', self.server.id), - ('description', description) ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with(self.image, None, - description=description) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + 
[mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image + ) - def test_rebuild_with_description_pre_v219(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.18') + mock_wait_for_status.assert_called_once_with( + self.compute_client.get_server, + self.server.id, + callback=mock.ANY, + success_status=['active'], + ) - description = 'description1' + @mock.patch.object(common_utils, 'wait_for_status', return_value=False) + def test_rebuild_with_wait_fails(self, mock_wait_for_status): arglist = [ + '--wait', self.server.id, - '--description', description ] verifylist = [ + ('wait', True), ('server', self.server.id), - ('description', description) ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_called_once_with(self.image.id) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image + ) + + mock_wait_for_status.assert_called_once_with( + self.compute_client.get_server, + self.server.id, + callback=mock.ANY, + success_status=['active'], + ) @mock.patch.object(common_utils, 'wait_for_status', return_value=True) - def test_rebuild_with_wait_ok(self, mock_wait_for_status): + def test_rebuild_with_wait_shutoff_status(self, mock_wait_for_status): + self.server.status = 'SHUTOFF' arglist = [ '--wait', self.server.id, @@ -6112,26 +6824,31 @@ def test_rebuild_with_wait_ok(self, mock_wait_for_status): ('wait', True), ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # Get the command object to test. 
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - # kwargs = dict(success_status=['active', 'verify_resize'],) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image + ) mock_wait_for_status.assert_called_once_with( - self.servers_mock.get, + self.compute_client.get_server, self.server.id, callback=mock.ANY, - # **kwargs + success_status=['shutoff'], ) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with(self.image, None) - - @mock.patch.object(common_utils, 'wait_for_status', return_value=False) - def test_rebuild_with_wait_fails(self, mock_wait_for_status): + @mock.patch.object(common_utils, 'wait_for_status', return_value=True) + def test_rebuild_with_wait_error_status(self, mock_wait_for_status): + self.server.status = 'ERROR' arglist = [ '--wait', self.server.id, @@ -6140,86 +6857,126 @@ def test_rebuild_with_wait_fails(self, mock_wait_for_status): ('wait', True), ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.cmd.take_action(parsed_args) - self.assertRaises(SystemExit, self.cmd.take_action, parsed_args) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image + ) mock_wait_for_status.assert_called_once_with( - self.servers_mock.get, + self.compute_client.get_server, self.server.id, callback=mock.ANY, + success_status=['active'], + ) + + def test_rebuild_wrong_status_fails(self): + self.server.status = 'SHELVED' + arglist = [ + self.server.id, + ] + verifylist = [ + ('server', self.server.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args ) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with(self.image, None) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_called_once_with(self.image.id) + self.compute_client.rebuild_server.assert_not_called() def test_rebuild_with_property(self): arglist = [ self.server.id, - '--property', 'key1=value1', - '--property', 'key2=value2' + '--property', + 'key1=value1', + '--property', + 'key2=value2', ] expected_properties = {'key1': 'value1', 'key2': 'value2'} verifylist = [ ('server', self.server.id), - ('properties', expected_properties) + ('properties', expected_properties), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # Get the command object to test + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( 
- self.image, None, meta=expected_properties) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, metadata=expected_properties + ) def test_rebuild_with_keypair_name(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.54') + self.set_compute_api_version('2.54') self.server.key_name = 'mykey' arglist = [ self.server.id, - '--key-name', self.server.key_name, + '--key-name', + self.server.key_name, ] verifylist = [ ('server', self.server.id), - ('key_name', self.server.key_name) + ('key_name', self.server.key_name), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( - self.image, None, key_name=self.server.key_name) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, key_name=self.server.key_name + ) def test_rebuild_with_keypair_name_pre_v254(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.53') + self.set_compute_api_version('2.53') self.server.key_name = 'mykey' arglist = [ self.server.id, - '--key-name', self.server.key_name, + '--key-name', + self.server.key_name, ] verifylist = [ ('server', self.server.id), - ('key_name', self.server.key_name) + ('key_name', self.server.key_name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_rebuild_with_no_keypair_name(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.54') + self.set_compute_api_version('2.54') self.server.key_name = 'mykey' arglist = [ @@ -6229,85 +6986,98 @@ def test_rebuild_with_no_keypair_name(self): verifylist = [ ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( - self.image, None, key_name=None) + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, key_name=None + ) def test_rebuild_with_keypair_name_and_unset(self): self.server.key_name = 'mykey' arglist = [ self.server.id, - '--key-name', self.server.key_name, + '--key-name', + self.server.key_name, '--no-key-name', ] verifylist = [ ('server', self.server.id), 
- ('key_name', self.server.key_name) + ('key_name', self.server.key_name), ] self.assertRaises( - utils.ParserException, + test_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) - - @mock.patch('openstackclient.compute.v2.server.io.open') - def test_rebuild_with_user_data(self, mock_open): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.57') + self.cmd, + arglist, + verifylist, + ) - mock_file = mock.Mock(name='File') - mock_open.return_value = mock_file - mock_open.read.return_value = '#!/bin/sh' + def test_rebuild_with_user_data(self): + self.set_compute_api_version('2.57') + user_data = b'#!/bin/sh' arglist = [ self.server.id, - '--user-data', 'userdata.sh', + '--user-data', + 'userdata.sh', ] verifylist = [ ('server', self.server.id), ('user_data', 'userdata.sh'), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + with mock.patch( + 'openstackclient.compute.v2.server.open', + mock.mock_open(read_data=user_data), + ) as mock_file: + self.cmd.take_action(parsed_args) # Ensure the userdata file is opened - mock_open.assert_called_with('userdata.sh') - - # Ensure the userdata file is closed - mock_file.close.assert_called_with() + mock_file.assert_called_with('userdata.sh', 'rb') - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( - self.image, None, - userdata=mock_file,) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, + self.image, + user_data=base64.b64encode(user_data).decode('utf-8'), + ) def test_rebuild_with_user_data_pre_v257(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.56') + self.set_compute_api_version('2.56') arglist = [ self.server.id, - '--user-data', 'userdata.sh', + '--user-data', + 'userdata.sh', ] verifylist = [ ('server', self.server.id), ('user_data', 'userdata.sh'), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_rebuild_with_no_user_data(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.54') + self.set_compute_api_version('2.54') self.server.key_name = 'mykey' arglist = [ @@ -6318,17 +7088,23 @@ def test_rebuild_with_no_user_data(self): ('server', self.server.id), ('no_user_data', True), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( - self.image, None, userdata=None) + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) 
+ self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, user_data=None + ) def test_rebuild_with_no_user_data_pre_v254(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.53') + self.set_compute_api_version('2.53') arglist = [ self.server.id, @@ -6338,69 +7114,78 @@ def test_rebuild_with_no_user_data_pre_v254(self): ('server', self.server.id), ('no_user_data', True), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_rebuild_with_user_data_and_unset(self): arglist = [ self.server.id, - '--user-data', 'userdata.sh', + '--user-data', + 'userdata.sh', '--no-user-data', ] self.assertRaises( - utils.ParserException, + test_utils.ParserException, self.check_parser, - self.cmd, arglist, None) + self.cmd, + arglist, + None, + ) def test_rebuild_with_trusted_image_cert(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.63') + self.set_compute_api_version('2.63') arglist = [ self.server.id, - '--trusted-image-cert', 'foo', - '--trusted-image-cert', 'bar', + '--trusted-image-cert', + 'foo', + '--trusted-image-cert', + 'bar', ] verifylist = [ ('server', self.server.id), ('trusted_image_certs', ['foo', 'bar']), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( - self.image, None, trusted_image_certificates=['foo', 'bar']) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, trusted_image_certificates=['foo', 'bar'] + ) def test_rebuild_with_trusted_image_cert_pre_v263(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.62') + self.set_compute_api_version('2.62') arglist = [ self.server.id, - '--trusted-image-cert', 'foo', - '--trusted-image-cert', 'bar', + '--trusted-image-cert', + 'foo', + '--trusted-image-cert', + 'bar', ] verifylist = [ ('server', self.server.id), ('trusted_image_certs', ['foo', 'bar']), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_rebuild_with_no_trusted_image_cert(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.63') + self.set_compute_api_version('2.63') arglist = [ self.server.id, @@ -6410,17 +7195,23 @@ def test_rebuild_with_no_trusted_image_cert(self): ('server', self.server.id), ('no_trusted_image_certs', True), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - 
self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( - self.image, None, trusted_image_certificates=None) + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, trusted_image_certificates=None + ) def test_rebuild_with_no_trusted_image_cert_pre_v263(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.62') + self.set_compute_api_version('2.62') arglist = [ self.server.id, @@ -6433,189 +7224,176 @@ def test_rebuild_with_no_trusted_image_cert_pre_v263(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_rebuild_with_hostname(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.90') + self.set_compute_api_version('2.90') arglist = [ self.server.id, - '--hostname', 'new-hostname' + '--hostname', + 'new-hostname', ] verifylist = [ ('server', self.server.id), - ('hostname', 'new-hostname') + ('hostname', 'new-hostname'), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.get_image_mock.assert_called_with(self.image.id) - self.server.rebuild.assert_called_with( - self.image, None, hostname='new-hostname') + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_not_called() + self.image_client.get_image.assert_has_calls( + [mock.call(self.image.id), mock.call(self.image.id)] + ) + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.image, hostname='new-hostname' + ) def test_rebuild_with_hostname_pre_v290(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.89') + self.set_compute_api_version('2.89') arglist = [ self.server.id, - '--hostname', 'new-hostname', - ] - verifylist = [ - ('server', self.server.id), - ('hostname', 'new-hostname') + '--hostname', + 'new-hostname', ] + verifylist = [('server', self.server.id), ('hostname', 'new-hostname')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) class TestServerRebuildVolumeBacked(TestServer): - def setUp(self): super().setUp() self.new_image = image_fakes.create_one_image() - self.find_image_mock.return_value = self.new_image + self.image_client.find_image.return_value = self.new_image attrs = { + 'status': 'ACTIVE', 'image': '', - 'networks': {}, - 'adminPass': 'passw0rd', - } - new_server = compute_fakes.FakeServer.create_one_server(attrs=attrs) - - # Fake the server to be rebuilt. The IDs of them should be the same. - attrs['id'] = new_server.id - methods = { - 'rebuild': new_server, } - self.server = compute_fakes.FakeServer.create_one_server( - attrs=attrs, - methods=methods - ) - - # Return value for utils.find_resource for server. 
- self.servers_mock.get.return_value = self.server + self.server = compute_fakes.create_one_server(attrs=attrs) + self.compute_client.find_server.return_value = self.server + self.compute_client.rebuild_server.return_value = self.server self.cmd = server.RebuildServer(self.app, None) def test_rebuild_with_reimage_boot_volume(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.93') + self.set_compute_api_version('2.93') arglist = [ self.server.id, '--reimage-boot-volume', - '--image', self.new_image.id + '--image', + self.new_image.id, ] verifylist = [ ('server', self.server.id), ('reimage_boot_volume', True), - ('image', self.new_image.id) + ('image', self.new_image.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.rebuild.assert_called_with( - self.new_image, None) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.image_client.find_image.assert_called_with( + self.new_image.id, ignore_missing=False + ) + self.image_client.get_image.assert_not_called() + self.compute_client.rebuild_server.assert_called_once_with( + self.server, self.new_image + ) def test_rebuild_with_no_reimage_boot_volume(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.93') + self.set_compute_api_version('2.93') arglist = [ self.server.id, '--no-reimage-boot-volume', - '--image', self.new_image.id + '--image', + self.new_image.id, ] verifylist = [ ('server', self.server.id), ('reimage_boot_volume', False), - ('image', self.new_image.id) + ('image', self.new_image.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn('--reimage-boot-volume is required', str(exc)) def test_rebuild_with_reimage_boot_volume_pre_v293(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.92') + self.set_compute_api_version('2.92') arglist = [ self.server.id, '--reimage-boot-volume', - '--image', self.new_image.id + '--image', + self.new_image.id, ] verifylist = [ ('server', self.server.id), - ('reimage_boot_volume', True) + ('reimage_boot_volume', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.93 or greater is required', str(exc)) - + '--os-compute-api-version 2.93 or greater is required', str(exc) + ) -class TestEvacuateServer(TestServer): +class TestServerEvacuate(TestServer): def setUp(self): - super(TestEvacuateServer, self).setUp() - # Return value for utils.find_resource for image + super().setUp() + self.image = image_fakes.create_one_image() - self.images_mock.get.return_value = self.image + self.image_client.get_image.return_value = self.image - # Fake the rebuilt new server. attrs = { - 'image': { - 'id': self.image.id - }, + 'image': self.image, 'networks': {}, 'adminPass': 'passw0rd', + 'status': 'ACTIVE', } - new_server = compute_fakes.FakeServer.create_one_server(attrs=attrs) - - # Fake the server to be rebuilt. The IDs of them should be the same. 
- attrs['id'] = new_server.id - methods = { - 'evacuate': new_server, - } - self.server = compute_fakes.FakeServer.create_one_server( - attrs=attrs, - methods=methods - ) + self.server = compute_fakes.create_one_server(attrs=attrs) + attrs['id'] = self.server.id + self.new_server = compute_fakes.create_one_server(attrs=attrs) # Return value for utils.find_resource for server. - self.servers_mock.get.return_value = self.server + self.compute_client.find_server.return_value = self.server + self.compute_client.get_server.return_value = self.server self.cmd = server.EvacuateServer(self.app, None) def _test_evacuate(self, args, verify_args, evac_args): parsed_args = self.check_parser(self.cmd, args, verify_args) - - # Get the command object to test self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.evacuate.assert_called_with(**evac_args) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.evacuate_server.assert_called_once_with( + self.server, **evac_args + ) + self.compute_client.get_server.assert_called_once_with(self.server.id) def test_evacuate(self): args = [ @@ -6625,48 +7403,53 @@ def test_evacuate(self): ('server', self.server.id), ] evac_args = { - 'host': None, 'on_shared_storage': False, 'password': None, + 'host': None, + 'on_shared_storage': False, + 'admin_pass': None, } self._test_evacuate(args, verify_args, evac_args) def test_evacuate_with_password(self): args = [ self.server.id, - '--password', 'password', + '--password', + 'password', ] verify_args = [ ('server', self.server.id), ('password', 'password'), ] evac_args = { - 'host': None, 'on_shared_storage': False, 'password': 'password', + 'host': None, + 'on_shared_storage': False, + 'admin_pass': 'password', } self._test_evacuate(args, verify_args, evac_args) def test_evacuate_with_host(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.29') + self.set_compute_api_version('2.29') host = 'target-host' args = [ self.server.id, - '--host', 'target-host', + '--host', + 'target-host', ] verify_args = [ ('server', self.server.id), ('host', 'target-host'), ] - evac_args = {'host': host, 'password': None} + evac_args = {'host': host, 'admin_pass': None} self._test_evacuate(args, verify_args, evac_args) def test_evacuate_with_host_pre_v229(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.28') + self.set_compute_api_version('2.28') args = [ self.server.id, - '--host', 'target-host', + '--host', + 'target-host', ] verify_args = [ ('server', self.server.id), @@ -6675,35 +7458,28 @@ def test_evacuate_with_host_pre_v229(self): parsed_args = self.check_parser(self.cmd, args, verify_args) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_evacuate_without_share_storage(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.13') + self.set_compute_api_version('2.13') - args = [ - self.server.id, - '--shared-storage' - ] + args = [self.server.id, '--shared-storage'] verify_args = [ ('server', self.server.id), ('shared_storage', True), ] evac_args = { - 'host': None, 'on_shared_storage': True, 'password': None, + 'host': None, + 'on_shared_storage': True, + 'admin_pass': None, } self._test_evacuate(args, verify_args, evac_args) def test_evacuate_without_share_storage_post_v213(self): - 
self.app.client_manager.compute.api_version = \
-            api_versions.APIVersion('2.14')
+        self.set_compute_api_version('2.14')

-        args = [
-            self.server.id,
-            '--shared-storage'
-        ]
+        args = [self.server.id, '--shared-storage']
        verify_args = [
            ('server', self.server.id),
            ('shared_storage', True),
@@ -6711,9 +7487,8 @@ def test_evacuate_without_share_storage_post_v213(self):
        parsed_args = self.check_parser(self.cmd, args, verify_args)

        self.assertRaises(
-            exceptions.CommandError,
-            self.cmd.take_action,
-            parsed_args)
+            exceptions.CommandError, self.cmd.take_action, parsed_args
+        )

    @mock.patch.object(common_utils, 'wait_for_status', return_value=True)
    def test_evacuate_with_wait_ok(self, mock_wait_for_status):
@@ -6726,189 +7501,202 @@ def test_evacuate_with_wait_ok(self, mock_wait_for_status):
            ('wait', True),
        ]
        evac_args = {
-            'host': None, 'on_shared_storage': False, 'password': None,
+            'host': None,
+            'on_shared_storage': False,
+            'admin_pass': None,
        }
        self._test_evacuate(args, verify_args, evac_args)
        mock_wait_for_status.assert_called_once_with(
-            self.servers_mock.get,
+            self.compute_client.get_server,
            self.server.id,
+            success_status=['ACTIVE'],
            callback=mock.ANY,
        )

+    @mock.patch.object(common_utils, 'wait_for_status', return_value=True)
+    def test_evacuate_with_wait_ok_shutoff(self, mock_wait_for_status):
+        self.server.status = 'SHUTOFF'
+        self.compute_client.get_server.return_value = self.server
+
+        args = [
+            self.server.id,
+            '--wait',
+        ]
+        verify_args = [
+            ('server', self.server.id),
+            ('wait', True),
+        ]
+        evac_args = {
+            'host': None,
+            'on_shared_storage': False,
+            'admin_pass': None,
+        }
+        self._test_evacuate(args, verify_args, evac_args)
+        mock_wait_for_status.assert_called_once_with(
+            self.compute_client.get_server,
+            self.server.id,
+            success_status=['ACTIVE', 'SHUTOFF'],
+            callback=mock.ANY,
+        )

-class TestServerRemoveFixedIP(TestServer):
+class TestServerRemoveFixedIP(compute_fakes.TestComputev2):
    def setUp(self):
-        super(TestServerRemoveFixedIP, self).setUp()
+        super().setUp()
+
+        self.server = compute_fakes.create_one_server()
+        self.compute_client.find_server.return_value = self.server

        # Get the command object to test
        self.cmd = server.RemoveFixedIP(self.app, None)

-        # Set unshelve method to be tested.
-        self.methods = {
-            'remove_fixed_ip': None,
-        }
-
    def test_server_remove_fixed_ip(self):
-        servers = self.setup_servers_mock(count=1)
-
        arglist = [
-            servers[0].id,
+            self.server.id,
            '1.2.3.4',
        ]
        verifylist = [
-            ('server', servers[0].id),
+            ('server', self.server.id),
            ('ip_address', '1.2.3.4'),
        ]
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)

-        servers[0].remove_fixed_ip.assert_called_once_with('1.2.3.4')
+        self.compute_client.find_server.assert_called_once_with(
+            self.server.id, ignore_missing=False
+        )
+        self.compute_client.remove_fixed_ip_from_server.assert_called_once_with(
+            self.server, '1.2.3.4'
+        )
        self.assertIsNone(result)


-class TestServerRescue(TestServer):
-
+class TestServerRescue(compute_fakes.TestComputev2):
    def setUp(self):
-        super(TestServerRescue, self).setUp()
-
-        # Return value for utils.find_resource for image
-        self.image = image_fakes.create_one_image()
-        self.get_image_mock.return_value = self.image
-
-        new_server = compute_fakes.FakeServer.create_one_server()
-        attrs = {
-            'id': new_server.id,
-            'image': {
-                'id': self.image.id,
-            },
-            'networks': {},
-            'adminPass': 'passw0rd',
-        }
-        methods = {
-            'rescue': new_server,
-        }
-        self.server = compute_fakes.FakeServer.create_one_server(
-            attrs=attrs,
-            methods=methods,
-        )
+        super().setUp()

-        # Return value for utils.find_resource for server
-        self.servers_mock.get.return_value = self.server
+        self.server = compute_fakes.create_one_server()
+        self.compute_client.find_server.return_value = self.server

        self.cmd = server.RescueServer(self.app, None)

-    def test_rescue_with_current_image(self):
+    def test_rescue(self):
        arglist = [
            self.server.id,
        ]
        verifylist = [
            ('server', self.server.id),
        ]
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-        # Get the command object to test
-        self.cmd.take_action(parsed_args)
+        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+        result = self.cmd.take_action(parsed_args)

-        self.servers_mock.get.assert_called_with(self.server.id)
-        self.server.rescue.assert_called_with(image=None, password=None)
+        self.compute_client.find_server.assert_called_once_with(
+            self.server.id, ignore_missing=False
+        )
+        self.compute_client.rescue_server.assert_called_once_with(
+            self.server, admin_pass=None, image_ref=None
+        )
+        self.assertIsNone(result)

-    def test_rescue_with_new_image(self):
+    def test_rescue_with_image(self):
        new_image = image_fakes.create_one_image()
-        self.find_image_mock.return_value = new_image
+        self.image_client.find_image.return_value = new_image
        arglist = [
-            '--image', new_image.id,
+            '--image',
+            new_image.id,
            self.server.id,
        ]
        verifylist = [
            ('image', new_image.id),
            ('server', self.server.id),
        ]
-        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-        # Get the command object to test
-        self.cmd.take_action(parsed_args)
+        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+        result = self.cmd.take_action(parsed_args)

-        self.servers_mock.get.assert_called_with(self.server.id)
-        self.find_image_mock.assert_called_with(new_image.id)
-        self.server.rescue.assert_called_with(image=new_image, password=None)
+        self.image_client.find_image.assert_called_with(
+            new_image.id, ignore_missing=False
+        )
+        self.compute_client.find_server.assert_called_once_with(
+            self.server.id, ignore_missing=False
+        )
+        self.compute_client.rescue_server.assert_called_once_with(
+            self.server, admin_pass=None, image_ref=new_image.id
+        )
+        self.assertIsNone(result)
- def test_rescue_with_current_image_and_password(self): + def test_rescue_with_password(self): password = 'password-xxx' arglist = [ - '--password', password, + '--password', + password, self.server.id, ] verifylist = [ ('password', password), ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # Get the command object to test - self.cmd.take_action(parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.rescue.assert_called_with(image=None, password=password) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.rescue_server.assert_called_once_with( + self.server, admin_pass=password, image_ref=None + ) + self.assertIsNone(result) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.floating_ip_remove' -) class TestServerRemoveFloatingIPCompute(compute_fakes.TestComputev2): - def setUp(self): - super(TestServerRemoveFloatingIPCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server - # Get the command object to test self.cmd = server.RemoveFloatingIP(self.app, None) - def test_server_remove_floating_ip(self, fip_mock): - _floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip() - + def test_server_remove_floating_ip(self): arglist = [ - 'server1', - _floating_ip['ip'], + self.server.name, + '1.2.3.4', ] verifylist = [ - ('server', 'server1'), - ('ip_address', _floating_ip['ip']), + ('server', self.server.name), + ('ip_address', '1.2.3.4'), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - fip_mock.assert_called_once_with( - 'server1', - _floating_ip['ip'], + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False + ) + self.compute_client.remove_floating_ip_from_server.assert_called_once_with( + self.server, '1.2.3.4' ) class TestServerRemoveFloatingIPNetwork(network_fakes.TestNetworkV2): - def setUp(self): - super(TestServerRemoveFloatingIPNetwork, self).setUp() + super().setUp() - self.app.client_manager.network = mock.Mock() - self.network = self.app.client_manager.network - self.network.update_ip = mock.Mock(return_value=None) + self.network_client.update_ip.return_value = None # Get the command object to test - self.cmd = server.RemoveFloatingIP(self.app, self.namespace) + self.cmd = server.RemoveFloatingIP(self.app, None) def test_server_remove_floating_ip_default(self): - _server = compute_fakes.FakeServer.create_one_server() _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip() - self.network.find_ip = mock.Mock(return_value=_floating_ip) + self.network_client.find_ip.return_value = _floating_ip arglist = [ - _server.id, + 'fake_server', _floating_ip['ip'], ] verifylist = [ - ('server', _server.id), + ('server', 'fake_server'), ('ip_address', _floating_ip['ip']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -6919,32 +7707,22 @@ def test_server_remove_floating_ip_default(self): 'port_id': None, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( _floating_ip['ip'], ignore_missing=False, ) - 
self.network.update_ip.assert_called_once_with( - _floating_ip, - **attrs + self.network_client.update_ip.assert_called_once_with( + _floating_ip, **attrs ) class TestServerRemovePort(TestServer): - def setUp(self): - super(TestServerRemovePort, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.RemovePort(self.app, None) - # Set method to be tested. - self.methods = { - 'delete_server_interface': None, - } - - self.find_port = mock.Mock() - self.app.client_manager.network.find_port = self.find_port - def _test_server_remove_port(self, port_id): servers = self.setup_sdk_servers_mock(count=1) port = 'fake-port' @@ -6961,43 +7739,38 @@ def _test_server_remove_port(self, port_id): result = self.cmd.take_action(parsed_args) - self.sdk_client.delete_server_interface.assert_called_with( - port_id, server=servers[0], ignore_missing=False) + self.compute_client.delete_server_interface.assert_called_with( + port_id, server=servers[0], ignore_missing=False + ) self.assertIsNone(result) def test_server_remove_port(self): - self._test_server_remove_port(self.find_port.return_value.id) - self.find_port.assert_called_once_with( - 'fake-port', ignore_missing=False) + self._test_server_remove_port( + self.network_client.find_port.return_value.id + ) + self.network_client.find_port.assert_called_once_with( + 'fake-port', ignore_missing=False + ) def test_server_remove_port_no_neutron(self): self.app.client_manager.network_endpoint_enabled = False self._test_server_remove_port('fake-port') - self.find_port.assert_not_called() + self.network_client.find_port.assert_not_called() class TestServerRemoveNetwork(TestServer): - def setUp(self): - super(TestServerRemoveNetwork, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.RemoveNetwork(self.app, None) - # Set method to be tested. 
- self.fake_inf = mock.Mock() - self.methods = { - 'server_interfaces': [self.fake_inf], - 'delete_server_interface': None, - } - - self.find_network = mock.Mock() - self.app.client_manager.network.find_network = self.find_network - self.sdk_client.server_interfaces.return_value = [self.fake_inf] + self.fake_if = mock.Mock() + self.compute_client.server_interfaces.return_value = [self.fake_if] def _test_server_remove_network(self, network_id): - self.fake_inf.net_id = network_id - self.fake_inf.port_id = 'fake-port' + self.fake_if.net_id = network_id + self.fake_if.port_id = 'fake-port' servers = self.setup_sdk_servers_mock(count=1) network = 'fake-network' @@ -7013,90 +7786,102 @@ def _test_server_remove_network(self, network_id): result = self.cmd.take_action(parsed_args) - self.sdk_client.server_interfaces.assert_called_once_with(servers[0]) - self.sdk_client.delete_server_interface.assert_called_once_with( - 'fake-port', server=servers[0]) + self.compute_client.server_interfaces.assert_called_once_with( + servers[0] + ) + self.compute_client.delete_server_interface.assert_called_once_with( + 'fake-port', server=servers[0] + ) self.assertIsNone(result) def test_server_remove_network(self): - self._test_server_remove_network(self.find_network.return_value.id) - self.find_network.assert_called_once_with( - 'fake-network', ignore_missing=False) + self._test_server_remove_network( + self.network_client.find_network.return_value.id + ) + self.network_client.find_network.assert_called_once_with( + 'fake-network', ignore_missing=False + ) def test_server_remove_network_no_neutron(self): self.app.client_manager.network_endpoint_enabled = False self._test_server_remove_network('fake-network') - self.find_network.assert_not_called() + self.network_client.find_network.assert_not_called() -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.security_group_find' -) class TestServerRemoveSecurityGroup(TestServer): - def setUp(self): - super(TestServerRemoveSecurityGroup, self).setUp() - - self.security_group = \ - compute_fakes.FakeSecurityGroup.create_one_security_group() - - attrs = { - 'security_groups': [{'name': self.security_group['id']}] - } - methods = { - 'remove_security_group': None, - } + super().setUp() - self.server = compute_fakes.FakeServer.create_one_server( - attrs=attrs, - methods=methods + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.remove_security_group_from_server.return_value = ( + None ) - # This is the return value for utils.find_resource() for server - self.servers_mock.get.return_value = self.server # Get the command object to test self.cmd = server.RemoveServerSecurityGroup(self.app, None) - def test_server_remove_security_group(self, sg_find_mock): - sg_find_mock.return_value = self.security_group - arglist = [ - self.server.id, - self.security_group['id'] - ] + def test_server_remove_security_group__nova_network(self): + arglist = [self.server.id, 'fake_sg'] verifylist = [ ('server', self.server.id), - ('group', self.security_group['id']), + ('security_groups', ['fake_sg']), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - sg_find_mock.assert_called_with( - self.security_group['id'], + with mock.patch.object( + self.app.client_manager, + 'is_network_endpoint_enabled', + return_value=False, + ): + with mock.patch.object( + compute_v2, + 'find_security_group', + return_value={'name': 'fake_sg'}, + ) as 
mock_find_nova_net_sg: + result = self.cmd.take_action(parsed_args) + + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False ) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.remove_security_group.assert_called_with( - self.security_group['id'], + self.compute_client.remove_security_group_from_server.assert_called_once_with( + self.server, {'name': 'fake_sg'} + ) + mock_find_nova_net_sg.assert_called_once_with( + self.compute_client, 'fake_sg' ) self.assertIsNone(result) + def test_server_remove_security_group(self): + arglist = [self.server.id, 'fake_sg'] + verifylist = [ + ('server', self.server.id), + ('security_groups', ['fake_sg']), + ] -class TestServerResize(TestServer): - - def setUp(self): - super(TestServerResize, self).setUp() + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) - self.server = compute_fakes.FakeServer.create_one_server() + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.remove_security_group_from_server.assert_called_once_with( + self.server, {'name': 'fake_sg'} + ) + self.assertIsNone(result) - # This is the return value for utils.find_resource() - self.servers_mock.get.return_value = self.server - self.servers_mock.resize.return_value = None - self.servers_mock.confirm_resize.return_value = None - self.servers_mock.revert_resize.return_value = None +class TestServerResize(compute_fakes.TestComputev2): + def setUp(self): + super().setUp() - # This is the return value for utils.find_resource() - self.flavors_get_return_value = \ - compute_fakes.FakeFlavor.create_one_flavor() - self.flavors_mock.get.return_value = self.flavors_get_return_value + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.flavor = compute_fakes.create_one_flavor() + self.compute_client.find_flavor.return_value = self.flavor + self.compute_client.resize_server.return_value = None + self.compute_client.revert_server_resize.return_value = None + self.compute_client.confirm_server_resize.return_value = None # Get the command object to test self.cmd = server.ResizeServer(self.app, None) @@ -7110,42 +7895,44 @@ def test_server_resize_no_options(self): ('revert', False), ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - - self.assertNotCalled(self.servers_mock.resize) - self.assertNotCalled(self.servers_mock.confirm_resize) - self.assertNotCalled(self.servers_mock.revert_resize) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.find_flavor.assert_not_called() + self.compute_client.resize_server.assert_not_called() self.assertIsNone(result) def test_server_resize(self): arglist = [ - '--flavor', self.flavors_get_return_value.id, + '--flavor', + self.flavor.id, self.server.id, ] verifylist = [ - ('flavor', self.flavors_get_return_value.id), + ('flavor', self.flavor.id), ('confirm', False), ('revert', False), ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - 
self.servers_mock.get.assert_called_with(self.server.id) - self.flavors_mock.get.assert_called_with( - self.flavors_get_return_value.id, + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False ) - self.servers_mock.resize.assert_called_with( - self.server, - self.flavors_get_return_value, + self.compute_client.find_flavor.assert_called_once_with( + self.flavor.id, ignore_missing=False + ) + self.compute_client.resize_server.assert_called_once_with( + self.server, self.flavor ) - self.assertNotCalled(self.servers_mock.confirm_resize) - self.assertNotCalled(self.servers_mock.revert_resize) + self.compute_client.confirm_server_resize.assert_not_called() + self.compute_client.revert_server_resize.assert_not_called() self.assertIsNone(result) def test_server_resize_confirm(self): @@ -7158,20 +7945,28 @@ def test_server_resize_confirm(self): ('revert', False), ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) with mock.patch.object(self.cmd.log, 'warning') as mock_warning: result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.assertNotCalled(self.servers_mock.resize) - self.servers_mock.confirm_resize.assert_called_with(self.server) - self.assertNotCalled(self.servers_mock.revert_resize) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.find_flavor.assert_not_called() + self.compute_client.resize_server.assert_not_called() + self.compute_client.confirm_server_resize.assert_called_once_with( + self.server + ) + self.compute_client.revert_server_resize.assert_not_called() self.assertIsNone(result) + # A warning should have been logged for using --confirm. mock_warning.assert_called_once() - self.assertIn('The --confirm option has been deprecated.', - str(mock_warning.call_args[0][0])) + self.assertIn( + 'The --confirm option has been deprecated.', + str(mock_warning.call_args[0][0]), + ) def test_server_resize_revert(self): arglist = [ @@ -7183,115 +7978,114 @@ def test_server_resize_revert(self): ('revert', True), ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) with mock.patch.object(self.cmd.log, 'warning') as mock_warning: result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.assertNotCalled(self.servers_mock.resize) - self.assertNotCalled(self.servers_mock.confirm_resize) - self.servers_mock.revert_resize.assert_called_with(self.server) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.find_flavor.assert_not_called() + self.compute_client.resize_server.assert_not_called() + self.compute_client.confirm_server_resize.assert_not_called() + self.compute_client.revert_server_resize.assert_called_once_with( + self.server + ) self.assertIsNone(result) # A warning should have been logged for using --revert. 
mock_warning.assert_called_once() - self.assertIn('The --revert option has been deprecated.', - str(mock_warning.call_args[0][0])) + self.assertIn( + 'The --revert option has been deprecated.', + str(mock_warning.call_args[0][0]), + ) @mock.patch.object(common_utils, 'wait_for_status', return_value=True) def test_server_resize_with_wait_ok(self, mock_wait_for_status): - arglist = [ - '--flavor', self.flavors_get_return_value.id, + '--flavor', + self.flavor.id, '--wait', self.server.id, ] - verifylist = [ - ('flavor', self.flavors_get_return_value.id), + ('flavor', self.flavor.id), ('confirm', False), ('revert', False), ('wait', True), ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with( - self.server.id, + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False ) - - kwargs = dict(success_status=['active', 'verify_resize'],) + self.compute_client.find_flavor.assert_called_once_with( + self.flavor.id, ignore_missing=False + ) + self.compute_client.resize_server.assert_called_once_with( + self.server, self.flavor + ) + self.compute_client.confirm_server_resize.assert_not_called() + self.compute_client.revert_server_resize.assert_not_called() mock_wait_for_status.assert_called_once_with( - self.servers_mock.get, + self.compute_client.get_server, self.server.id, + success_status=('active', 'verify_resize'), callback=mock.ANY, - **kwargs ) - self.servers_mock.resize.assert_called_with( - self.server, - self.flavors_get_return_value - ) - self.assertNotCalled(self.servers_mock.confirm_resize) - self.assertNotCalled(self.servers_mock.revert_resize) - @mock.patch.object(common_utils, 'wait_for_status', return_value=False) def test_server_resize_with_wait_fails(self, mock_wait_for_status): - arglist = [ - '--flavor', self.flavors_get_return_value.id, + '--flavor', + self.flavor.id, '--wait', self.server.id, ] - verifylist = [ - ('flavor', self.flavors_get_return_value.id), + ('flavor', self.flavor.id), ('confirm', False), ('revert', False), ('wait', True), ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(SystemExit, self.cmd.take_action, parsed_args) - self.servers_mock.get.assert_called_with( - self.server.id, + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args ) - kwargs = dict(success_status=['active', 'verify_resize'],) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.find_flavor.assert_called_once_with( + self.flavor.id, ignore_missing=False + ) + self.compute_client.resize_server.assert_called_once_with( + self.server, self.flavor + ) + self.compute_client.confirm_server_resize.assert_not_called() + self.compute_client.revert_server_resize.assert_not_called() mock_wait_for_status.assert_called_once_with( - self.servers_mock.get, + self.compute_client.get_server, self.server.id, + success_status=('active', 'verify_resize'), callback=mock.ANY, - **kwargs - ) - - self.servers_mock.resize.assert_called_with( - self.server, - self.flavors_get_return_value ) -class TestServerResizeConfirm(TestServer): - +class TestServerResizeConfirm(compute_fakes.TestComputev2): def setUp(self): - super(TestServerResizeConfirm, self).setUp() - - methods = { - 
'confirm_resize': None, - } - self.server = compute_fakes.FakeServer.create_one_server( - methods=methods) - - # This is the return value for utils.find_resource() - self.servers_mock.get.return_value = self.server + super().setUp() - self.servers_mock.confirm_resize.return_value = None + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.confirm_server_resize.return_value = None # Get the command object to test self.cmd = server.ResizeConfirm(self.app, None) @@ -7303,30 +8097,27 @@ def test_resize_confirm(self): verifylist = [ ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.confirm_resize.assert_called_with() + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.confirm_server_resize.assert_called_once_with( + self.server + ) + self.assertIsNone(result) # TODO(stephenfin): Remove in OSC 7.0 -class TestServerMigrateConfirm(TestServer): - +class TestServerMigrateConfirm(compute_fakes.TestComputev2): def setUp(self): super().setUp() - methods = { - 'confirm_resize': None, - } - self.server = compute_fakes.FakeServer.create_one_server( - methods=methods) - - # This is the return value for utils.find_resource() - self.servers_mock.get.return_value = self.server - - self.servers_mock.confirm_resize.return_value = None + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.confirm_server_resize.return_value = None # Get the command object to test self.cmd = server.MigrateConfirm(self.app, None) @@ -7341,33 +8132,30 @@ def test_migrate_confirm(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) with mock.patch.object(self.cmd.log, 'warning') as mock_warning: - self.cmd.take_action(parsed_args) + result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.confirm_resize.assert_called_with() + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.confirm_server_resize.assert_called_once_with( + self.server + ) + self.assertIsNone(result) mock_warning.assert_called_once() self.assertIn( "The 'server migrate confirm' command has been deprecated", - str(mock_warning.call_args[0][0]) + str(mock_warning.call_args[0][0]), ) -class TestServerConfirmMigration(TestServerResizeConfirm): - +class TestServerConfirmMigration(compute_fakes.TestComputev2): def setUp(self): super().setUp() - methods = { - 'confirm_resize': None, - } - self.server = compute_fakes.FakeServer.create_one_server( - methods=methods) - - # This is the return value for utils.find_resource() - self.servers_mock.get.return_value = self.server - - self.servers_mock.confirm_resize.return_value = None + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.confirm_server_resize.return_value = None # Get the command object to test self.cmd = server.ConfirmMigration(self.app, None) @@ -7379,29 +8167,26 @@ def test_migration_confirm(self): verifylist = [ ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) 
- - self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.confirm_resize.assert_called_with() + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.confirm_server_resize.assert_called_once_with( + self.server + ) + self.assertIsNone(result) -class TestServerResizeRevert(TestServer): +class TestServerResizeRevert(compute_fakes.TestComputev2): def setUp(self): - super(TestServerResizeRevert, self).setUp() - - methods = { - 'revert_resize': None, - } - self.server = compute_fakes.FakeServer.create_one_server( - methods=methods) - - # This is the return value for utils.find_resource() - self.servers_mock.get.return_value = self.server + super().setUp() - self.servers_mock.revert_resize.return_value = None + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.revert_server_resize.return_value = None # Get the command object to test self.cmd = server.ResizeRevert(self.app, None) @@ -7413,30 +8198,27 @@ def test_resize_revert(self): verifylist = [ ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.revert_resize.assert_called_with() + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.revert_server_resize.assert_called_once_with( + self.server + ) + self.assertIsNone(result) # TODO(stephenfin): Remove in OSC 7.0 -class TestServerMigrateRevert(TestServer): - +class TestServerMigrateRevert(compute_fakes.TestComputev2): def setUp(self): super().setUp() - methods = { - 'revert_resize': None, - } - self.server = compute_fakes.FakeServer.create_one_server( - methods=methods) - - # This is the return value for utils.find_resource() - self.servers_mock.get.return_value = self.server - - self.servers_mock.revert_resize.return_value = None + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.revert_server_resize.return_value = None # Get the command object to test self.cmd = server.MigrateRevert(self.app, None) @@ -7448,36 +8230,33 @@ def test_migrate_revert(self): verifylist = [ ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) with mock.patch.object(self.cmd.log, 'warning') as mock_warning: - self.cmd.take_action(parsed_args) + result = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.revert_resize.assert_called_with() + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.revert_server_resize.assert_called_once_with( + self.server + ) + self.assertIsNone(result) mock_warning.assert_called_once() self.assertIn( "The 'server migrate revert' command has been deprecated", - str(mock_warning.call_args[0][0]) + str(mock_warning.call_args[0][0]), ) -class TestServerRevertMigration(TestServer): - +class TestServerRevertMigration(compute_fakes.TestComputev2): def 
setUp(self): super().setUp() - methods = { - 'revert_resize': None, - } - self.server = compute_fakes.FakeServer.create_one_server( - methods=methods) - - # This is the return value for utils.find_resource() - self.servers_mock.get.return_value = self.server - - self.servers_mock.revert_resize.return_value = None + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + self.compute_client.revert_server_resize.return_value = None # Get the command object to test self.cmd = server.RevertMigration(self.app, None) @@ -7489,47 +8268,40 @@ def test_migration_revert(self): verifylist = [ ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(self.server.id) - self.server.revert_resize.assert_called_with() + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.revert_server_resize.assert_called_once_with( + self.server + ) + self.assertIsNone(result) -class TestServerRestore(TestServer): +class TestServerRestore(TestServerAction): def setUp(self): - super(TestServerRestore, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.RestoreServer(self.app, None) - # Set methods to be tested. - self.methods = { - 'restore': None, - } - def test_server_restore_one_server(self): - self.run_method_with_servers('restore', 1) + self.run_method_with_sdk_servers('restore_server', 1) def test_server_restore_multi_servers(self): - self.run_method_with_servers('restore', 3) - + self.run_method_with_sdk_servers('restore_server', 3) -class TestServerResume(TestServer): +class TestServerResume(TestServerAction): def setUp(self): - super(TestServerResume, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.ResumeServer(self.app, None) - # Set methods to be tested. 
- self.methods = { - 'resume': None, - } - def test_server_resume_one_server(self): self.run_method_with_sdk_servers('resume_server', 1) @@ -7538,288 +8310,405 @@ def test_server_resume_multi_servers(self): class TestServerSet(TestServer): - def setUp(self): - super(TestServerSet, self).setUp() - - self.attrs = { - 'api_version': None, - } - - self.methods = { - 'update': None, - 'reset_state': None, - 'change_password': None, - 'clear_password': None, - 'add_tag': None, - 'set_tags': None, - } + super().setUp() - self.fake_servers = self.setup_servers_mock(2) + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server # Get the command object to test self.cmd = server.SetServer(self.app, None) def test_server_set_no_option(self): + arglist = [self.server.id] + verifylist = [('server', self.server.id)] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.compute_client.update_server.assert_not_called() + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() + self.assertIsNone(result) + + def test_server_set_with_state(self): arglist = [ - 'foo_vm' + '--state', + 'active', + '--auto-approve', + self.server.id, ] verifylist = [ - ('server', 'foo_vm') + ('state', 'active'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertNotCalled(self.fake_servers[0].update) - self.assertNotCalled(self.fake_servers[0].reset_state) - self.assertNotCalled(self.fake_servers[0].change_password) - self.assertNotCalled(self.servers_mock.set_meta) + + self.compute_client.reset_server_state.assert_called_once_with( + self.server, state='active' + ) + self.compute_client.update_server.assert_not_called() + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() self.assertIsNone(result) - def test_server_set_with_state(self): - for index, state in enumerate(['active', 'error']): - arglist = [ - '--state', state, - 'foo_vm', - ] - verifylist = [ - ('state', state), - ('server', 'foo_vm'), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + def test_server_set_with_state_prompt_y(self): + arglist = [ + '--state', + 'active', + self.server.id, + ] + verifylist = [ + ('state', 'active'), + ('server', self.server.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + with mock.patch('getpass.getpass', return_value='y'): result = self.cmd.take_action(parsed_args) - self.fake_servers[index].reset_state.assert_called_once_with( - state=state) - self.assertIsNone(result) + + self.compute_client.reset_server_state.assert_called_once_with( + self.server, state='active' + ) + self.compute_client.update_server.assert_not_called() + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() + self.assertIsNone(result) + + def 
test_server_set_with_state_prompt_n(self): + arglist = [ + '--state', + 'active', + self.server.id, + ] + verifylist = [ + ('state', 'active'), + ('server', self.server.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + with mock.patch('getpass.getpass', return_value='n'): + result = self.cmd.take_action(parsed_args) + + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.update_server.assert_not_called() + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() + self.assertIsNone(result) def test_server_set_with_invalid_state(self): arglist = [ - '--state', 'foo_state', - 'foo_vm', + '--state', + 'foo_state', + self.server.id, ] verifylist = [ ('state', 'foo_state'), - ('server', 'foo_vm'), + ('server', self.server.id), ] - self.assertRaises(utils.ParserException, - self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_server_set_with_name(self): arglist = [ - '--name', 'foo_name', - 'foo_vm', + '--name', + 'foo_name', + self.server.id, ] verifylist = [ ('name', 'foo_name'), - ('server', 'foo_vm'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.fake_servers[0].update.assert_called_once_with(name='foo_name') + + self.compute_client.update_server.assert_called_once_with( + self.server, name='foo_name' + ) + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() self.assertIsNone(result) def test_server_set_with_property(self): arglist = [ - '--property', 'key1=value1', - '--property', 'key2=value2', - 'foo_vm', + '--property', + 'key1=value1', + '--property', + 'key2=value2', + self.server.id, ] verifylist = [ ('properties', {'key1': 'value1', 'key2': 'value2'}), - ('server', 'foo_vm'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.set_meta.assert_called_once_with( - self.fake_servers[0], parsed_args.properties) + + self.compute_client.set_server_metadata.assert_called_once_with( + self.server, key1='value1', key2='value2' + ) + self.compute_client.update_server.assert_not_called() + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() self.assertIsNone(result) def test_server_set_with_password(self): arglist = [ - '--password', 'foo', - 'foo_vm', + '--password', + 'foo', + self.server.id, ] verifylist = [ ('password', 'foo'), - ('server', 'foo_vm'), + ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) - self.fake_servers[0].change_password.assert_called_once_with('foo') + 
self.compute_client.change_server_password.assert_called_once_with( + self.server, 'foo' + ) + self.compute_client.update_server.assert_not_called() + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() + self.assertIsNone(result) def test_server_set_with_no_password(self): arglist = [ '--no-password', - 'foo_vm', + self.server.id, ] verifylist = [ ('no_password', True), - ('server', 'foo_vm'), + ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) - self.fake_servers[0].clear_password.assert_called_once_with() + self.compute_client.clear_server_password.assert_called_once_with( + self.server + ) + self.compute_client.update_server.assert_not_called() + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() + self.assertIsNone(result) # TODO(stephenfin): Remove this in a future major version - @mock.patch.object(getpass, 'getpass', - return_value=mock.sentinel.fake_pass) + @mock.patch.object( + getpass, 'getpass', return_value=mock.sentinel.fake_pass + ) def test_server_set_with_root_password(self, mock_getpass): arglist = [ '--root-password', - 'foo_vm', + self.server.id, ] verifylist = [ ('root_password', True), - ('server', 'foo_vm'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.fake_servers[0].change_password.assert_called_once_with( - mock.sentinel.fake_pass) + + self.compute_client.change_server_password.assert_called_once_with( + self.server, mock.sentinel.fake_pass + ) + self.compute_client.update_server.assert_not_called() + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() self.assertIsNone(result) def test_server_set_with_description(self): - - # Description is supported for nova api version 2.19 or above - self.fake_servers[0].api_version = api_versions.APIVersion('2.19') + self.set_compute_api_version('2.19') arglist = [ - '--description', 'foo_description', - 'foo_vm', + '--description', + 'foo_description', + self.server.id, ] verifylist = [ ('description', 'foo_description'), - ('server', 'foo_vm'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.fake_servers[0].update.assert_called_once_with( - description='foo_description') + + self.compute_client.update_server.assert_called_once_with( + self.server, description='foo_description' + ) + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() self.assertIsNone(result) def test_server_set_with_description_pre_v219(self): - - # 
Description is not supported for nova api version below 2.19 - self.fake_servers[0].api_version = api_versions.APIVersion('2.18') + self.set_compute_api_version('2.18') arglist = [ - '--description', 'foo_description', - 'foo_vm', + '--description', + 'foo_description', + self.server.id, ] verifylist = [ ('description', 'foo_description'), - ('server', 'foo_vm'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_server_set_with_tag(self): - self.fake_servers[0].api_version = api_versions.APIVersion('2.26') + self.set_compute_api_version('2.26') arglist = [ - '--tag', 'tag1', - '--tag', 'tag2', - 'foo_vm', + '--tag', + 'tag1', + '--tag', + 'tag2', + self.server.id, ] verifylist = [ ('tags', ['tag1', 'tag2']), - ('server', 'foo_vm'), + ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.fake_servers[0].add_tag.assert_has_calls([ - mock.call(tag='tag1'), - mock.call(tag='tag2'), - ]) + self.compute_client.add_tag_to_server.assert_has_calls( + [ + mock.call(self.server, tag='tag1'), + mock.call(self.server, tag='tag2'), + ] + ) + self.compute_client.update_server.assert_not_called() + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() self.assertIsNone(result) def test_server_set_with_tag_pre_v226(self): - self.fake_servers[0].api_version = api_versions.APIVersion('2.25') + self.set_compute_api_version('2.25') arglist = [ - '--tag', 'tag1', - '--tag', 'tag2', - 'foo_vm', + '--tag', + 'tag1', + '--tag', + 'tag2', + self.server.id, ] verifylist = [ ('tags', ['tag1', 'tag2']), - ('server', 'foo_vm'), + ('server', self.server.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.26 or greater is required', - str(ex)) + '--os-compute-api-version 2.26 or greater is required', str(ex) + ) def test_server_set_with_hostname(self): - - self.fake_servers[0].api_version = api_versions.APIVersion('2.90') + self.set_compute_api_version('2.90') arglist = [ - '--hostname', 'foo-hostname', - 'foo_vm', + '--hostname', + 'foo-hostname', + self.server.id, ] verifylist = [ ('hostname', 'foo-hostname'), - ('server', 'foo_vm'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.fake_servers[0].update.assert_called_once_with( - hostname='foo-hostname') + + self.compute_client.update_server.assert_called_once_with( + self.server, hostname='foo-hostname' + ) + self.compute_client.set_server_metadata.assert_not_called() + self.compute_client.reset_server_state.assert_not_called() + self.compute_client.change_server_password.assert_not_called() + self.compute_client.clear_server_password.assert_not_called() + self.compute_client.add_tag_to_server.assert_not_called() 
self.assertIsNone(result) def test_server_set_with_hostname_pre_v290(self): - - self.fake_servers[0].api_version = api_versions.APIVersion('2.89') + self.set_compute_api_version('2.89') arglist = [ - '--hostname', 'foo-hostname', - 'foo_vm', + '--hostname', + 'foo-hostname', + self.server.id, ] verifylist = [ ('hostname', 'foo-hostname'), - ('server', 'foo_vm'), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) class TestServerShelve(TestServer): - def setUp(self): - super(TestServerShelve, self).setUp() + super().setUp() + + self.server = compute_fakes.create_one_server( + attrs={'status': 'ACTIVE'}, + ) + + self.compute_client.find_server.return_value = self.server + self.compute_client.shelve_server.return_value = None # Get the command object to test self.cmd = server.ShelveServer(self.app, None) def test_shelve(self): - server_info = {'status': 'ACTIVE'} - server_methods = { - 'shelve': None, - 'shelve_offload': None, - } - - server = compute_fakes.FakeServer.create_one_server( - attrs=server_info, methods=server_methods) - self.servers_mock.get.return_value = server - - arglist = [server.name] + arglist = [self.server.name] verifylist = [ - ('servers', [server.name]), + ('servers', [self.server.name]), ('wait', False), ('offload', False), ] @@ -7828,24 +8717,19 @@ def test_shelve(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.servers_mock.get.assert_called_once_with(server.name) - server.shelve.assert_called_once_with() - server.shelve_offload.assert_not_called() + self.compute_client.find_server.assert_called_with( + self.server.name, + ignore_missing=False, + ) + self.compute_client.shelve_server.assert_called_with(self.server.id) + self.compute_client.shelve_offload_server.assert_not_called() def test_shelve_already_shelved(self): - server_info = {'status': 'SHELVED'} - server_methods = { - 'shelve': None, - 'shelve_offload': None, - } + self.server.status = 'SHELVED' - server = compute_fakes.FakeServer.create_one_server( - attrs=server_info, methods=server_methods) - self.servers_mock.get.return_value = server - - arglist = [server.name] + arglist = [self.server.name] verifylist = [ - ('servers', [server.name]), + ('servers', [self.server.name]), ('wait', False), ('offload', False), ] @@ -7854,25 +8738,18 @@ def test_shelve_already_shelved(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.servers_mock.get.assert_called_once_with(server.name) - server.shelve.assert_not_called() - server.shelve_offload.assert_not_called() + self.compute_client.find_server.assert_called_with( + self.server.name, + ignore_missing=False, + ) + self.compute_client.shelve_server.assert_not_called() + self.compute_client.shelve_offload_server.assert_not_called() @mock.patch.object(common_utils, 'wait_for_status', return_value=True) def test_shelve_with_wait(self, mock_wait_for_status): - server_info = {'status': 'ACTIVE'} - server_methods = { - 'shelve': None, - 'shelve_offload': None, - } - - server = compute_fakes.FakeServer.create_one_server( - attrs=server_info, methods=server_methods) - self.servers_mock.get.return_value = server - - arglist = ['--wait', server.name] + arglist = ['--wait', self.server.name] verifylist = [ - ('servers', [server.name]), + ('servers', [self.server.name]), ('wait', True), ('offload', False), ] @@ -7881,31 
+8758,24 @@ def test_shelve_with_wait(self, mock_wait_for_status): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.servers_mock.get.assert_called_once_with(server.name) - server.shelve.assert_called_once_with() - server.shelve_offload.assert_not_called() + self.compute_client.find_server.assert_called_with( + self.server.name, + ignore_missing=False, + ) + self.compute_client.shelve_server.assert_called_with(self.server.id) + self.compute_client.shelve_offload_server.assert_not_called() mock_wait_for_status.assert_called_once_with( - self.servers_mock.get, - server.id, + self.compute_client.get_server, + self.server.id, callback=mock.ANY, success_status=('shelved', 'shelved_offloaded'), ) @mock.patch.object(common_utils, 'wait_for_status', return_value=True) def test_shelve_offload(self, mock_wait_for_status): - server_info = {'status': 'ACTIVE'} - server_methods = { - 'shelve': None, - 'shelve_offload': None, - } - - server = compute_fakes.FakeServer.create_one_server( - attrs=server_info, methods=server_methods) - self.servers_mock.get.return_value = server - - arglist = ['--offload', server.name] + arglist = ['--offload', self.server.name] verifylist = [ - ('servers', [server.name]), + ('servers', [self.server.name]), ('wait', False), ('offload', True), ] @@ -7914,27 +8784,38 @@ def test_shelve_offload(self, mock_wait_for_status): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.servers_mock.get.assert_has_calls([ - mock.call(server.name), - mock.call(server.name), - ]) - server.shelve.assert_called_once_with() - server.shelve_offload.assert_called_once_with() + # one call to retrieve to retrieve the server state before shelving + self.compute_client.find_server.assert_called_once_with( + self.server.name, + ignore_missing=False, + ) + # one call to retrieve the server state before offloading + self.compute_client.get_server.assert_called_once_with(self.server.id) + # one call to shelve the server + self.compute_client.shelve_server.assert_called_with(self.server.id) + # one call to shelve offload the server + self.compute_client.shelve_offload_server.assert_called_once_with( + self.server.id, + ) + # one call to wait for the shelve offload to complete mock_wait_for_status.assert_called_once_with( - self.servers_mock.get, - server.id, + self.compute_client.get_server, + self.server.id, callback=mock.ANY, success_status=('shelved', 'shelved_offloaded'), ) class TestServerShow(TestServer): - def setUp(self): - super(TestServerShow, self).setUp() + super().setUp() self.image = image_fakes.create_one_image() - self.flavor = compute_fakes.FakeFlavor.create_one_flavor() + self.image_client.get_image.return_value = self.image + + self.flavor = compute_fakes.create_one_flavor() + self.compute_client.find_flavor.return_value = self.flavor + self.topology = { 'nodes': [{'vcpu_set': [0, 1]}, {'vcpu_set': [2, 3]}], 'pagesize_kb': None, @@ -7943,54 +8824,126 @@ def setUp(self): 'image': {'id': self.image.id}, 'flavor': {'id': self.flavor.id}, 'tenant_id': 'tenant-id-xxx', - 'networks': {'public': ['10.20.30.40', '2001:db8::f']}, + 'addresses': {'public': ['10.20.30.40', '2001:db8::f']}, } - self.sdk_client.get_server_diagnostics.return_value = {'test': 'test'} - server_method = { - 'fetch_topology': self.topology, + self.compute_client.get_server_diagnostics.return_value = { + 'test': 'test' } - self.server = compute_fakes.FakeServer.create_one_server( - attrs=server_info, methods=server_method) - - # This is the return value for 
utils.find_resource() - self.sdk_client.get_server.return_value = self.server - self.get_image_mock.return_value = self.image - self.flavors_mock.get.return_value = self.flavor + self.server = compute_fakes.create_one_server( + attrs=server_info, + ) + self.server.fetch_topology = mock.MagicMock(return_value=self.topology) + self.compute_client.find_server.return_value = self.server # Get the command object to test self.cmd = server.ShowServer(self.app, None) self.columns = ( + 'OS-DCF:diskConfig', + 'OS-EXT-AZ:availability_zone', + 'OS-EXT-SRV-ATTR:host', + 'OS-EXT-SRV-ATTR:hostname', + 'OS-EXT-SRV-ATTR:hypervisor_hostname', + 'OS-EXT-SRV-ATTR:instance_name', + 'OS-EXT-SRV-ATTR:kernel_id', + 'OS-EXT-SRV-ATTR:launch_index', + 'OS-EXT-SRV-ATTR:ramdisk_id', + 'OS-EXT-SRV-ATTR:reservation_id', + 'OS-EXT-SRV-ATTR:root_device_name', + 'OS-EXT-SRV-ATTR:user_data', 'OS-EXT-STS:power_state', + 'OS-EXT-STS:task_state', + 'OS-EXT-STS:vm_state', + 'OS-SRV-USG:launched_at', + 'OS-SRV-USG:terminated_at', + 'accessIPv4', + 'accessIPv6', 'addresses', + 'config_drive', + 'created', + 'description', 'flavor', + 'hostId', + 'host_status', 'id', 'image', + 'key_name', + 'locked', + 'locked_reason', 'name', - 'networks', + 'progress', 'project_id', 'properties', + 'server_groups', + 'status', + 'tags', + 'trusted_image_certificates', + 'updated', + 'user_id', + 'volumes_attached', ) self.data = ( + None, # OS-DCF:diskConfig + None, # OS-EXT-AZ:availability_zone + None, # OS-EXT-SRV-ATTR:host + None, # OS-EXT-SRV-ATTR:hostname + None, # OS-EXT-SRV-ATTR:hypervisor_hostname + None, # OS-EXT-SRV-ATTR:instance_name + None, # OS-EXT-SRV-ATTR:kernel_id + None, # OS-EXT-SRV-ATTR:launch_index + None, # OS-EXT-SRV-ATTR:ramdisk_id + None, # OS-EXT-SRV-ATTR:reservation_id + None, # OS-EXT-SRV-ATTR:root_device_name + None, # OS-EXT-SRV-ATTR:user_data server.PowerStateColumn( - getattr(self.server, 'OS-EXT-STS:power_state')), - format_columns.DictListColumn(self.server.networks), - self.flavor.name + " (" + self.flavor.id + ")", - self.server.id, - self.image.name + " (" + self.image.id + ")", + self.server.power_state + ), # OS-EXT-STS:power_state # noqa: E501 + None, # OS-EXT-STS:task_state + None, # OS-EXT-STS:vm_state + None, # OS-SRV-USG:launched_at + None, # OS-SRV-USG:terminated_at + None, # accessIPv4 + None, # accessIPv6 + server.AddressesColumn( + {'public': ['10.20.30.40', '2001:db8::f']} + ), # addresses + None, # config_drive + None, # created + None, # description + self.flavor.name + " (" + self.flavor.id + ")", # flavor + None, # hostId + None, # host_status + self.server.id, # id + self.image.name + " (" + self.image.id + ")", # image + None, # key_name + None, # locked + None, # locked_reason self.server.name, - {'public': ['10.20.30.40', '2001:db8::f']}, - 'tenant-id-xxx', - format_columns.DictColumn({}), - ) + None, # progress + 'tenant-id-xxx', # project_id + format_columns.DictColumn({}), # properties + None, # server_groups + None, # status + format_columns.ListColumn([]), # tags + None, # trusted_image_certificates + None, # updated + None, # user_id + format_columns.ListDictColumn([]), # volumes_attached + ) + self.assertEqual(len(self.columns), len(self.data)) def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show(self): arglist = [ @@ -8005,8 +8958,12 @@ def 
test_show(self): columns, data = self.cmd.take_action(parsed_args) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, data) + self.assertTupleEqual(self.columns, columns) + self.assertTupleEqual(self.data, data) + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False, details=True + ) + self.compute_client.get_server.assert_not_called() def test_show_embedded_flavor(self): # Tests using --os-compute-api-version >= 2.47 where the flavor @@ -8020,21 +8977,25 @@ def test_show_embedded_flavor(self): ('server', self.server.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.server.info['flavor'] = { + self.server.flavor = { 'ephemeral': 0, 'ram': 512, 'original_name': 'm1.tiny', 'vcpus': 1, 'extra_specs': {}, 'swap': 0, - 'disk': 1 + 'disk': 1, } columns, data = self.cmd.take_action(parsed_args) self.assertEqual(self.columns, columns) # Since the flavor details are in a dict we can't be sure of the # ordering so just assert that one of the keys is in the output. - self.assertIn('original_name', data[2]._value) + self.assertIn('original_name', data[columns.index('flavor')]._value) + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False, details=True + ) + self.compute_client.get_server.assert_not_called() def test_show_diagnostics(self): arglist = [ @@ -8052,9 +9013,16 @@ def test_show_diagnostics(self): self.assertEqual(('test',), columns) self.assertEqual(('test',), data) + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False, details=True + ) + self.compute_client.get_server_diagnostics.assert_called_once_with( + self.server + ) + self.compute_client.get_server.assert_not_called() def test_show_topology(self): - self._set_mock_microversion('2.78') + self.set_compute_api_version('2.78') arglist = [ '--topology', @@ -8074,9 +9042,14 @@ def test_show_topology(self): self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False, details=True + ) + self.server.fetch_topology.assert_called_once_with(self.compute_client) + self.compute_client.get_server.assert_not_called() def test_show_topology_pre_v278(self): - self._set_mock_microversion('2.77') + self.set_compute_api_version('2.77') arglist = [ '--topology', @@ -8090,12 +9063,17 @@ def test_show_topology_pre_v278(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False, details=True + ) + self.server.fetch_topology.assert_not_called() + self.compute_client.get_server.assert_not_called() @mock.patch('openstackclient.compute.v2.server.os.system') class TestServerSsh(TestServer): - def setUp(self): super().setUp() @@ -8116,10 +9094,10 @@ def setUp(self): ], }, } - self.server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods, + self.server = compute_fakes.create_one_server( + attrs=self.attrs, ) - self.servers_mock.get.return_value = self.server + self.compute_client.find_server.return_value = self.server def test_server_ssh_no_opts(self, mock_exec): arglist = [ @@ -8142,6 +9120,9 @@ def test_server_ssh_no_opts(self, mock_exec): with 
mock.patch.object(self.cmd.log, 'warning') as mock_warning: result = self.cmd.take_action(parsed_args) + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False + ) self.assertIsNone(result) mock_exec.assert_called_once_with('ssh 192.168.1.30 -l cloud') mock_warning.assert_not_called() @@ -8150,8 +9131,10 @@ def test_server_ssh_passthrough_opts(self, mock_exec): arglist = [ self.server.name, '--', - '-l', 'username', - '-p', '2222', + '-l', + 'username', + '-p', + '2222', ] verifylist = [ ('server', self.server.name), @@ -8170,6 +9153,9 @@ def test_server_ssh_passthrough_opts(self, mock_exec): with mock.patch.object(self.cmd.log, 'warning') as mock_warning: result = self.cmd.take_action(parsed_args) + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False + ) self.assertIsNone(result) mock_exec.assert_called_once_with( 'ssh 192.168.1.30 -l username -p 2222' @@ -8179,8 +9165,10 @@ def test_server_ssh_passthrough_opts(self, mock_exec): def test_server_ssh_deprecated_opts(self, mock_exec): arglist = [ self.server.name, - '-l', 'username', - '-p', '2222', + '-l', + 'username', + '-p', + '2222', ] verifylist = [ ('server', self.server.name), @@ -8199,6 +9187,9 @@ def test_server_ssh_deprecated_opts(self, mock_exec): with mock.patch.object(self.cmd.log, 'warning') as mock_warning: result = self.cmd.take_action(parsed_args) + self.compute_client.find_server.assert_called_once_with( + self.server.name, ignore_missing=False + ) self.assertIsNone(result) mock_exec.assert_called_once_with( 'ssh 192.168.1.30 -p 2222 -l username' @@ -8210,103 +9201,85 @@ def test_server_ssh_deprecated_opts(self, mock_exec): ) -class TestServerStart(TestServer): - +class TestServerStart(TestServerAction): def setUp(self): - super(TestServerStart, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.StartServer(self.app, None) - # Set methods to be tested. - self.methods = { - 'start': None, - } - def test_server_start_one_server(self): - self.run_method_with_servers('start', 1) + self.run_method_with_sdk_servers('start_server', 1) def test_server_start_multi_servers(self): - self.run_method_with_servers('start', 3) + self.run_method_with_sdk_servers('start_server', 3) - @mock.patch.object(common_utils, 'find_resource') - def test_server_start_with_all_projects(self, mock_find_resource): - servers = self.setup_servers_mock(count=1) - mock_find_resource.side_effect = compute_fakes.FakeServer.get_servers( - servers, 0, - ) + def test_server_start_with_all_projects(self): + server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = server arglist = [ - servers[0].id, + server.id, '--all-projects', ] verifylist = [ - ('server', [servers[0].id]), + ('server', [server.id]), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - mock_find_resource.assert_called_once_with( - mock.ANY, servers[0].id, all_tenants=True, + self.compute_client.find_server.assert_called_once_with( + server.id, + ignore_missing=False, + details=False, + all_projects=True, ) -class TestServerStop(TestServer): - +class TestServerStop(TestServerAction): def setUp(self): - super(TestServerStop, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.StopServer(self.app, None) - # Set methods to be tested. 
- self.methods = { - 'stop': None, - } - def test_server_stop_one_server(self): - self.run_method_with_servers('stop', 1) + self.run_method_with_sdk_servers('stop_server', 1) def test_server_stop_multi_servers(self): - self.run_method_with_servers('stop', 3) + self.run_method_with_sdk_servers('stop_server', 3) - @mock.patch.object(common_utils, 'find_resource') - def test_server_start_with_all_projects(self, mock_find_resource): - servers = self.setup_servers_mock(count=1) - mock_find_resource.side_effect = compute_fakes.FakeServer.get_servers( - servers, 0, - ) + def test_server_start_with_all_projects(self): + server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = server arglist = [ - servers[0].id, + server.id, '--all-projects', ] verifylist = [ - ('server', [servers[0].id]), + ('server', [server.id]), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - mock_find_resource.assert_called_once_with( - mock.ANY, servers[0].id, all_tenants=True, + self.compute_client.find_server.assert_called_once_with( + server.id, + ignore_missing=False, + details=False, + all_projects=True, ) -class TestServerSuspend(TestServer): - +class TestServerSuspend(TestServerAction): def setUp(self): - super(TestServerSuspend, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.SuspendServer(self.app, None) - # Set methods to be tested. - self.methods = { - 'suspend': None, - } - def test_server_suspend_one_server(self): self.run_method_with_sdk_servers('suspend_server', 1) @@ -8314,39 +9287,27 @@ def test_server_suspend_multi_servers(self): self.run_method_with_sdk_servers('suspend_server', 3) -class TestServerUnlock(TestServer): - +class TestServerUnlock(TestServerAction): def setUp(self): - super(TestServerUnlock, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.UnlockServer(self.app, None) - # Set methods to be tested. - self.methods = { - 'unlock': None, - } - def test_server_unlock_one_server(self): - self.run_method_with_servers('unlock', 1) + self.run_method_with_sdk_servers('unlock_server', 1) def test_server_unlock_multi_servers(self): - self.run_method_with_servers('unlock', 3) - + self.run_method_with_sdk_servers('unlock_server', 3) -class TestServerUnpause(TestServer): +class TestServerUnpause(TestServerAction): def setUp(self): - super(TestServerUnpause, self).setUp() + super().setUp() # Get the command object to test self.cmd = server.UnpauseServer(self.app, None) - # Set methods to be tested. 
- self.methods = { - 'unpause': None, - } - def test_server_unpause_one_server(self): self.run_method_with_sdk_servers('unpause_server', 1) @@ -8354,377 +9315,395 @@ def test_server_unpause_multi_servers(self): self.run_method_with_sdk_servers('unpause_server', 3) -class TestServerUnset(TestServer): +class TestServerUnrescue(compute_fakes.TestComputev2): + def setUp(self): + super().setUp() + + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server + + self.cmd = server.UnrescueServer(self.app, None) + + def test_rescue(self): + arglist = [ + self.server.id, + ] + verifylist = [ + ('server', self.server.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.compute_client.unrescue_server.assert_called_once_with( + self.server + ) + self.assertIsNone(result) + + +class TestServerUnset(TestServer): def setUp(self): - super(TestServerUnset, self).setUp() + super().setUp() - self.fake_server = self.setup_servers_mock(1)[0] + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server # Get the command object to test self.cmd = server.UnsetServer(self.app, None) def test_server_unset_no_option(self): arglist = [ - 'foo_vm', + self.server.id, ] verifylist = [ - ('server', 'foo_vm'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertNotCalled(self.servers_mock.delete_meta) + + self.compute_client.find_server(self.server.id, ignore_missing=False) + self.compute_client.delete_server_metadata.assert_not_called() + self.compute_client.update_server.assert_not_called() + self.compute_client.remove_tag_from_server.assert_not_called() self.assertIsNone(result) def test_server_unset_with_property(self): arglist = [ - '--property', 'key1', - '--property', 'key2', - 'foo_vm', + '--property', + 'key1', + '--property', + 'key2', + self.server.id, ] verifylist = [ ('properties', ['key1', 'key2']), - ('server', 'foo_vm'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.servers_mock.delete_meta.assert_called_once_with( - self.fake_server, ['key1', 'key2']) - self.assertIsNone(result) - def test_server_unset_with_description_api_newer(self): + self.compute_client.find_server(self.server.id, ignore_missing=False) + self.compute_client.delete_server_metadata.assert_called_once_with( + self.server, + ['key1', 'key2'], + ) + self.compute_client.update_server.assert_not_called() + self.compute_client.remove_tag_from_server.assert_not_called() + self.assertIsNone(result) + def test_server_unset_with_description(self): # Description is supported for nova api version 2.19 or above - self.app.client_manager.compute.api_version = 2.19 + self.set_compute_api_version('2.19') arglist = [ '--description', - 'foo_vm', + self.server.id, ] verifylist = [ ('description', True), - ('server', 'foo_vm'), + ('server', self.server.id), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) - with mock.patch.object(api_versions, - 'APIVersion', - return_value=2.19): - result = self.cmd.take_action(parsed_args) - self.servers_mock.update.assert_called_once_with( - self.fake_server, description="") + 
self.compute_client.find_server(self.server.id, ignore_missing=False) + self.compute_client.update_server.assert_called_once_with( + self.server, description='' + ) + self.compute_client.delete_server_metadata.assert_not_called() + self.compute_client.remove_tag_from_server.assert_not_called() self.assertIsNone(result) - def test_server_unset_with_description_api_older(self): - + def test_server_unset_with_description_pre_v219(self): # Description is not supported for nova api version below 2.19 - self.app.client_manager.compute.api_version = 2.18 + self.set_compute_api_version('2.18') arglist = [ '--description', - 'foo_vm', + self.server.id, ] verifylist = [ ('description', True), - ('server', 'foo_vm'), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch.object(api_versions, - 'APIVersion', - return_value=2.19): - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + ex = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--os-compute-api-version 2.19 or greater is required', str(ex) + ) def test_server_unset_with_tag(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.26') + self.set_compute_api_version('2.26') arglist = [ - '--tag', 'tag1', - '--tag', 'tag2', - 'foo_vm', + '--tag', + 'tag1', + '--tag', + 'tag2', + self.server.id, ] verifylist = [ ('tags', ['tag1', 'tag2']), - ('server', 'foo_vm'), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.servers_mock.delete_tag.assert_has_calls([ - mock.call(self.fake_server, tag='tag1'), - mock.call(self.fake_server, tag='tag2'), - ]) + self.compute_client.find_server(self.server.id, ignore_missing=False) + self.compute_client.remove_tag_from_server.assert_has_calls( + [ + mock.call(self.server, 'tag1'), + mock.call(self.server, 'tag2'), + ] + ) + self.compute_client.delete_server_metadata.assert_not_called() + self.compute_client.update_server.assert_not_called() def test_server_unset_with_tag_pre_v226(self): - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.25') + self.set_compute_api_version('2.25') arglist = [ - '--tag', 'tag1', - '--tag', 'tag2', - 'foo_vm', + '--tag', + 'tag1', + '--tag', + 'tag2', + self.server.id, ] verifylist = [ ('tags', ['tag1', 'tag2']), - ('server', 'foo_vm'), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.26 or greater is required', - str(ex)) + '--os-compute-api-version 2.26 or greater is required', str(ex) + ) class TestServerUnshelve(TestServer): - def setUp(self): - super(TestServerUnshelve, self).setUp() - - # Get the command object to test - self.cmd = server.UnshelveServer(self.app, None) - - # Set unshelve method to be tested. 
- self.methods = { - 'unshelve': None, - } - self.attrs = { - 'status': 'SHELVED', - } - - def test_unshelve_one_server(self): - self.run_method_with_servers('unshelve', 1) - - def test_unshelve_multi_servers(self): - self.run_method_with_servers('unshelve', 3) - - def test_unshelve_v277(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.77') - - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server - arglist = [server.id] - verifylist = [('server', [server.id])] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + super().setUp() - self.cmd.take_action(parsed_args) + self.server = compute_fakes.create_one_server( + attrs={'status': 'SHELVED'}, + ) - self.servers_mock.get.assert_called_with(server.id) - server.unshelve.assert_called_with() + self.compute_client.find_server.return_value = self.server + self.compute_client.unshelve_server.return_value = None - def test_unshelve_with_specified_az_v277(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.77') + # Get the command object to test + self.cmd = server.UnshelveServer(self.app, None) - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server + def test_unshelve(self): arglist = [ - '--availability-zone', "foo-az", - server.id, + self.server.id, ] verifylist = [ - ('availability_zone', "foo-az"), - ('server', [server.id]) + ('server', [self.server.id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(server.id) - server.unshelve.assert_called_with(availability_zone="foo-az") + self.compute_client.find_server.assert_called_once_with( + self.server.id, + ignore_missing=False, + ) + self.compute_client.unshelve_server.assert_called_once_with( + self.server.id + ) - def test_unshelve_with_specified_az_pre_v277(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.76') + def test_unshelve_with_az(self): + self.set_compute_api_version('2.77') - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) arglist = [ - server.id, - '--availability-zone', "foo-az", + '--availability-zone', + 'foo-az', + self.server.id, ] verifylist = [ - ('availability_zone', "foo-az"), - ('server', [server.id]) + ('availability_zone', 'foo-az'), + ('server', [self.server.id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn( - '--os-compute-api-version 2.77 or greater is required', str(ex)) - - def test_unshelve_v291(self): - self.app.client_manager.compute.api_version = ( - api_versions.APIVersion('2.91')) - - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server - arglist = [server.id] - verifylist = [('server', [server.id])] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(server.id) - server.unshelve.assert_called_with() + self.compute_client.find_server.assert_called_once_with( + self.server.id, + ignore_missing=False, + ) + self.compute_client.unshelve_server.assert_called_once_with( + self.server.id, + availability_zone='foo-az', + ) - def 
test_unshelve_with_specified_az_v291(self): - self.app.client_manager.compute.api_version = ( - api_versions.APIVersion('2.91')) + def test_unshelve_with_az_pre_v277(self): + self.set_compute_api_version('2.76') - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server arglist = [ - '--availability-zone', "foo-az", - server.id, + self.server.id, + '--availability-zone', + 'foo-az', ] verifylist = [ - ('availability_zone', "foo-az"), - ('server', [server.id]) + ('availability_zone', 'foo-az'), + ('server', [self.server.id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - self.servers_mock.get.assert_called_with(server.id) - server.unshelve.assert_called_with(availability_zone="foo-az") + ex = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertIn( + '--os-compute-api-version 2.77 or greater is required ', + str(ex), + ) - def test_unshelve_with_specified_host_v291(self): - self.app.client_manager.compute.api_version = ( - api_versions.APIVersion('2.91')) + def test_unshelve_with_host(self): + self.set_compute_api_version('2.91') - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server arglist = [ - '--host', "server1", - server.id, - ] - verifylist = [ - ('host', "server1"), - ('server', [server.id]) + '--host', + 'server1', + self.server.id, ] + verifylist = [('host', 'server1'), ('server', [self.server.id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(server.id) - server.unshelve.assert_called_with(host="server1") + self.compute_client.find_server.assert_called_once_with( + self.server.id, + ignore_missing=False, + ) + self.compute_client.unshelve_server.assert_called_once_with( + self.server.id, + host='server1', + ) - def test_unshelve_with_unpin_az_v291(self): - self.app.client_manager.compute.api_version = ( - api_versions.APIVersion('2.91')) + def test_unshelve_with_host_pre_v291(self): + self.set_compute_api_version('2.90') - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server - arglist = ['--no-availability-zone', server.id] - verifylist = [ - ('no_availability_zone', True), - ('server', [server.id]) + arglist = [ + '--host', + 'server1', + self.server.id, ] + verifylist = [('host', 'server1'), ('server', [self.server.id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - self.servers_mock.get.assert_called_with(server.id) - server.unshelve.assert_called_with(availability_zone=None) + ex = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertIn( + '--os-compute-api-version 2.91 or greater is required ' + 'to support the --host option', + str(ex), + ) - def test_unshelve_with_specified_az_and_host_v291(self): - self.app.client_manager.compute.api_version = ( - api_versions.APIVersion('2.91')) + def test_unshelve_with_no_az(self): + self.set_compute_api_version('2.91') - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server arglist = [ - '--host', "server1", - '--availability-zone', "foo-az", - server.id, + '--no-availability-zone', + 
self.server.id, ] verifylist = [ - ('host', "server1"), - ('availability_zone', "foo-az"), - ('server', [server.id]) + ('no_availability_zone', True), + ('server', [self.server.id]), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_with(server.id) + self.compute_client.find_server.assert_called_once_with( + self.server.id, + ignore_missing=False, + ) + self.compute_client.unshelve_server.assert_called_once_with( + self.server.id, + availability_zone=None, + ) - def test_unshelve_with_unpin_az_and_host_v291(self): - self.app.client_manager.compute.api_version = ( - api_versions.APIVersion('2.91')) + def test_unshelve_with_no_az_pre_v291(self): + self.set_compute_api_version('2.90') - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server arglist = [ - '--host', "server1", '--no-availability-zone', - server.id, + self.server.id, ] verifylist = [ - ('host', "server1"), ('no_availability_zone', True), - ('server', [server.id]) + ('server', [self.server.id]), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - - self.servers_mock.get.assert_called_with(server.id) + ex = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertIn( + '--os-compute-api-version 2.91 or greater is required ' + 'to support the --no-availability-zone option', + str(ex), + ) - def test_unshelve_fails_with_unpin_az_and_az_v291(self): - self.app.client_manager.compute.api_version = ( - api_versions.APIVersion('2.91')) + def test_unshelve_with_no_az_and_az_conflict(self): + self.set_compute_api_version('2.91') - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server arglist = [ - '--availability-zone', "foo-az", + '--availability-zone', + "foo-az", '--no-availability-zone', - server.id, + self.server.id, ] verifylist = [ ('availability_zone', "foo-az"), ('no_availability_zone', True), - ('server', [server.id]) + ('server', [self.server.id]), ] - ex = self.assertRaises(utils.ParserException, - self.check_parser, - self.cmd, arglist, verifylist) - self.assertIn('argument --no-availability-zone: not allowed ' - 'with argument --availability-zone', str(ex)) + ex = self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + self.assertIn( + 'argument --no-availability-zone: not allowed ' + 'with argument --availability-zone', + str(ex), + ) @mock.patch.object(common_utils, 'wait_for_status', return_value=True) def test_unshelve_with_wait(self, mock_wait_for_status): - server = compute_fakes.FakeServer.create_one_server( - attrs=self.attrs, methods=self.methods) - self.servers_mock.get.return_value = server - - arglist = ['--wait', server.name] + arglist = [ + '--wait', + self.server.name, + ] verifylist = [ - ('server', [server.name]), + ('server', [self.server.name]), ('wait', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -8732,11 +9711,14 @@ def test_unshelve_with_wait(self, mock_wait_for_status): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.servers_mock.get.assert_called_once_with(server.name) - server.unshelve.assert_called_once_with() + self.compute_client.find_server.assert_called_with( + self.server.name, + ignore_missing=False, + ) + 
self.compute_client.unshelve_server.assert_called_with(self.server.id) mock_wait_for_status.assert_called_once_with( - self.servers_mock.get, - server.id, + self.compute_client.get_server, + self.server.id, callback=mock.ANY, success_status=('active', 'shutoff'), ) @@ -8770,73 +9752,216 @@ class TestServerGeneral(TestServer): ODD = {'jenkins': ['10.3.3.18', '124.12.125.4']} def test_get_ip_address(self): - self.assertEqual("192.168.0.3", - server._get_ip_address(self.OLD, 'private', [4, 6])) - self.assertEqual("10.10.1.2", - server._get_ip_address(self.NEW, 'fixed', [4, 6])) - self.assertEqual("10.10.1.2", - server._get_ip_address(self.NEW, 'private', [4, 6])) - self.assertEqual("0:0:0:0:0:ffff:a0a:103", - server._get_ip_address(self.NEW, 'public', [6])) - self.assertEqual("0:0:0:0:0:ffff:a0a:103", - server._get_ip_address(self.NEW, 'floating', [6])) - self.assertEqual("124.12.125.4", - server._get_ip_address(self.ODD, 'public', [4, 6])) - self.assertEqual("10.3.3.18", - server._get_ip_address(self.ODD, 'private', [4, 6])) - self.assertRaises(exceptions.CommandError, - server._get_ip_address, self.NEW, 'public', [4]) - self.assertRaises(exceptions.CommandError, - server._get_ip_address, self.NEW, 'admin', [4]) - self.assertRaises(exceptions.CommandError, - server._get_ip_address, self.OLD, 'public', [4, 6]) - self.assertRaises(exceptions.CommandError, - server._get_ip_address, self.OLD, 'private', [6]) - - @mock.patch('osc_lib.utils.find_resource') - def test_prep_server_detail(self, find_resource): - # Setup mock method return value. utils.find_resource() will be called - # three times in _prep_server_detail(): - # - The first time, return server info. - # - The second time, return image info. - # - The third time, return flavor info. + self.assertEqual( + "192.168.0.3", server._get_ip_address(self.OLD, 'private', [4, 6]) + ) + self.assertEqual( + "10.10.1.2", server._get_ip_address(self.NEW, 'fixed', [4, 6]) + ) + self.assertEqual( + "10.10.1.2", server._get_ip_address(self.NEW, 'private', [4, 6]) + ) + self.assertEqual( + "0:0:0:0:0:ffff:a0a:103", + server._get_ip_address(self.NEW, 'public', [6]), + ) + self.assertEqual( + "0:0:0:0:0:ffff:a0a:103", + server._get_ip_address(self.NEW, 'floating', [6]), + ) + self.assertEqual( + "124.12.125.4", server._get_ip_address(self.ODD, 'public', [4, 6]) + ) + self.assertEqual( + "10.3.3.18", server._get_ip_address(self.ODD, 'private', [4, 6]) + ) + self.assertRaises( + exceptions.CommandError, + server._get_ip_address, + self.NEW, + 'public', + [4], + ) + self.assertRaises( + exceptions.CommandError, + server._get_ip_address, + self.NEW, + 'admin', + [4], + ) + self.assertRaises( + exceptions.CommandError, + server._get_ip_address, + self.OLD, + 'public', + [4, 6], + ) + self.assertRaises( + exceptions.CommandError, + server._get_ip_address, + self.OLD, + 'private', + [6], + ) + + def test_prep_server_detail(self): _image = image_fakes.create_one_image() - _flavor = compute_fakes.FakeFlavor.create_one_flavor() + self.image_client.get_image.return_value = _image + + _flavor = compute_fakes.create_one_flavor() + self.compute_client.find_flavor.return_value = _flavor + server_info = { - 'image': {u'id': _image.id}, - 'flavor': {u'id': _flavor.id}, - 'tenant_id': u'tenant-id-xxx', - 'networks': {u'public': [u'10.20.30.40', u'2001:db8::f']}, - 'links': u'http://xxx.yyy.com', + 'image': {'id': _image.id}, + 'flavor': {'id': _flavor.id}, + 'tenant_id': 'tenant-id-xxx', + 'addresses': {'public': ['10.20.30.40', '2001:db8::f']}, + 'links': 
'http://xxx.yyy.com', 'properties': '', 'volumes_attached': [{"id": "6344fe9d-ef20-45b2-91a6"}], } - _server = compute_fakes.FakeServer.create_one_server(attrs=server_info) - find_resource.side_effect = [_server, _flavor] - self.get_image_mock.return_value = _image - - # Prepare result data. - info = { + _server = compute_fakes.create_one_server(server_info) + self.compute_client.get_server.return_value = _server + + expected = { + 'OS-DCF:diskConfig': None, + 'OS-EXT-AZ:availability_zone': None, + 'OS-EXT-SRV-ATTR:host': None, + 'OS-EXT-SRV-ATTR:hostname': None, + 'OS-EXT-SRV-ATTR:hypervisor_hostname': None, + 'OS-EXT-SRV-ATTR:instance_name': None, + 'OS-EXT-SRV-ATTR:kernel_id': None, + 'OS-EXT-SRV-ATTR:launch_index': None, + 'OS-EXT-SRV-ATTR:ramdisk_id': None, + 'OS-EXT-SRV-ATTR:reservation_id': None, + 'OS-EXT-SRV-ATTR:root_device_name': None, + 'OS-EXT-SRV-ATTR:user_data': None, + 'OS-EXT-STS:power_state': server.PowerStateColumn( + _server.power_state + ), + 'OS-EXT-STS:task_state': None, + 'OS-EXT-STS:vm_state': None, + 'OS-SRV-USG:launched_at': None, + 'OS-SRV-USG:terminated_at': None, + 'accessIPv4': None, + 'accessIPv6': None, + 'addresses': server.AddressesColumn(_server.addresses), + 'config_drive': None, + 'created': None, + 'description': None, + 'flavor': f'{_flavor.name} ({_flavor.id})', + 'hostId': None, + 'host_status': None, 'id': _server.id, + 'image': f'{_image.name} ({_image.id})', + 'key_name': None, + 'locked': None, + 'locked_reason': None, 'name': _server.name, - 'image': '%s (%s)' % (_image.name, _image.id), - 'flavor': '%s (%s)' % (_flavor.name, _flavor.id), - 'OS-EXT-STS:power_state': server.PowerStateColumn( - getattr(_server, 'OS-EXT-STS:power_state')), + 'progress': None, + 'project_id': 'tenant-id-xxx', + 'properties': format_columns.DictColumn({}), + 'server_groups': None, + 'status': None, + 'tags': format_columns.ListColumn([]), + 'trusted_image_certificates': None, + 'updated': None, + 'user_id': None, + 'volumes_attached': format_columns.ListDictColumn([]), + } + + actual = server._prep_server_detail( + self.compute_client, + self.image_client, + _server, + ) + + self.assertCountEqual(expected, actual) + # this should be called since we need the flavor (< 2.47) + self.compute_client.find_flavor.assert_called_once_with( + _flavor.id, ignore_missing=False + ) + + def test_prep_server_detail_v247(self): + _image = image_fakes.create_one_image() + self.image_client.get_image.return_value = _image + + _flavor = compute_fakes.create_one_flavor() + self.compute_client.find_flavor.return_value = _flavor + + server_info = { + 'image': {'id': _image.id}, + 'flavor': { + 'vcpus': _flavor.vcpus, + 'ram': _flavor.ram, + 'disk': _flavor.disk, + 'ephemeral': _flavor.ephemeral, + 'swap': _flavor.swap, + 'original_name': _flavor.name, + 'extra_specs': {}, + }, + 'tenant_id': 'tenant-id-xxx', + 'addresses': {'public': ['10.20.30.40', '2001:db8::f']}, + 'links': 'http://xxx.yyy.com', 'properties': '', 'volumes_attached': [{"id": "6344fe9d-ef20-45b2-91a6"}], - 'addresses': format_columns.DictListColumn(_server.networks), + } + _server = compute_fakes.create_one_server(server_info) + self.compute_client.get_server.return_value = _server + + expected = { + 'OS-DCF:diskConfig': None, + 'OS-EXT-AZ:availability_zone': None, + 'OS-EXT-SRV-ATTR:host': None, + 'OS-EXT-SRV-ATTR:hostname': None, + 'OS-EXT-SRV-ATTR:hypervisor_hostname': None, + 'OS-EXT-SRV-ATTR:instance_name': None, + 'OS-EXT-SRV-ATTR:kernel_id': None, + 'OS-EXT-SRV-ATTR:launch_index': None, + 
'OS-EXT-SRV-ATTR:ramdisk_id': None, + 'OS-EXT-SRV-ATTR:reservation_id': None, + 'OS-EXT-SRV-ATTR:root_device_name': None, + 'OS-EXT-SRV-ATTR:user_data': None, + 'OS-EXT-STS:power_state': server.PowerStateColumn( + _server.power_state + ), + 'OS-EXT-STS:task_state': None, + 'OS-EXT-STS:vm_state': None, + 'OS-SRV-USG:launched_at': None, + 'OS-SRV-USG:terminated_at': None, + 'accessIPv4': None, + 'accessIPv6': None, + 'addresses': server.AddressesColumn(_server.addresses), + 'config_drive': None, + 'created': None, + 'description': None, + 'flavor': f'{_flavor.name} ({_flavor.id})', + 'hostId': None, + 'host_status': None, + 'id': _server.id, + 'image': f'{_image.name} ({_image.id})', + 'key_name': None, + 'locked': None, + 'locked_reason': None, + 'name': _server.name, + 'progress': None, 'project_id': 'tenant-id-xxx', + 'properties': format_columns.DictColumn({}), + 'server_groups': None, + 'status': None, + 'tags': format_columns.ListColumn([]), + 'trusted_image_certificates': None, + 'updated': None, + 'user_id': None, + 'volumes_attached': format_columns.ListDictColumn([]), } - # Call _prep_server_detail(). - server_detail = server._prep_server_detail( - self.app.client_manager.compute, - self.app.client_manager.image, - _server + actual = server._prep_server_detail( + self.compute_client, + self.image_client, + _server, ) - # 'networks' is used to create _server. Remove it. - server_detail.pop('networks') - # Check the results. - self.assertCountEqual(info, server_detail) + self.assertCountEqual(expected, actual) + # this shouldn't be called since we have a full flavor (>= 2.47) + self.compute_client.find_flavor.assert_not_called() diff --git a/openstackclient/tests/unit/compute/v2/test_server_backup.py b/openstackclient/tests/unit/compute/v2/test_server_backup.py index 1a5e0a1225..cc8acfb34f 100644 --- a/openstackclient/tests/unit/compute/v2/test_server_backup.py +++ b/openstackclient/tests/unit/compute/v2/test_server_backup.py @@ -22,48 +22,16 @@ from openstackclient.tests.unit.image.v2 import fakes as image_fakes -class TestServerBackup(compute_fakes.TestComputev2): - - def setUp(self): - super(TestServerBackup, self).setUp() - - # Get a shortcut to the compute client ServerManager Mock - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - - # Get a shortcut to the image client ImageManager Mock - self.images_mock = self.app.client_manager.image - self.images_mock.find_image.reset_mock() - - # Set object attributes to be tested. Could be overwritten in subclass. - self.attrs = {} - - # Set object methods to be tested. Could be overwritten in subclass. 
- self.methods = {} - - def setup_servers_mock(self, count): - servers = compute_fakes.FakeServer.create_sdk_servers( - attrs=self.attrs, - methods=self.methods, - count=count, - ) - - # This is the return value for compute_client.find_server() - self.sdk_client.find_server = compute_fakes.FakeServer.get_servers( - servers, - 0, - ) - return servers - - -class TestServerBackupCreate(TestServerBackup): - - # Just return whatever Image is testing with these days +class TestServerBackupCreate(compute_fakes.TestComputev2): def image_columns(self, image): - # columnlist = tuple(sorted(image.keys())) columnlist = ( - 'id', 'name', 'owner', 'protected', 'status', 'tags', 'visibility' + 'id', + 'name', + 'owner', + 'protected', + 'status', + 'tags', + 'visibility', ) return columnlist @@ -80,49 +48,29 @@ def image_data(self, image): return datalist def setUp(self): - super(TestServerBackupCreate, self).setUp() - - # Get the command object to test - self.cmd = server_backup.CreateServerBackup(self.app, None) + super().setUp() - self.methods = { - 'backup': None, - } + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server - def setup_images_mock(self, count, servers=None): - if servers: - images = image_fakes.create_images( - attrs={ - 'name': servers[0].name, - 'status': 'active', - }, - count=count, - ) - else: - images = image_fakes.create_images( - attrs={ - 'status': 'active', - }, - count=count, - ) + self.image = image_fakes.create_one_image( + {'name': self.server.name, 'status': 'active'} + ) + self.image_client.find_image.return_value = self.image - # self.images_mock.get = mock.Mock(side_effect=images) - self.images_mock.find_image = mock.Mock(side_effect=images) - return images + # Get the command object to test + self.cmd = server_backup.CreateServerBackup(self.app, None) def test_server_backup_defaults(self): - servers = self.setup_servers_mock(count=1) - images = self.setup_images_mock(count=1, servers=servers) - arglist = [ - servers[0].id, + self.server.id, ] verifylist = [ ('name', None), ('type', None), ('rotate', None), ('wait', False), - ('server', servers[0].id), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -131,31 +79,31 @@ def test_server_backup_defaults(self): # data to be shown. columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.backup_server.assert_called_with( - servers[0].id, - servers[0].name, + self.compute_client.backup_server.assert_called_with( + self.server.id, + self.server.name, '', 1, ) - self.assertEqual(self.image_columns(images[0]), columns) - self.assertCountEqual(self.image_data(images[0]), data) + self.assertEqual(self.image_columns(self.image), columns) + self.assertCountEqual(self.image_data(self.image), data) def test_server_backup_create_options(self): - servers = self.setup_servers_mock(count=1) - images = self.setup_images_mock(count=1, servers=servers) - arglist = [ - '--name', 'image', - '--type', 'daily', - '--rotate', '2', - servers[0].id, + '--name', + 'image', + '--type', + 'daily', + '--rotate', + '2', + self.server.id, ] verifylist = [ ('name', 'image'), ('type', 'daily'), ('rotate', 2), - ('server', servers[0].id), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -164,35 +112,33 @@ def test_server_backup_create_options(self): # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.backup_server.assert_called_with( - servers[0].id, + self.compute_client.backup_server.assert_called_with( + self.server.id, 'image', 'daily', 2, ) - self.assertEqual(self.image_columns(images[0]), columns) - self.assertCountEqual(self.image_data(images[0]), data) + self.assertEqual(self.image_columns(self.image), columns) + self.assertCountEqual(self.image_data(self.image), data) @mock.patch.object(common_utils, 'wait_for_status', return_value=False) def test_server_backup_wait_fail(self, mock_wait_for_status): - servers = self.setup_servers_mock(count=1) - images = self.setup_images_mock(count=1, servers=servers) - self.images_mock.get_image = mock.Mock( - side_effect=images[0], - ) + self.image_client.get_image.return_value = self.image arglist = [ - '--name', 'image', - '--type', 'daily', + '--name', + 'image', + '--type', + 'daily', '--wait', - servers[0].id, + self.server.id, ] verifylist = [ ('name', 'image'), ('type', 'daily'), ('wait', True), - ('server', servers[0].id), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -202,39 +148,34 @@ def test_server_backup_wait_fail(self, mock_wait_for_status): parsed_args, ) - self.sdk_client.backup_server.assert_called_with( - servers[0].id, + self.compute_client.backup_server.assert_called_with( + self.server.id, 'image', 'daily', 1, ) mock_wait_for_status.assert_called_once_with( - self.images_mock.get_image, - images[0].id, - callback=mock.ANY + self.image_client.get_image, self.image.id, callback=mock.ANY ) @mock.patch.object(common_utils, 'wait_for_status', return_value=True) def test_server_backup_wait_ok(self, mock_wait_for_status): - servers = self.setup_servers_mock(count=1) - images = self.setup_images_mock(count=1, servers=servers) - - self.images_mock.get_image = mock.Mock( - side_effect=images[0], - ) + self.image_client.get_image.side_effect = (self.image,) arglist = [ - '--name', 'image', - '--type', 'daily', + '--name', + 'image', + '--type', + 'daily', '--wait', - servers[0].id, + self.server.id, ] verifylist = [ ('name', 'image'), ('type', 'daily'), ('wait', True), - ('server', servers[0].id), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -243,18 +184,16 @@ def test_server_backup_wait_ok(self, mock_wait_for_status): # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.backup_server.assert_called_with( - servers[0].id, + self.compute_client.backup_server.assert_called_with( + self.server.id, 'image', 'daily', 1, ) mock_wait_for_status.assert_called_once_with( - self.images_mock.get_image, - images[0].id, - callback=mock.ANY + self.image_client.get_image, self.image.id, callback=mock.ANY ) - self.assertEqual(self.image_columns(images[0]), columns) - self.assertCountEqual(self.image_data(images[0]), data) + self.assertEqual(self.image_columns(self.image), columns) + self.assertCountEqual(self.image_data(self.image), data) diff --git a/openstackclient/tests/unit/compute/v2/test_server_event.py b/openstackclient/tests/unit/compute/v2/test_server_event.py index 058a44d8b6..710a82ea78 100644 --- a/openstackclient/tests/unit/compute/v2/test_server_event.py +++ b/openstackclient/tests/unit/compute/v2/test_server_event.py @@ -15,31 +15,15 @@ from unittest import mock import iso8601 -from novaclient import api_versions from osc_lib import exceptions from openstackclient.compute.v2 import server_event from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -class TestServerEvent(compute_fakes.TestComputev2): - - fake_server = compute_fakes.FakeServer.create_one_server() - - def setUp(self): - super(TestServerEvent, self).setUp() - - self.servers_mock = self.app.client_manager.compute.servers - self.servers_mock.reset_mock() - self.events_mock = self.app.client_manager.compute.instance_action - self.events_mock.reset_mock() - - self.servers_mock.get.return_value = self.fake_server - - -class TestListServerEvent(TestServerEvent): - - fake_event = compute_fakes.FakeServerEvent.create_one_server_event() +class TestListServerEvent(compute_fakes.TestComputev2): + fake_server = compute_fakes.create_one_server() + fake_event = compute_fakes.create_one_server_action() columns = ( 'Request ID', @@ -47,12 +31,14 @@ class TestListServerEvent(TestServerEvent): 'Action', 'Start Time', ) - data = (( - fake_event.request_id, - fake_event.instance_uuid, - fake_event.action, - fake_event.start_time, - ), ) + data = ( + ( + fake_event.request_id, + fake_event.server_id, + fake_event.action, + fake_event.start_time, + ), + ) long_columns = ( 'Request ID', @@ -63,20 +49,26 @@ class TestListServerEvent(TestServerEvent): 'Project ID', 'User ID', ) - long_data = (( - fake_event.request_id, - fake_event.instance_uuid, - fake_event.action, - fake_event.start_time, - fake_event.message, - fake_event.project_id, - fake_event.user_id, - ), ) + long_data = ( + ( + fake_event.request_id, + fake_event.server_id, + fake_event.action, + fake_event.start_time, + fake_event.message, + fake_event.project_id, + fake_event.user_id, + ), + ) def setUp(self): - super(TestListServerEvent, self).setUp() + super().setUp() + + self.compute_client.find_server.return_value = self.fake_server + self.compute_client.server_actions.return_value = [ + self.fake_event, + ] - self.events_mock.list.return_value = [self.fake_event, ] self.cmd = server_event.ListServerEvent(self.app, None) def test_server_event_list(self): @@ -89,11 +81,15 @@ def test_server_event_list(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_once_with(self.fake_server.name) - self.events_mock.list.assert_called_once_with(self.fake_server.id) + self.compute_client.find_server.assert_called_with( + self.fake_server.name, + ignore_missing=False, + ) + 
self.compute_client.server_actions.assert_called_with( + self.fake_server.id + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) @@ -109,21 +105,25 @@ def test_server_event_list_long(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_once_with(self.fake_server.name) - self.events_mock.list.assert_called_once_with(self.fake_server.id) + self.compute_client.find_server.assert_called_with( + self.fake_server.name, + ignore_missing=False, + ) + self.compute_client.server_actions.assert_called_with( + self.fake_server.id + ) self.assertEqual(self.long_columns, columns) self.assertEqual(self.long_data, tuple(data)) def test_server_event_list_with_changes_since(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.58') + self.set_compute_api_version('2.58') arglist = [ - '--changes-since', '2016-03-04T06:27:59Z', + '--changes-since', + '2016-03-04T06:27:59Z', self.fake_server.name, ] verifylist = [ @@ -134,22 +134,28 @@ def test_server_event_list_with_changes_since(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_once_with(self.fake_server.name) - self.events_mock.list.assert_called_once_with( - self.fake_server.id, changes_since='2016-03-04T06:27:59Z') + self.compute_client.find_server.assert_called_with( + self.fake_server.name, + ignore_missing=False, + ) + self.compute_client.server_actions.assert_called_with( + self.fake_server.id, + changes_since='2016-03-04T06:27:59Z', + ) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) @mock.patch.object(iso8601, 'parse_date', side_effect=iso8601.ParseError) def test_server_event_list_with_changes_since_invalid( - self, mock_parse_isotime, + self, + mock_parse_isotime, ): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.58') + self.set_compute_api_version('2.58') arglist = [ - '--changes-since', 'Invalid time value', + '--changes-since', + 'Invalid time value', self.fake_server.name, ] verifylist = [ @@ -161,20 +167,18 @@ def test_server_event_list_with_changes_since_invalid( ex = self.assertRaises( exceptions.CommandError, self.cmd.take_action, - parsed_args) - - self.assertIn( - 'Invalid changes-since value:', str(ex)) - mock_parse_isotime.assert_called_once_with( - 'Invalid time value' + parsed_args, ) + self.assertIn('Invalid changes-since value:', str(ex)) + mock_parse_isotime.assert_called_once_with('Invalid time value') + def test_server_event_list_with_changes_since_pre_v258(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.57') + self.set_compute_api_version('2.57') arglist = [ - '--changes-since', '2016-03-04T06:27:59Z', + '--changes-since', + '2016-03-04T06:27:59Z', self.fake_server.name, ] verifylist = [ @@ -186,17 +190,20 @@ def test_server_event_list_with_changes_since_pre_v258(self): ex = self.assertRaises( exceptions.CommandError, self.cmd.take_action, - parsed_args) + parsed_args, + ) self.assertIn( - '--os-compute-api-version 2.58 or greater is required', str(ex)) + '--os-compute-api-version 2.58 or greater is required', + str(ex), + ) def test_server_event_list_with_changes_before(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.66') + self.set_compute_api_version('2.66') arglist = [ - '--changes-before', 
'2016-03-04T06:27:59Z', + '--changes-before', + '2016-03-04T06:27:59Z', self.fake_server.name, ] verifylist = [ @@ -207,22 +214,28 @@ def test_server_event_list_with_changes_before(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_once_with(self.fake_server.name) - self.events_mock.list.assert_called_once_with( - self.fake_server.id, changes_before='2016-03-04T06:27:59Z') + self.compute_client.find_server.assert_called_with( + self.fake_server.name, + ignore_missing=False, + ) + self.compute_client.server_actions.assert_called_with( + self.fake_server.id, + changes_before='2016-03-04T06:27:59Z', + ) self.assertEqual(self.columns, columns) self.assertEqual(tuple(self.data), tuple(data)) @mock.patch.object(iso8601, 'parse_date', side_effect=iso8601.ParseError) def test_server_event_list_with_changes_before_invalid( - self, mock_parse_isotime, + self, + mock_parse_isotime, ): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.66') + self.set_compute_api_version('2.66') arglist = [ - '--changes-before', 'Invalid time value', + '--changes-before', + 'Invalid time value', self.fake_server.name, ] verifylist = [ @@ -232,22 +245,18 @@ def test_server_event_list_with_changes_before_invalid( parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - - self.assertIn( - 'Invalid changes-before value:', str(ex)) - mock_parse_isotime.assert_called_once_with( - 'Invalid time value' + exceptions.CommandError, self.cmd.take_action, parsed_args ) + self.assertIn('Invalid changes-before value:', str(ex)) + mock_parse_isotime.assert_called_once_with('Invalid time value') + def test_server_event_list_with_changes_before_pre_v266(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.65') + self.set_compute_api_version('2.65') arglist = [ - '--changes-before', '2016-03-04T06:27:59Z', + '--changes-before', + '2016-03-04T06:27:59Z', self.fake_server.name, ] verifylist = [ @@ -257,19 +266,20 @@ def test_server_event_list_with_changes_before_pre_v266(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.66 or greater is required', str(ex)) + '--os-compute-api-version 2.66 or greater is required', + str(ex), + ) def test_server_event_list_with_limit(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.58') + self.set_compute_api_version('2.58') arglist = [ - '--limit', '1', + '--limit', + '1', self.fake_server.name, ] verifylist = [ @@ -280,15 +290,18 @@ def test_server_event_list_with_limit(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.events_mock.list.assert_called_once_with( - self.fake_server.id, limit=1) + self.compute_client.server_actions.assert_called_with( + self.fake_server.id, + limit=1, + paginated=False, + ) def test_server_event_list_with_limit_pre_v258(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.57') + self.set_compute_api_version('2.57') arglist = [ - '--limit', '1', + '--limit', + '1', self.fake_server.name, ] verifylist = [ @@ -300,17 +313,20 @@ def 
test_server_event_list_with_limit_pre_v258(self): ex = self.assertRaises( exceptions.CommandError, self.cmd.take_action, - parsed_args) + parsed_args, + ) self.assertIn( - '--os-compute-api-version 2.58 or greater is required', str(ex)) + '--os-compute-api-version 2.58 or greater is required', + str(ex), + ) def test_server_event_list_with_marker(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.58') + self.set_compute_api_version('2.58') arglist = [ - '--marker', 'test_event', + '--marker', + 'test_event', self.fake_server.name, ] verifylist = [ @@ -321,15 +337,17 @@ def test_server_event_list_with_marker(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.events_mock.list.assert_called_once_with( - self.fake_server.id, marker='test_event') + self.compute_client.server_actions.assert_called_with( + self.fake_server.id, + marker='test_event', + ) def test_server_event_list_with_marker_pre_v258(self): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.57') + self.set_compute_api_version('2.57') arglist = [ - '--marker', 'test_event', + '--marker', + 'test_event', self.fake_server.name, ] verifylist = [ @@ -339,22 +357,21 @@ def test_server_event_list_with_marker_pre_v258(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.58 or greater is required', str(ex)) - - -class TestShowServerEvent(TestServerEvent): + '--os-compute-api-version 2.58 or greater is required', str(ex) + ) - fake_event = compute_fakes.FakeServerEvent.create_one_server_event() +class TestShowServerEvent(compute_fakes.TestComputev2): + fake_server = compute_fakes.create_one_server() + fake_event = compute_fakes.create_one_server_action() columns = ( 'action', 'events', - 'instance_uuid', + 'id', 'message', 'project_id', 'request_id', @@ -363,8 +380,8 @@ class TestShowServerEvent(TestServerEvent): ) data = ( fake_event.action, - fake_event.events, - fake_event.instance_uuid, + server_event.ServerActionEventColumn(fake_event.events), + fake_event.id, fake_event.message, fake_event.project_id, fake_event.request_id, @@ -373,9 +390,11 @@ class TestShowServerEvent(TestServerEvent): ) def setUp(self): - super(TestShowServerEvent, self).setUp() + super().setUp() + + self.compute_client.find_server.return_value = self.fake_server + self.compute_client.get_server_action.return_value = self.fake_event - self.events_mock.get.return_value = self.fake_event self.cmd = server_event.ShowServerEvent(self.app, None) def test_server_event_show(self): @@ -389,12 +408,16 @@ def test_server_event_show(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = self.cmd.take_action(parsed_args) - self.servers_mock.get.assert_called_once_with(self.fake_server.name) - self.events_mock.get.assert_called_once_with( - self.fake_server.id, self.fake_event.request_id) + self.compute_client.find_server.assert_called_with( + self.fake_server.name, + ignore_missing=False, + ) + self.compute_client.get_server_action.assert_called_with( + self.fake_event.request_id, + self.fake_server.id, + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/compute/v2/test_server_group.py 
b/openstackclient/tests/unit/compute/v2/test_server_group.py index 655366a8e5..a91824a9cb 100644 --- a/openstackclient/tests/unit/compute/v2/test_server_group.py +++ b/openstackclient/tests/unit/compute/v2/test_server_group.py @@ -13,9 +13,8 @@ # under the License. # -from unittest import mock - -from openstack import utils as sdk_utils +from openstack.compute.v2 import server_group as _server_group +from openstack.test import fakes as sdk_fakes from osc_lib.cli import format_columns from osc_lib import exceptions @@ -25,51 +24,48 @@ class TestServerGroup(compute_fakes.TestComputev2): - - fake_server_group = compute_fakes.create_one_server_group() - - columns = ( - 'id', - 'members', - 'name', - 'policy', - 'project_id', - 'rules', - 'user_id', - ) - - data = ( - fake_server_group.id, - format_columns.ListColumn(fake_server_group.member_ids), - fake_server_group.name, - fake_server_group.policy, - fake_server_group.project_id, - format_columns.DictColumn(fake_server_group.rules), - fake_server_group.user_id, - ) - def setUp(self): super().setUp() - # Create and get a shortcut to the compute client mock - self.app.client_manager.sdk_connection = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - self.sdk_client.reset_mock() + self.fake_server_group = sdk_fakes.generate_fake_resource( + _server_group.ServerGroup + ) + + self.columns = ( + 'id', + 'members', + 'name', + 'policy', + 'project_id', + 'rules', + 'user_id', + ) + self.data = ( + self.fake_server_group.id, + format_columns.ListColumn(self.fake_server_group.member_ids), + self.fake_server_group.name, + self.fake_server_group.policy, + self.fake_server_group.project_id, + format_columns.DictColumn(self.fake_server_group.rules), + self.fake_server_group.user_id, + ) class TestServerGroupCreate(TestServerGroup): - def setUp(self): super().setUp() - self.sdk_client.create_server_group.return_value = \ + self.compute_client.create_server_group.return_value = ( self.fake_server_group + ) self.cmd = server_group.CreateServerGroup(self.app, None) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_group_create(self, sm_mock): + def test_server_group_create(self): + self.set_compute_api_version('2.64') + arglist = [ - '--policy', 'anti-affinity', + '--policy', + 'anti-affinity', 'affinity_group', ] verifylist = [ @@ -78,7 +74,7 @@ def test_server_group_create(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_server_group.assert_called_once_with( + self.compute_client.create_server_group.assert_called_once_with( name=parsed_args.name, policy=parsed_args.policy, ) @@ -86,10 +82,12 @@ def test_server_group_create(self, sm_mock): self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_group_create_with_soft_policies(self, sm_mock): + def test_server_group_create_with_soft_policies(self): + self.set_compute_api_version('2.64') + arglist = [ - '--policy', 'soft-anti-affinity', + '--policy', + 'soft-anti-affinity', 'affinity_group', ] verifylist = [ @@ -98,7 +96,7 @@ def test_server_group_create_with_soft_policies(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_server_group.assert_called_once_with( + 
self.compute_client.create_server_group.assert_called_once_with( name=parsed_args.name, policy=parsed_args.policy, ) @@ -106,10 +104,12 @@ def test_server_group_create_with_soft_policies(self, sm_mock): self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_server_group_create_with_soft_policies_pre_v215(self, sm_mock): + def test_server_group_create_with_soft_policies_pre_v215(self): + self.set_compute_api_version('2.14') + arglist = [ - '--policy', 'soft-anti-affinity', + '--policy', + 'soft-anti-affinity', 'affinity_group', ] verifylist = [ @@ -118,18 +118,20 @@ def test_server_group_create_with_soft_policies_pre_v215(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.15 or greater is required', - str(ex)) + '--os-compute-api-version 2.15 or greater is required', str(ex) + ) + + def test_server_group_create_with_rules(self): + self.set_compute_api_version('2.64') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_group_create_with_rules(self, sm_mock): arglist = [ - '--policy', 'soft-anti-affinity', - '--rule', 'max_server_per_host=2', + '--policy', + 'soft-anti-affinity', + '--rule', + 'max_server_per_host=2', 'affinity_group', ] verifylist = [ @@ -139,7 +141,7 @@ def test_server_group_create_with_rules(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_server_group.assert_called_once_with( + self.compute_client.create_server_group.assert_called_once_with( name=parsed_args.name, policy=parsed_args.policy, rules=parsed_args.rules, @@ -148,12 +150,14 @@ def test_server_group_create_with_rules(self, sm_mock): self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) - @mock.patch.object( - sdk_utils, 'supports_microversion', side_effect=[True, False]) - def test_server_group_create_with_rules_pre_v264(self, sm_mock): + def test_server_group_create_with_rules_pre_v264(self): + self.set_compute_api_version('2.63') + arglist = [ - '--policy', 'soft-anti-affinity', - '--rule', 'max_server_per_host=2', + '--policy', + 'soft-anti-affinity', + '--rule', + 'max_server_per_host=2', 'affinity_group', ] verifylist = [ @@ -164,20 +168,20 @@ def test_server_group_create_with_rules_pre_v264(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.64 or greater is required', - str(ex)) + '--os-compute-api-version 2.64 or greater is required', str(ex) + ) class TestServerGroupDelete(TestServerGroup): - def setUp(self): super().setUp() - self.sdk_client.find_server_group.return_value = self.fake_server_group + self.compute_client.find_server_group.return_value = ( + self.fake_server_group + ) self.cmd = server_group.DeleteServerGroup(self.app, None) def test_server_group_delete(self): @@ -189,150 +193,84 @@ def test_server_group_delete(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - 
self.sdk_client.find_server_group.assert_called_once_with( - 'affinity_group' + self.compute_client.find_server_group.assert_called_once_with( + 'affinity_group', ignore_missing=False ) - self.sdk_client.delete_server_group.assert_called_once_with( + self.compute_client.delete_server_group.assert_called_once_with( self.fake_server_group.id ) self.assertIsNone(result) def test_server_group_multiple_delete(self): - arglist = [ - 'affinity_group', - 'anti_affinity_group' - ] + arglist = ['affinity_group', 'anti_affinity_group'] verifylist = [ ('server_group', ['affinity_group', 'anti_affinity_group']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.find_server_group.assert_any_call('affinity_group') - self.sdk_client.find_server_group.assert_any_call( - 'anti_affinity_group' + self.compute_client.find_server_group.assert_any_call( + 'affinity_group', ignore_missing=False ) - self.sdk_client.delete_server_group.assert_called_with( + self.compute_client.find_server_group.assert_any_call( + 'anti_affinity_group', ignore_missing=False + ) + self.compute_client.delete_server_group.assert_called_with( self.fake_server_group.id ) - self.assertEqual(2, self.sdk_client.find_server_group.call_count) - self.assertEqual(2, self.sdk_client.delete_server_group.call_count) + self.assertEqual(2, self.compute_client.find_server_group.call_count) + self.assertEqual(2, self.compute_client.delete_server_group.call_count) self.assertIsNone(result) def test_server_group_delete_no_input(self): arglist = [] verifylist = None - self.assertRaises(tests_utils.ParserException, - self.check_parser, - self.cmd, - arglist, - verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_server_group_multiple_delete_with_exception(self): - arglist = [ - 'affinity_group', - 'anti_affinity_group' - ] + arglist = ['affinity_group', 'anti_affinity_group'] verifylist = [ ('server_group', ['affinity_group', 'anti_affinity_group']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.sdk_client.find_server_group.side_effect = [ - self.fake_server_group, exceptions.CommandError] + self.compute_client.find_server_group.side_effect = [ + self.fake_server_group, + exceptions.CommandError, + ] try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual('1 of 2 server groups failed to delete.', str(e)) - self.sdk_client.find_server_group.assert_any_call('affinity_group') - self.sdk_client.find_server_group.assert_any_call( - 'anti_affinity_group' + self.compute_client.find_server_group.assert_any_call( + 'affinity_group', ignore_missing=False ) - self.assertEqual(2, self.sdk_client.find_server_group.call_count) - self.sdk_client.delete_server_group.assert_called_once_with( + self.compute_client.find_server_group.assert_any_call( + 'anti_affinity_group', ignore_missing=False + ) + self.assertEqual(2, self.compute_client.find_server_group.call_count) + self.compute_client.delete_server_group.assert_called_once_with( self.fake_server_group.id ) class TestServerGroupList(TestServerGroup): - - list_columns = ( - 'ID', - 'Name', - 'Policies', - ) - - list_columns_long = ( - 'ID', - 'Name', - 'Policies', - 'Members', - 'Project Id', - 'User Id', - ) - - list_columns_v264 = ( - 'ID', - 'Name', - 'Policy', - ) - - list_columns_v264_long = ( - 'ID', - 'Name', - 'Policy', - 'Members', - 'Project Id', - 
'User Id', - ) - - list_data = (( - TestServerGroup.fake_server_group.id, - TestServerGroup.fake_server_group.name, - format_columns.ListColumn( - TestServerGroup.fake_server_group.policies - ), - ),) - - list_data_long = (( - TestServerGroup.fake_server_group.id, - TestServerGroup.fake_server_group.name, - format_columns.ListColumn( - TestServerGroup.fake_server_group.policies - ), - format_columns.ListColumn( - TestServerGroup.fake_server_group.member_ids - ), - TestServerGroup.fake_server_group.project_id, - TestServerGroup.fake_server_group.user_id, - ),) - - list_data_v264 = (( - TestServerGroup.fake_server_group.id, - TestServerGroup.fake_server_group.name, - TestServerGroup.fake_server_group.policy, - ),) - - list_data_v264_long = (( - TestServerGroup.fake_server_group.id, - TestServerGroup.fake_server_group.name, - TestServerGroup.fake_server_group.policy, - format_columns.ListColumn( - TestServerGroup.fake_server_group.member_ids - ), - TestServerGroup.fake_server_group.project_id, - TestServerGroup.fake_server_group.user_id, - ),) - def setUp(self): super().setUp() - self.sdk_client.server_groups.return_value = [self.fake_server_group] + self.compute_client.server_groups.return_value = [ + self.fake_server_group + ] self.cmd = server_group.ListServerGroup(self.app, None) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_server_group_list(self, sm_mock): + def test_server_group_list(self): arglist = [] verifylist = [ ('all_projects', False), @@ -343,13 +281,25 @@ def test_server_group_list(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.server_groups.assert_called_once_with() + self.compute_client.server_groups.assert_called_once_with() + + expected_columns = ( + 'ID', + 'Name', + 'Policies', + ) + expected_data = ( + ( + self.fake_server_group.id, + self.fake_server_group.name, + format_columns.ListColumn(self.fake_server_group.policies), + ), + ) - self.assertCountEqual(self.list_columns, columns) - self.assertCountEqual(self.list_data, tuple(data)) + self.assertCountEqual(expected_columns, columns) + self.assertCountEqual(expected_data, tuple(data)) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False) - def test_server_group_list_with_all_projects_and_long(self, sm_mock): + def test_server_group_list_with_all_projects_and_long(self): arglist = [ '--all-projects', '--long', @@ -362,16 +312,36 @@ def test_server_group_list_with_all_projects_and_long(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.server_groups.assert_called_once_with( - all_projects=True) + self.compute_client.server_groups.assert_called_once_with( + all_projects=True + ) - self.assertCountEqual(self.list_columns_long, columns) - self.assertCountEqual(self.list_data_long, tuple(data)) + expected_columns = ( + 'ID', + 'Name', + 'Policies', + 'Members', + 'Project Id', + 'User Id', + ) + expected_data = ( + ( + self.fake_server_group.id, + self.fake_server_group.name, + format_columns.ListColumn(self.fake_server_group.policies), + format_columns.ListColumn(self.fake_server_group.member_ids), + self.fake_server_group.project_id, + self.fake_server_group.user_id, + ), + ) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_group_list_with_limit(self, sm_mock): + self.assertCountEqual(expected_columns, columns) + 
self.assertCountEqual(expected_data, tuple(data)) + + def test_server_group_list_with_limit(self): arglist = [ - '--limit', '1', + '--limit', + '1', ] verifylist = [ ('all_projects', False), @@ -383,12 +353,12 @@ def test_server_group_list_with_limit(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.sdk_client.server_groups.assert_called_once_with(limit=1) + self.compute_client.server_groups.assert_called_once_with(limit=1) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_group_list_with_offset(self, sm_mock): + def test_server_group_list_with_offset(self): arglist = [ - '--offset', '5', + '--offset', + '5', ] verifylist = [ ('all_projects', False), @@ -400,10 +370,11 @@ def test_server_group_list_with_offset(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.sdk_client.server_groups.assert_called_once_with(offset=5) + self.compute_client.server_groups.assert_called_once_with(offset=5) + + def test_server_group_list_v264(self): + self.set_compute_api_version('2.64') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_group_list_v264(self, sm_mock): arglist = [] verifylist = [ ('all_projects', False), @@ -411,13 +382,27 @@ def test_server_group_list_v264(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.server_groups.assert_called_once_with() + self.compute_client.server_groups.assert_called_once_with() - self.assertCountEqual(self.list_columns_v264, columns) - self.assertCountEqual(self.list_data_v264, tuple(data)) + expected_columns = ( + 'ID', + 'Name', + 'Policy', + ) + expected_data = ( + ( + self.fake_server_group.id, + self.fake_server_group.name, + self.fake_server_group.policy, + ), + ) + + self.assertCountEqual(expected_columns, columns) + self.assertCountEqual(expected_data, tuple(data)) + + def test_server_group_list_with_all_projects_and_long_v264(self): + self.set_compute_api_version('2.64') - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_group_list_with_all_projects_and_long_v264(self, sm_mock): arglist = [ '--all-projects', '--long', @@ -428,23 +413,45 @@ def test_server_group_list_with_all_projects_and_long_v264(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.server_groups.assert_called_once_with( - all_projects=True) + self.compute_client.server_groups.assert_called_once_with( + all_projects=True + ) - self.assertCountEqual(self.list_columns_v264_long, columns) - self.assertCountEqual(self.list_data_v264_long, tuple(data)) + expected_columns = ( + 'ID', + 'Name', + 'Policy', + 'Members', + 'Project Id', + 'User Id', + ) + expected_data = ( + ( + self.fake_server_group.id, + self.fake_server_group.name, + self.fake_server_group.policy, + format_columns.ListColumn(self.fake_server_group.member_ids), + self.fake_server_group.project_id, + self.fake_server_group.user_id, + ), + ) + self.assertCountEqual(expected_columns, columns) + self.assertCountEqual(expected_data, tuple(data)) -class TestServerGroupShow(TestServerGroup): +class TestServerGroupShow(TestServerGroup): def setUp(self): super().setUp() - self.sdk_client.find_server_group.return_value = self.fake_server_group + 
self.compute_client.find_server_group.return_value = ( + self.fake_server_group + ) self.cmd = server_group.ShowServerGroup(self.app, None) - @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True) - def test_server_group_show(self, sm_mock): + def test_server_group_show(self): + self.set_compute_api_version('2.64') + arglist = [ 'affinity_group', ] diff --git a/openstackclient/tests/unit/compute/v2/test_server_image.py b/openstackclient/tests/unit/compute/v2/test_server_image.py index db0e1d9e98..726bc81ecf 100644 --- a/openstackclient/tests/unit/compute/v2/test_server_image.py +++ b/openstackclient/tests/unit/compute/v2/test_server_image.py @@ -21,47 +21,16 @@ from openstackclient.tests.unit.image.v2 import fakes as image_fakes -class TestServerImage(compute_fakes.TestComputev2): - - def setUp(self): - super(TestServerImage, self).setUp() - - # Get a shortcut to the compute client ServerManager Mock - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - - # Get a shortcut to the image client ImageManager Mock - self.images_mock = self.app.client_manager.image - self.images_mock.find_image.reset_mock() - - # Set object attributes to be tested. Could be overwritten in subclass. - self.attrs = {} - - # Set object methods to be tested. Could be overwritten in subclass. - self.methods = {} - - def setup_servers_mock(self, count): - servers = compute_fakes.FakeServer.create_sdk_servers( - attrs=self.attrs, - methods=self.methods, - count=count, - ) - - # This is the return value for compute_client.find_server() - self.sdk_client.find_server = compute_fakes.FakeServer.get_servers( - servers, - 0, - ) - return servers - - -class TestServerImageCreate(TestServerImage): - +class TestServerImageCreate(compute_fakes.TestComputev2): def image_columns(self, image): - # columnlist = tuple(sorted(image.keys())) columnlist = ( - 'id', 'name', 'owner', 'protected', 'status', 'tags', 'visibility' + 'id', + 'name', + 'owner', + 'protected', + 'status', + 'tags', + 'visibility', ) return columnlist @@ -78,47 +47,26 @@ def image_data(self, image): return datalist def setUp(self): - super(TestServerImageCreate, self).setUp() + super().setUp() - # Get the command object to test - self.cmd = server_image.CreateServerImage(self.app, None) + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server - self.methods = { - 'create_image': None, - } - - def setup_images_mock(self, count, servers=None): - if servers: - images = image_fakes.create_images( - attrs={ - 'name': servers[0].name, - 'status': 'active', - }, - count=count, - ) - else: - images = image_fakes.create_images( - attrs={ - 'status': 'active', - }, - count=count, - ) - - self.images_mock.find_image = mock.Mock(side_effect=images) - self.sdk_client.create_server_image = mock.Mock( - return_value=images[0], + self.image = image_fakes.create_one_image( + {'name': self.server.name, 'status': 'active'} ) - return images + self.image_client.find_image.return_value = self.image + self.compute_client.create_server_image.return_value = self.image - def test_server_image_create_defaults(self): - servers = self.setup_servers_mock(count=1) - images = self.setup_images_mock(count=1, servers=servers) + # Get the command object to test + self.cmd = server_image.CreateServerImage(self.app, None) + def test_server_image_create_defaults(self): arglist = [ - servers[0].id, 
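
Note on the helper used throughout the rewritten tests above: the per-test mock.patch.object(sdk_utils, 'supports_microversion', ...) decorators are replaced by a single self.set_compute_api_version(...) call, whose definition lives in the shared compute fakes and is not shown in this patch. Below is a minimal sketch of what such a helper could look like, modelled on the _set_mock_microversion() code this series removes; the class name FakeMicroversionMixin and the implementation details are assumptions, not the project's actual code:

    from unittest import mock

    from openstack import utils as sdk_utils


    class FakeMicroversionMixin:
        """Mixin for test cases that need a fixed negotiated compute microversion."""

        def set_compute_api_version(self, version):
            """Make supports_microversion() act as if `version` was negotiated."""

            def _supports(adapter, required, raise_exception=False):
                # Compare 'major.minor' strings numerically, e.g. '2.14' < '2.15'.
                req = tuple(int(p) for p in required.split('.'))
                neg = tuple(int(p) for p in version.split('.'))
                return req <= neg

            patcher = mock.patch.object(
                sdk_utils, 'supports_microversion', side_effect=_supports
            )
            patcher.start()
            self.addCleanup(patcher.stop)

With such a mixin, self.set_compute_api_version('2.14') makes any check for 2.15 or newer fail, which is exactly what the *_pre_v215 style tests rely on.
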
+ self.server.id, ] verifylist = [ - ('server', servers[0].id), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -127,27 +75,26 @@ def test_server_image_create_defaults(self): # data to be shown. columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_server_image.assert_called_with( - servers[0].id, - servers[0].name, + self.compute_client.create_server_image.assert_called_with( + self.server.id, + self.server.name, None, ) - self.assertEqual(self.image_columns(images[0]), columns) - self.assertCountEqual(self.image_data(images[0]), data) + self.assertEqual(self.image_columns(self.image), columns) + self.assertCountEqual(self.image_data(self.image), data) def test_server_image_create_options(self): - servers = self.setup_servers_mock(count=1) - images = self.setup_images_mock(count=1, servers=servers) - arglist = [ - '--name', 'img-nam', - '--property', 'key=value', - servers[0].id, + '--name', + 'img-nam', + '--property', + 'key=value', + self.server.id, ] verifylist = [ ('name', 'img-nam'), - ('server', servers[0].id), + ('server', self.server.id), ('properties', {'key': 'value'}), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -157,27 +104,24 @@ def test_server_image_create_options(self): # data to be shown. columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_server_image.assert_called_with( - servers[0].id, + self.compute_client.create_server_image.assert_called_with( + self.server.id, 'img-nam', {'key': 'value'}, ) - self.assertEqual(self.image_columns(images[0]), columns) - self.assertCountEqual(self.image_data(images[0]), data) + self.assertEqual(self.image_columns(self.image), columns) + self.assertCountEqual(self.image_data(self.image), data) @mock.patch.object(common_utils, 'wait_for_status', return_value=False) def test_server_create_image_wait_fail(self, mock_wait_for_status): - servers = self.setup_servers_mock(count=1) - images = self.setup_images_mock(count=1, servers=servers) - arglist = [ '--wait', - servers[0].id, + self.server.id, ] verifylist = [ ('wait', True), - ('server', servers[0].id), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -187,30 +131,25 @@ def test_server_create_image_wait_fail(self, mock_wait_for_status): parsed_args, ) - self.sdk_client.create_server_image.assert_called_with( - servers[0].id, - servers[0].name, + self.compute_client.create_server_image.assert_called_with( + self.server.id, + self.server.name, None, ) mock_wait_for_status.assert_called_once_with( - self.images_mock.get_image, - images[0].id, - callback=mock.ANY + self.image_client.get_image, self.image.id, callback=mock.ANY ) @mock.patch.object(common_utils, 'wait_for_status', return_value=True) def test_server_create_image_wait_ok(self, mock_wait_for_status): - servers = self.setup_servers_mock(count=1) - images = self.setup_images_mock(count=1, servers=servers) - arglist = [ '--wait', - servers[0].id, + self.server.id, ] verifylist = [ ('wait', True), - ('server', servers[0].id), + ('server', self.server.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -219,17 +158,15 @@ def test_server_create_image_wait_ok(self, mock_wait_for_status): # data to be shown. 
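
The two --wait tests in this file only assert the shape of the osc_lib.utils.wait_for_status() call. For readers unfamiliar with that helper, this is roughly the command-side pattern being exercised; the function name _wait_for_image and the error message are illustrative assumptions, and only the wait_for_status() call itself mirrors what the assertions check:

    from osc_lib import exceptions
    from osc_lib import utils as common_utils


    def _wait_for_image(image_client, image_id, progress_cb=None):
        # wait_for_status() repeatedly calls image_client.get_image(image_id)
        # and returns True once the resource reaches an 'active' status,
        # or False if it lands in an error status.
        if not common_utils.wait_for_status(
            image_client.get_image,
            image_id,
            callback=progress_cb,
        ):
            raise exceptions.CommandError('Image creation failed')

That is why test_server_create_image_wait_fail patches wait_for_status to return False and expects CommandError, while the _wait_ok variant returns True and then checks the printed columns.
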
columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.create_server_image.assert_called_with( - servers[0].id, - servers[0].name, + self.compute_client.create_server_image.assert_called_with( + self.server.id, + self.server.name, None, ) mock_wait_for_status.assert_called_once_with( - self.images_mock.get_image, - images[0].id, - callback=mock.ANY + self.image_client.get_image, self.image.id, callback=mock.ANY ) - self.assertEqual(self.image_columns(images[0]), columns) - self.assertCountEqual(self.image_data(images[0]), data) + self.assertEqual(self.image_columns(self.image), columns) + self.assertCountEqual(self.image_data(self.image), data) diff --git a/openstackclient/tests/unit/compute/v2/test_server_migration.py b/openstackclient/tests/unit/compute/v2/test_server_migration.py index afe868d9f2..5016c25720 100644 --- a/openstackclient/tests/unit/compute/v2/test_server_migration.py +++ b/openstackclient/tests/unit/compute/v2/test_server_migration.py @@ -10,10 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. -from unittest import mock - -from novaclient import api_versions -from openstack import utils as sdk_utils from osc_lib import exceptions from osc_lib import utils as common_utils @@ -22,65 +18,50 @@ from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestServerMigration(compute_fakes.TestComputev2): - - def setUp(self): - super().setUp() - - # Get a shortcut to the compute client ServerManager Mock - self.servers_mock = self.app.client_manager.compute.servers - self.servers_mock.reset_mock() - - # Get a shortcut to the compute client ServerMigrationsManager Mock - self.server_migrations_mock = \ - self.app.client_manager.compute.server_migrations - self.server_migrations_mock.reset_mock() - - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - - patcher = mock.patch.object( - sdk_utils, 'supports_microversion', return_value=True) - self.addCleanup(patcher.stop) - self.supports_microversion_mock = patcher.start() - - def _set_mock_microversion(self, mock_v): - """Set a specific microversion for the mock supports_microversion().""" - self.supports_microversion_mock.reset_mock(return_value=True) - self.supports_microversion_mock.side_effect = ( - lambda _, v: - api_versions.APIVersion(v) <= api_versions.APIVersion(mock_v)) - - -class TestListMigration(TestServerMigration): +class TestListMigration(compute_fakes.TestComputev2): """Test fetch all migrations.""" MIGRATION_COLUMNS = [ - 'Source Node', 'Dest Node', 'Source Compute', - 'Dest Compute', 'Dest Host', 'Status', 'Server UUID', - 'Old Flavor', 'New Flavor', 'Created At', 'Updated At' + 'Source Node', + 'Dest Node', + 'Source Compute', + 'Dest Compute', + 'Dest Host', + 'Status', + 'Server UUID', + 'Old Flavor', + 'New Flavor', + 'Created At', + 'Updated At', ] MIGRATION_FIELDS = [ - 'source_node', 'dest_node', 'source_compute', - 'dest_compute', 'dest_host', 'status', 'server_id', 'old_flavor_id', - 'new_flavor_id', 'created_at', 'updated_at' + 'source_node', + 'dest_node', + 'source_compute', + 'dest_compute', + 'dest_host', + 'status', + 'server_id', + 'old_flavor_id', + 'new_flavor_id', + 'created_at', + 'updated_at', ] def setUp(self): super().setUp() - self._set_mock_microversion('2.1') - - self.server = compute_fakes.FakeServer.create_one_sdk_server() - self.sdk_client.find_server.return_value = 
self.server + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server self.migrations = compute_fakes.create_migrations(count=3) - self.sdk_client.migrations.return_value = self.migrations + self.compute_client.migrations.return_value = self.migrations - self.data = (common_utils.get_item_properties( - s, self.MIGRATION_FIELDS) for s in self.migrations) + self.data = ( + common_utils.get_item_properties(s, self.MIGRATION_FIELDS) + for s in self.migrations + ) # Get the command object to test self.cmd = server_migration.ListMigration(self.app, None) @@ -95,17 +76,21 @@ def test_server_migration_list_no_options(self): # Set expected values kwargs = {} - self.sdk_client.migrations.assert_called_with(**kwargs) + self.compute_client.migrations.assert_called_with(**kwargs) self.assertEqual(self.MIGRATION_COLUMNS, columns) self.assertEqual(tuple(self.data), tuple(data)) def test_server_migration_list(self): arglist = [ - '--server', 'server1', - '--host', 'host1', - '--status', 'migrating', - '--type', 'cold-migration', + '--server', + 'server1', + '--host', + 'host1', + '--status', + 'migrating', + '--type', + 'cold-migration', ] verifylist = [ ('server', 'server1'), @@ -123,41 +108,59 @@ def test_server_migration_list(self): 'migration_type': 'migration', } - self.sdk_client.find_server.assert_called_with('server1') - self.sdk_client.migrations.assert_called_with(**kwargs) + self.compute_client.find_server.assert_called_with( + 'server1', ignore_missing=False + ) + self.compute_client.migrations.assert_called_with(**kwargs) self.assertEqual(self.MIGRATION_COLUMNS, columns) self.assertEqual(tuple(self.data), tuple(data)) class TestListMigrationV223(TestListMigration): - """Test fetch all migrations. """ + """Test fetch all migrations.""" MIGRATION_COLUMNS = [ - 'Id', 'Source Node', 'Dest Node', 'Source Compute', 'Dest Compute', - 'Dest Host', 'Status', 'Server UUID', 'Old Flavor', 'New Flavor', - 'Type', 'Created At', 'Updated At' + 'Id', + 'Source Node', + 'Dest Node', + 'Source Compute', + 'Dest Compute', + 'Dest Host', + 'Status', + 'Server UUID', + 'Old Flavor', + 'New Flavor', + 'Type', + 'Created At', + 'Updated At', ] # These are the Migration object fields. MIGRATION_FIELDS = [ - 'id', 'source_node', 'dest_node', 'source_compute', 'dest_compute', - 'dest_host', 'status', 'server_id', 'old_flavor_id', - 'new_flavor_id', 'migration_type', 'created_at', 'updated_at' + 'id', + 'source_node', + 'dest_node', + 'source_compute', + 'dest_compute', + 'dest_host', + 'status', + 'server_id', + 'old_flavor_id', + 'new_flavor_id', + 'migration_type', + 'created_at', + 'updated_at', ] def setUp(self): super().setUp() - self._set_mock_microversion('2.23') + self.set_compute_api_version('2.23') def test_server_migration_list(self): - arglist = [ - '--status', 'migrating' - ] - verifylist = [ - ('status', 'migrating') - ] + arglist = ['--status', 'migrating'] + verifylist = [('status', 'migrating')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) @@ -166,46 +169,71 @@ def test_server_migration_list(self): 'status': 'migrating', } - self.sdk_client.migrations.assert_called_with(**kwargs) + self.compute_client.migrations.assert_called_with(**kwargs) self.assertEqual(self.MIGRATION_COLUMNS, columns) self.assertEqual(tuple(self.data), tuple(data)) class TestListMigrationV259(TestListMigration): - """Test fetch all migrations. 
""" + """Test fetch all migrations.""" MIGRATION_COLUMNS = [ - 'Id', 'UUID', 'Source Node', 'Dest Node', 'Source Compute', - 'Dest Compute', 'Dest Host', 'Status', 'Server UUID', - 'Old Flavor', 'New Flavor', 'Type', 'Created At', 'Updated At' + 'Id', + 'UUID', + 'Source Node', + 'Dest Node', + 'Source Compute', + 'Dest Compute', + 'Dest Host', + 'Status', + 'Server UUID', + 'Old Flavor', + 'New Flavor', + 'Type', + 'Created At', + 'Updated At', ] # These are the Migration object fields. MIGRATION_FIELDS = [ - 'id', 'uuid', 'source_node', 'dest_node', 'source_compute', - 'dest_compute', 'dest_host', 'status', 'server_id', - 'old_flavor_id', 'new_flavor_id', 'migration_type', - 'created_at', 'updated_at' + 'id', + 'uuid', + 'source_node', + 'dest_node', + 'source_compute', + 'dest_compute', + 'dest_host', + 'status', + 'server_id', + 'old_flavor_id', + 'new_flavor_id', + 'migration_type', + 'created_at', + 'updated_at', ] def setUp(self): super().setUp() - self._set_mock_microversion('2.59') + self.set_compute_api_version('2.59') def test_server_migration_list(self): arglist = [ - '--status', 'migrating', - '--limit', '1', - '--marker', 'test_kp', - '--changes-since', '2019-08-09T08:03:25Z' + '--status', + 'migrating', + '--limit', + '1', + '--marker', + 'test_kp', + '--changes-since', + '2019-08-09T08:03:25Z', ] verifylist = [ ('status', 'migrating'), ('limit', 1), ('marker', 'test_kp'), - ('changes_since', '2019-08-09T08:03:25Z') + ('changes_since', '2019-08-09T08:03:25Z'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) @@ -219,105 +247,121 @@ def test_server_migration_list(self): 'changes_since': '2019-08-09T08:03:25Z', } - self.sdk_client.migrations.assert_called_with(**kwargs) + self.compute_client.migrations.assert_called_with(**kwargs) self.assertEqual(self.MIGRATION_COLUMNS, columns) self.assertEqual(tuple(self.data), tuple(data)) def test_server_migration_list_with_limit_pre_v259(self): - self._set_mock_microversion('2.58') - arglist = [ - '--status', 'migrating', - '--limit', '1' - ] - verifylist = [ - ('status', 'migrating'), - ('limit', 1) - ] + self.set_compute_api_version('2.58') + + arglist = ['--status', 'migrating', '--limit', '1'] + verifylist = [('status', 'migrating'), ('limit', 1)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.59 or greater is required', - str(ex)) + '--os-compute-api-version 2.59 or greater is required', str(ex) + ) def test_server_migration_list_with_marker_pre_v259(self): - self._set_mock_microversion('2.58') - arglist = [ - '--status', 'migrating', - '--marker', 'test_kp' - ] - verifylist = [ - ('status', 'migrating'), - ('marker', 'test_kp') - ] + self.set_compute_api_version('2.58') + + arglist = ['--status', 'migrating', '--marker', 'test_kp'] + verifylist = [('status', 'migrating'), ('marker', 'test_kp')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.59 or greater is required', - str(ex)) + '--os-compute-api-version 2.59 or greater is required', str(ex) + ) def test_server_migration_list_with_changes_since_pre_v259(self): - 
self._set_mock_microversion('2.58') + self.set_compute_api_version('2.58') + arglist = [ - '--status', 'migrating', - '--changes-since', '2019-08-09T08:03:25Z' + '--status', + 'migrating', + '--changes-since', + '2019-08-09T08:03:25Z', ] verifylist = [ ('status', 'migrating'), - ('changes_since', '2019-08-09T08:03:25Z') + ('changes_since', '2019-08-09T08:03:25Z'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.59 or greater is required', - str(ex)) + '--os-compute-api-version 2.59 or greater is required', str(ex) + ) class TestListMigrationV266(TestListMigration): - """Test fetch all migrations by changes-before. """ + """Test fetch all migrations by changes-before.""" MIGRATION_COLUMNS = [ - 'Id', 'UUID', 'Source Node', 'Dest Node', 'Source Compute', - 'Dest Compute', 'Dest Host', 'Status', 'Server UUID', - 'Old Flavor', 'New Flavor', 'Type', 'Created At', 'Updated At' + 'Id', + 'UUID', + 'Source Node', + 'Dest Node', + 'Source Compute', + 'Dest Compute', + 'Dest Host', + 'Status', + 'Server UUID', + 'Old Flavor', + 'New Flavor', + 'Type', + 'Created At', + 'Updated At', ] # These are the Migration object fields. MIGRATION_FIELDS = [ - 'id', 'uuid', 'source_node', 'dest_node', 'source_compute', - 'dest_compute', 'dest_host', 'status', 'server_id', - 'old_flavor_id', 'new_flavor_id', 'migration_type', - 'created_at', 'updated_at' + 'id', + 'uuid', + 'source_node', + 'dest_node', + 'source_compute', + 'dest_compute', + 'dest_host', + 'status', + 'server_id', + 'old_flavor_id', + 'new_flavor_id', + 'migration_type', + 'created_at', + 'updated_at', ] def setUp(self): super().setUp() - self._set_mock_microversion('2.66') + self.set_compute_api_version('2.66') def test_server_migration_list_with_changes_before(self): arglist = [ - '--status', 'migrating', - '--limit', '1', - '--marker', 'test_kp', - '--changes-since', '2019-08-07T08:03:25Z', - '--changes-before', '2019-08-09T08:03:25Z' + '--status', + 'migrating', + '--limit', + '1', + '--marker', + 'test_kp', + '--changes-since', + '2019-08-07T08:03:25Z', + '--changes-before', + '2019-08-09T08:03:25Z', ] verifylist = [ ('status', 'migrating'), ('limit', 1), ('marker', 'test_kp'), ('changes_since', '2019-08-07T08:03:25Z'), - ('changes_before', '2019-08-09T08:03:25Z') + ('changes_before', '2019-08-09T08:03:25Z'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) @@ -332,46 +376,69 @@ def test_server_migration_list_with_changes_before(self): 'changes_before': '2019-08-09T08:03:25Z', } - self.sdk_client.migrations.assert_called_with(**kwargs) + self.compute_client.migrations.assert_called_with(**kwargs) self.assertEqual(self.MIGRATION_COLUMNS, columns) self.assertEqual(tuple(self.data), tuple(data)) def test_server_migration_list_with_changes_before_pre_v266(self): - self._set_mock_microversion('2.65') + self.set_compute_api_version('2.65') + arglist = [ - '--status', 'migrating', - '--changes-before', '2019-08-09T08:03:25Z' + '--status', + 'migrating', + '--changes-before', + '2019-08-09T08:03:25Z', ] verifylist = [ ('status', 'migrating'), - ('changes_before', '2019-08-09T08:03:25Z') + ('changes_before', '2019-08-09T08:03:25Z'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - 
parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.66 or greater is required', - str(ex)) + '--os-compute-api-version 2.66 or greater is required', str(ex) + ) class TestListMigrationV280(TestListMigration): - """Test fetch all migrations by user-id and/or project-id. """ + """Test fetch all migrations by user-id and/or project-id.""" MIGRATION_COLUMNS = [ - 'Id', 'UUID', 'Source Node', 'Dest Node', 'Source Compute', - 'Dest Compute', 'Dest Host', 'Status', 'Server UUID', - 'Old Flavor', 'New Flavor', 'Type', 'Created At', 'Updated At' + 'Id', + 'UUID', + 'Source Node', + 'Dest Node', + 'Source Compute', + 'Dest Compute', + 'Dest Host', + 'Status', + 'Server UUID', + 'Old Flavor', + 'New Flavor', + 'Type', + 'Created At', + 'Updated At', ] # These are the Migration object fields. MIGRATION_FIELDS = [ - 'id', 'uuid', 'source_node', 'dest_node', 'source_compute', - 'dest_compute', 'dest_host', 'status', 'server_id', - 'old_flavor_id', 'new_flavor_id', 'migration_type', - 'created_at', 'updated_at' + 'id', + 'uuid', + 'source_node', + 'dest_node', + 'source_compute', + 'dest_compute', + 'dest_host', + 'status', + 'server_id', + 'old_flavor_id', + 'new_flavor_id', + 'migration_type', + 'created_at', + 'updated_at', ] project = identity_fakes.FakeProject.create_one_project() @@ -380,25 +447,31 @@ class TestListMigrationV280(TestListMigration): def setUp(self): super().setUp() - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() - self.users_mock = self.app.client_manager.identity.users + self.users_mock = self.identity_client.users self.users_mock.reset_mock() self.projects_mock.get.return_value = self.project self.users_mock.get.return_value = self.user - self._set_mock_microversion('2.80') + self.set_compute_api_version('2.80') def test_server_migration_list_with_project(self): arglist = [ - '--status', 'migrating', - '--limit', '1', - '--marker', 'test_kp', - '--changes-since', '2019-08-07T08:03:25Z', - '--changes-before', '2019-08-09T08:03:25Z', - '--project', self.project.id + '--status', + 'migrating', + '--limit', + '1', + '--marker', + 'test_kp', + '--changes-since', + '2019-08-07T08:03:25Z', + '--changes-before', + '2019-08-09T08:03:25Z', + '--project', + self.project.id, ] verifylist = [ ('status', 'migrating'), @@ -406,7 +479,7 @@ def test_server_migration_list_with_project(self): ('marker', 'test_kp'), ('changes_since', '2019-08-07T08:03:25Z'), ('changes_before', '2019-08-09T08:03:25Z'), - ('project', self.project.id) + ('project', self.project.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) @@ -422,12 +495,14 @@ def test_server_migration_list_with_project(self): 'changes_before': "2019-08-09T08:03:25Z", } - self.sdk_client.migrations.assert_called_with(**kwargs) + self.compute_client.migrations.assert_called_with(**kwargs) self.MIGRATION_COLUMNS.insert( - len(self.MIGRATION_COLUMNS) - 2, "Project") + len(self.MIGRATION_COLUMNS) - 2, "Project" + ) self.MIGRATION_FIELDS.insert( - len(self.MIGRATION_FIELDS) - 2, "project_id") + len(self.MIGRATION_FIELDS) - 2, "project_id" + ) self.assertEqual(self.MIGRATION_COLUMNS, columns) self.assertEqual(tuple(self.data), tuple(data)) # Clean up global variables MIGRATION_COLUMNS @@ -436,34 +511,43 @@ def test_server_migration_list_with_project(self): self.MIGRATION_FIELDS.remove('project_id') def 
test_get_migrations_with_project_pre_v280(self): - self._set_mock_microversion('2.79') + self.set_compute_api_version('2.79') + arglist = [ - '--status', 'migrating', - '--changes-before', '2019-08-09T08:03:25Z', - '--project', '0c2accde-644a-45fa-8c10-e76debc7fbc3' + '--status', + 'migrating', + '--changes-before', + '2019-08-09T08:03:25Z', + '--project', + '0c2accde-644a-45fa-8c10-e76debc7fbc3', ] verifylist = [ ('status', 'migrating'), ('changes_before', '2019-08-09T08:03:25Z'), - ('project', '0c2accde-644a-45fa-8c10-e76debc7fbc3') + ('project', '0c2accde-644a-45fa-8c10-e76debc7fbc3'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.80 or greater is required', - str(ex)) + '--os-compute-api-version 2.80 or greater is required', str(ex) + ) def test_server_migration_list_with_user(self): arglist = [ - '--status', 'migrating', - '--limit', '1', - '--marker', 'test_kp', - '--changes-since', '2019-08-07T08:03:25Z', - '--changes-before', '2019-08-09T08:03:25Z', - '--user', self.user.id, + '--status', + 'migrating', + '--limit', + '1', + '--marker', + 'test_kp', + '--changes-since', + '2019-08-07T08:03:25Z', + '--changes-before', + '2019-08-09T08:03:25Z', + '--user', + self.user.id, ] verifylist = [ ('status', 'migrating'), @@ -488,12 +572,10 @@ def test_server_migration_list_with_user(self): 'changes_before': "2019-08-09T08:03:25Z", } - self.sdk_client.migrations.assert_called_with(**kwargs) + self.compute_client.migrations.assert_called_with(**kwargs) - self.MIGRATION_COLUMNS.insert( - len(self.MIGRATION_COLUMNS) - 2, "User") - self.MIGRATION_FIELDS.insert( - len(self.MIGRATION_FIELDS) - 2, "user_id") + self.MIGRATION_COLUMNS.insert(len(self.MIGRATION_COLUMNS) - 2, "User") + self.MIGRATION_FIELDS.insert(len(self.MIGRATION_FIELDS) - 2, "user_id") self.assertEqual(self.MIGRATION_COLUMNS, columns) self.assertEqual(tuple(self.data), tuple(data)) # Clean up global variables MIGRATION_COLUMNS @@ -502,11 +584,15 @@ def test_server_migration_list_with_user(self): self.MIGRATION_FIELDS.remove('user_id') def test_get_migrations_with_user_pre_v280(self): - self._set_mock_microversion('2.79') + self.set_compute_api_version('2.79') + arglist = [ - '--status', 'migrating', - '--changes-before', '2019-08-09T08:03:25Z', - '--user', self.user.id, + '--status', + 'migrating', + '--changes-before', + '2019-08-09T08:03:25Z', + '--user', + self.user.id, ] verifylist = [ ('status', 'migrating'), @@ -515,21 +601,26 @@ def test_get_migrations_with_user_pre_v280(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.80 or greater is required', - str(ex)) + '--os-compute-api-version 2.80 or greater is required', str(ex) + ) def test_server_migration_list_with_project_and_user(self): arglist = [ - '--status', 'migrating', - '--limit', '1', - '--changes-since', '2019-08-07T08:03:25Z', - '--changes-before', '2019-08-09T08:03:25Z', - '--project', self.project.id, - '--user', self.user.id, + '--status', + 'migrating', + '--limit', + '1', + '--changes-since', + '2019-08-07T08:03:25Z', + '--changes-before', + '2019-08-09T08:03:25Z', + '--project', + self.project.id, + '--user', + self.user.id, ] 
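
Every *_pre_vNNN test in this module follows the same recipe: fix an older negotiated microversion, parse the arguments, and expect a CommandError that names the required version. The guard being exercised typically looks like the sketch below; the helper name and the exact message tail are assumptions, since the tests only match the '--os-compute-api-version X.Y or greater is required' prefix:

    from openstack import utils as sdk_utils
    from osc_lib import exceptions


    def _require_microversion(compute_client, version, option):
        """Fail early when an option needs a newer compute API microversion."""
        if not sdk_utils.supports_microversion(compute_client, version):
            msg = (
                f'--os-compute-api-version {version} or greater is required '
                f'to support the {option} option'
            )
            raise exceptions.CommandError(msg)


    # e.g. before forwarding --project to the migrations API:
    # _require_microversion(compute_client, '2.80', '--project')
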
verifylist = [ ('status', 'migrating'), @@ -553,16 +644,16 @@ def test_server_migration_list_with_project_and_user(self): 'changes_before': "2019-08-09T08:03:25Z", } - self.sdk_client.migrations.assert_called_with(**kwargs) + self.compute_client.migrations.assert_called_with(**kwargs) self.MIGRATION_COLUMNS.insert( - len(self.MIGRATION_COLUMNS) - 2, "Project") - self.MIGRATION_FIELDS.insert( - len(self.MIGRATION_FIELDS) - 2, "project_id") - self.MIGRATION_COLUMNS.insert( - len(self.MIGRATION_COLUMNS) - 2, "User") + len(self.MIGRATION_COLUMNS) - 2, "Project" + ) self.MIGRATION_FIELDS.insert( - len(self.MIGRATION_FIELDS) - 2, "user_id") + len(self.MIGRATION_FIELDS) - 2, "project_id" + ) + self.MIGRATION_COLUMNS.insert(len(self.MIGRATION_COLUMNS) - 2, "User") + self.MIGRATION_FIELDS.insert(len(self.MIGRATION_FIELDS) - 2, "user_id") self.assertEqual(self.MIGRATION_COLUMNS, columns) self.assertEqual(tuple(self.data), tuple(data)) # Clean up global variables MIGRATION_COLUMNS @@ -572,41 +663,45 @@ def test_server_migration_list_with_project_and_user(self): self.MIGRATION_FIELDS.remove('user_id') def test_get_migrations_with_project_and_user_pre_v280(self): - self._set_mock_microversion('2.79') + self.set_compute_api_version('2.79') + arglist = [ - '--status', 'migrating', - '--changes-before', '2019-08-09T08:03:25Z', - '--project', self.project.id, - '--user', self.user.id, + '--status', + 'migrating', + '--changes-before', + '2019-08-09T08:03:25Z', + '--project', + self.project.id, + '--user', + self.user.id, ] verifylist = [ ('status', 'migrating'), ('changes_before', '2019-08-09T08:03:25Z'), ('project', self.project.id), - ('user', self.user.id) + ('user', self.user.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.80 or greater is required', - str(ex)) - + '--os-compute-api-version 2.80 or greater is required', str(ex) + ) -class TestServerMigrationShow(TestServerMigration): +class TestServerMigrationShow(compute_fakes.TestComputev2): def setUp(self): super().setUp() - self.server = compute_fakes.FakeServer.create_one_sdk_server() - self.sdk_client.find_server.return_value = self.server + self.server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = self.server self.server_migration = compute_fakes.create_one_server_migration() - self.sdk_client.get_server_migration.return_value =\ + self.compute_client.get_server_migration.return_value = ( self.server_migration - self.sdk_client.server_migrations.return_value = iter( + ) + self.compute_client.server_migrations.return_value = iter( [self.server_migration] ) @@ -664,18 +759,20 @@ def _test_server_migration_show(self): self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) - self.sdk_client.find_server.assert_called_with( - self.server.id, ignore_missing=False) - self.sdk_client.get_server_migration.assert_called_with( - self.server.id, '2', ignore_missing=False) + self.compute_client.find_server.assert_called_with( + self.server.id, ignore_missing=False + ) + self.compute_client.get_server_migration.assert_called_with( + self.server.id, '2', ignore_missing=False + ) def test_server_migration_show(self): - self._set_mock_microversion('2.24') + self.set_compute_api_version('2.24') self._test_server_migration_show() def test_server_migration_show_v259(self): - 
self._set_mock_microversion('2.59') + self.set_compute_api_version('2.59') self.columns += ('UUID',) self.data += (self.server_migration.uuid,) @@ -683,7 +780,7 @@ def test_server_migration_show_v259(self): self._test_server_migration_show() def test_server_migration_show_v280(self): - self._set_mock_microversion('2.80') + self.set_compute_api_version('2.80') self.columns += ('UUID', 'User ID', 'Project ID') self.data += ( @@ -695,7 +792,7 @@ def test_server_migration_show_v280(self): self._test_server_migration_show() def test_server_migration_show_pre_v224(self): - self._set_mock_microversion('2.23') + self.set_compute_api_version('2.23') arglist = [ self.server.id, @@ -705,17 +802,16 @@ def test_server_migration_show_pre_v224(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.24 or greater is required', - str(ex)) + '--os-compute-api-version 2.24 or greater is required', str(ex) + ) def test_server_migration_show_by_uuid(self): - self._set_mock_microversion('2.59') + self.set_compute_api_version('2.59') - self.sdk_client.server_migrations.return_value = iter( + self.compute_client.server_migrations.return_value = iter( [self.server_migration] ) @@ -734,14 +830,18 @@ def test_server_migration_show_by_uuid(self): self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) - self.sdk_client.find_server.assert_called_with( - self.server.id, ignore_missing=False) - self.sdk_client.server_migrations.assert_called_with(self.server.id) - self.sdk_client.get_server_migration.assert_not_called() + self.compute_client.find_server.assert_called_with( + self.server.id, ignore_missing=False + ) + self.compute_client.server_migrations.assert_called_with( + self.server.id + ) + self.compute_client.get_server_migration.assert_not_called() def test_server_migration_show_by_uuid_no_matches(self): - self._set_mock_microversion('2.59') - self.sdk_client.server_migrations.return_value = iter([]) + self.set_compute_api_version('2.59') + + self.compute_client.server_migrations.return_value = iter([]) arglist = [ self.server.id, @@ -751,15 +851,15 @@ def test_server_migration_show_by_uuid_no_matches(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( 'In-progress live migration 69f95745-bfe3-4302-90f7-5b0022cba1ce', - str(ex)) + str(ex), + ) def test_server_migration_show_by_uuid_pre_v259(self): - self._set_mock_microversion('2.58') + self.set_compute_api_version('2.58') arglist = [ self.server.id, @@ -769,15 +869,14 @@ def test_server_migration_show_by_uuid_pre_v259(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.59 or greater is required', - str(ex)) + '--os-compute-api-version 2.59 or greater is required', str(ex) + ) def test_server_migration_show_invalid_id(self): - self._set_mock_microversion('2.24') + self.set_compute_api_version('2.24') arglist = [ self.server.id, @@ -787,29 +886,27 @@ def test_server_migration_show_invalid_id(self): parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - 'The argument must be an ID or UUID', - str(ex)) - + 'The argument must be an ID or UUID', str(ex) + ) -class TestServerMigrationAbort(TestServerMigration): +class TestServerMigrationAbort(compute_fakes.TestComputev2): def setUp(self): super().setUp() - self.server = compute_fakes.FakeServer.create_one_sdk_server() + self.server = compute_fakes.create_one_server() # Return value for utils.find_resource for server. - self.sdk_client.find_server.return_value = self.server + self.compute_client.find_server.return_value = self.server # Get the command object to test self.cmd = server_migration.AbortMigration(self.app, None) def test_migration_abort(self): - self._set_mock_microversion('2.24') + self.set_compute_api_version('2.24') arglist = [ self.server.id, @@ -820,14 +917,16 @@ def test_migration_abort(self): result = self.cmd.take_action(parsed_args) - self.sdk_client.find_server.assert_called_with( - self.server.id, ignore_missing=False) - self.sdk_client.abort_server_migration.assert_called_with( - '2', self.server.id, ignore_missing=False) + self.compute_client.find_server.assert_called_with( + self.server.id, ignore_missing=False + ) + self.compute_client.abort_server_migration.assert_called_with( + '2', self.server.id, ignore_missing=False + ) self.assertIsNone(result) def test_migration_abort_pre_v224(self): - self._set_mock_microversion('2.23') + self.set_compute_api_version('2.23') arglist = [ self.server.id, @@ -837,18 +936,17 @@ def test_migration_abort_pre_v224(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.24 or greater is required', - str(ex)) + '--os-compute-api-version 2.24 or greater is required', str(ex) + ) def test_server_migration_abort_by_uuid(self): - self._set_mock_microversion('2.59') + self.set_compute_api_version('2.59') self.server_migration = compute_fakes.create_one_server_migration() - self.sdk_client.server_migrations.return_value = iter( + self.compute_client.server_migrations.return_value = iter( [self.server_migration] ) @@ -861,19 +959,21 @@ def test_server_migration_abort_by_uuid(self): result = self.cmd.take_action(parsed_args) - self.sdk_client.find_server.assert_called_with( - self.server.id, ignore_missing=False) - self.sdk_client.server_migrations.assert_called_with(self.server.id) - self.sdk_client.abort_server_migration.assert_called_with( - self.server_migration.id, self.server.id, ignore_missing=False) + self.compute_client.find_server.assert_called_with( + self.server.id, ignore_missing=False + ) + self.compute_client.server_migrations.assert_called_with( + self.server.id + ) + self.compute_client.abort_server_migration.assert_called_with( + self.server_migration.id, self.server.id, ignore_missing=False + ) self.assertIsNone(result) def test_server_migration_abort_by_uuid_no_matches(self): - self._set_mock_microversion('2.59') + self.set_compute_api_version('2.59') - self.sdk_client.server_migrations.return_value = iter( - [] - ) + self.compute_client.server_migrations.return_value = iter([]) arglist = [ self.server.id, @@ -883,15 +983,15 @@ def test_server_migration_abort_by_uuid_no_matches(self): parsed_args 
= self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( 'In-progress live migration 69f95745-bfe3-4302-90f7-5b0022cba1ce', - str(ex)) + str(ex), + ) def test_server_migration_abort_by_uuid_pre_v259(self): - self._set_mock_microversion('2.58') + self.set_compute_api_version('2.58') arglist = [ self.server.id, @@ -901,29 +1001,27 @@ def test_server_migration_abort_by_uuid_pre_v259(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.59 or greater is required', - str(ex)) - + '--os-compute-api-version 2.59 or greater is required', str(ex) + ) -class TestServerMigrationForceComplete(TestServerMigration): +class TestServerMigrationForceComplete(compute_fakes.TestComputev2): def setUp(self): super().setUp() - self.server = compute_fakes.FakeServer.create_one_sdk_server() + self.server = compute_fakes.create_one_server() # Return value for utils.find_resource for server. - self.sdk_client.find_server.return_value = self.server + self.compute_client.find_server.return_value = self.server # Get the command object to test self.cmd = server_migration.ForceCompleteMigration(self.app, None) def test_migration_force_complete(self): - self._set_mock_microversion('2.22') + self.set_compute_api_version('2.22') arglist = [ self.server.id, @@ -934,14 +1032,16 @@ def test_migration_force_complete(self): result = self.cmd.take_action(parsed_args) - self.sdk_client.find_server.assert_called_with( - self.server.id, ignore_missing=False) - self.sdk_client.force_complete_server_migration\ - .assert_called_with('2', self.server.id) + self.compute_client.find_server.assert_called_with( + self.server.id, ignore_missing=False + ) + self.compute_client.force_complete_server_migration.assert_called_with( + '2', self.server.id + ) self.assertIsNone(result) def test_migration_force_complete_pre_v222(self): - self._set_mock_microversion('2.21') + self.set_compute_api_version('2.21') arglist = [ self.server.id, @@ -951,18 +1051,17 @@ def test_migration_force_complete_pre_v222(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.22 or greater is required', - str(ex)) + '--os-compute-api-version 2.22 or greater is required', str(ex) + ) def test_server_migration_force_complete_by_uuid(self): - self._set_mock_microversion('2.59') + self.set_compute_api_version('2.59') self.server_migration = compute_fakes.create_one_server_migration() - self.sdk_client.server_migrations.return_value = iter( + self.compute_client.server_migrations.return_value = iter( [self.server_migration] ) @@ -975,17 +1074,21 @@ def test_server_migration_force_complete_by_uuid(self): result = self.cmd.take_action(parsed_args) - self.sdk_client.find_server.assert_called_with( - self.server.id, ignore_missing=False) - self.sdk_client.server_migrations.assert_called_with(self.server.id) - self.sdk_client.force_complete_server_migration.\ - assert_called_with(self.server_migration.id, self.server.id) + self.compute_client.find_server.assert_called_with( + self.server.id, 
ignore_missing=False + ) + self.compute_client.server_migrations.assert_called_with( + self.server.id + ) + self.compute_client.force_complete_server_migration.assert_called_with( + self.server_migration.id, self.server.id + ) self.assertIsNone(result) def test_server_migration_force_complete_by_uuid_no_matches(self): - self._set_mock_microversion('2.59') + self.set_compute_api_version('2.59') - self.sdk_client.server_migrations.return_value = iter([]) + self.compute_client.server_migrations.return_value = iter([]) arglist = [ self.server.id, @@ -995,15 +1098,15 @@ def test_server_migration_force_complete_by_uuid_no_matches(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( 'In-progress live migration 69f95745-bfe3-4302-90f7-5b0022cba1ce', - str(ex)) + str(ex), + ) def test_server_migration_force_complete_by_uuid_pre_v259(self): - self._set_mock_microversion('2.58') + self.set_compute_api_version('2.58') arglist = [ self.server.id, @@ -1013,9 +1116,8 @@ def test_server_migration_force_complete_by_uuid_pre_v259(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) ex = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-compute-api-version 2.59 or greater is required', - str(ex)) + '--os-compute-api-version 2.59 or greater is required', str(ex) + ) diff --git a/openstackclient/tests/unit/compute/v2/test_server_volume.py b/openstackclient/tests/unit/compute/v2/test_server_volume.py index f86bc7ddbe..9f26c6d5e3 100644 --- a/openstackclient/tests/unit/compute/v2/test_server_volume.py +++ b/openstackclient/tests/unit/compute/v2/test_server_volume.py @@ -9,52 +9,37 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
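
The server-volume and service tests below stop using the hand-rolled compute_fakes factories and instead build fixtures with openstacksdk's generic fake generators, imported a few lines further down as sdk_fakes. A small standalone example of the two helpers, using only calls that appear in this patch:

    from openstack.compute.v2 import server as _server
    from openstack.compute.v2 import volume_attachment as _volume_attachment
    from openstack.test import fakes as sdk_fakes

    # One fake Server with its attributes populated with generated values.
    server = sdk_fakes.generate_fake_resource(_server.Server)

    # Two fake VolumeAttachments, as TestServerVolumeList.setUp() builds.
    attachments = list(
        sdk_fakes.generate_fake_resources(
            _volume_attachment.VolumeAttachment, count=2
        )
    )

    print(server.id, [attachment.device for attachment in attachments])

generate_fake_resources() returns a generator, hence the list() wrapper used in the tests.
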
-# - -from unittest import mock -from novaclient import api_versions -from openstack import utils as sdk_utils +from openstack.block_storage.v3 import volume as _volume +from openstack.compute.v2 import server as _server +from openstack.compute.v2 import volume_attachment as _volume_attachment +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions from openstackclient.compute.v2 import server_volume from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes - -class TestServerVolume(compute_fakes.TestComputev2): +class TestServerVolumeList(compute_fakes.TestComputev2): def setUp(self): super().setUp() - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.app.client_manager.sdk_connection.volume = mock.Mock() - self.compute_client = self.app.client_manager.sdk_connection.compute - self.volume_client = self.app.client_manager.sdk_connection.volume - - -class TestServerVolumeList(TestServerVolume): - - def setUp(self): - super().setUp() - - self.server = compute_fakes.FakeServer.create_one_sdk_server() - self.volume_attachments = compute_fakes.create_volume_attachments() + self.server = sdk_fakes.generate_fake_resource(_server.Server) + self.volume_attachments = list( + sdk_fakes.generate_fake_resources( + _volume_attachment.VolumeAttachment, count=2 + ) + ) self.compute_client.find_server.return_value = self.server self.compute_client.volume_attachments.return_value = ( - self.volume_attachments) + self.volume_attachments + ) # Get the command object to test self.cmd = server_volume.ListServerVolume(self.app, None) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_volume_list(self, sm_mock): - self.app.client_manager.compute.api_version = \ - api_versions.APIVersion('2.1') - sm_mock.side_effect = [False, False, False, False] - + def test_server_volume_list(self): arglist = [ self.server.id, ] @@ -87,9 +72,8 @@ def test_server_volume_list(self, sm_mock): self.server, ) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_volume_list_with_tags(self, sm_mock): - sm_mock.side_effect = [False, True, False, False] + def test_server_volume_list_with_tags(self): + self.set_compute_api_version('2.70') arglist = [ self.server.id, @@ -102,7 +86,14 @@ def test_server_volume_list_with_tags(self, sm_mock): columns, data = self.cmd.take_action(parsed_args) self.assertEqual( - ('ID', 'Device', 'Server ID', 'Volume ID', 'Tag',), columns, + ( + 'ID', + 'Device', + 'Server ID', + 'Volume ID', + 'Tag', + ), + columns, ) self.assertEqual( ( @@ -127,9 +118,9 @@ def test_server_volume_list_with_tags(self, sm_mock): self.server, ) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_volume_list_with_delete_on_attachment(self, sm_mock): - sm_mock.side_effect = [False, True, True, False] + def test_server_volume_list_with_delete_on_attachment(self): + self.set_compute_api_version('2.79') + arglist = [ self.server.id, ] @@ -142,7 +133,11 @@ def test_server_volume_list_with_delete_on_attachment(self, sm_mock): self.assertEqual( ( - 'ID', 'Device', 'Server ID', 'Volume ID', 'Tag', + 'ID', + 'Device', + 'Server ID', + 'Volume ID', + 'Tag', 'Delete On Termination?', ), columns, @@ -172,10 +167,9 @@ def test_server_volume_list_with_delete_on_attachment(self, sm_mock): self.server, ) - @mock.patch.object(sdk_utils, 'supports_microversion') - def 
test_server_volume_list_with_attachment_ids(self, sm_mock): + def test_server_volume_list_with_attachment_ids(self): + self.set_compute_api_version('2.89') - sm_mock.side_effect = [True, True, True, True] arglist = [ self.server.id, ] @@ -188,8 +182,12 @@ def test_server_volume_list_with_attachment_ids(self, sm_mock): self.assertEqual( ( - 'Device', 'Server ID', 'Volume ID', 'Tag', - 'Delete On Termination?', 'Attachment ID', + 'Device', + 'Server ID', + 'Volume ID', + 'Tag', + 'Delete On Termination?', + 'Attachment ID', 'BlockDeviceMapping UUID', ), columns, @@ -203,8 +201,7 @@ def test_server_volume_list_with_attachment_ids(self, sm_mock): self.volume_attachments[0].tag, self.volume_attachments[0].delete_on_termination, self.volume_attachments[0].attachment_id, - self.volume_attachments[0].bdm_id - + self.volume_attachments[0].bdm_id, ), ( self.volume_attachments[1].device, @@ -213,7 +210,7 @@ def test_server_volume_list_with_attachment_ids(self, sm_mock): self.volume_attachments[1].tag, self.volume_attachments[1].delete_on_termination, self.volume_attachments[1].attachment_id, - self.volume_attachments[1].bdm_id + self.volume_attachments[1].bdm_id, ), ), tuple(data), @@ -223,16 +220,15 @@ def test_server_volume_list_with_attachment_ids(self, sm_mock): ) -class TestServerVolumeUpdate(TestServerVolume): - +class TestServerVolumeUpdate(compute_fakes.TestComputev2): def setUp(self): super().setUp() - self.server = compute_fakes.FakeServer.create_one_sdk_server() + self.server = sdk_fakes.generate_fake_resource(_server.Server) self.compute_client.find_server.return_value = self.server - self.volume = volume_fakes.create_one_sdk_volume() - self.volume_client.find_volume.return_value = self.volume + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume # Get the command object to test self.cmd = server_volume.UpdateServerVolume(self.app, None) @@ -255,9 +251,8 @@ def test_server_volume_update(self): self.compute_client.update_volume_attachment.assert_not_called() self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_volume_update_with_delete_on_termination(self, sm_mock): - sm_mock.return_value = True + def test_server_volume_update_with_delete_on_termination(self): + self.set_compute_api_version('2.85') arglist = [ self.server.id, @@ -280,9 +275,8 @@ def test_server_volume_update_with_delete_on_termination(self, sm_mock): ) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_volume_update_with_preserve_on_termination(self, sm_mock): - sm_mock.return_value = True + def test_server_volume_update_with_preserve_on_termination(self): + self.set_compute_api_version('2.85') arglist = [ self.server.id, @@ -299,17 +293,12 @@ def test_server_volume_update_with_preserve_on_termination(self, sm_mock): result = self.cmd.take_action(parsed_args) self.compute_client.update_volume_attachment.assert_called_once_with( - self.server, - self.volume, - delete_on_termination=False + self.server, self.volume, delete_on_termination=False ) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_volume_update_with_delete_on_termination_pre_v285( - self, sm_mock, - ): - sm_mock.return_value = False + def test_server_volume_update_with_delete_on_termination_pre_v285(self): + self.set_compute_api_version('2.84') arglist = [ self.server.id, @@ -330,11 +319,8 @@ def 
test_server_volume_update_with_delete_on_termination_pre_v285( ) self.compute_client.update_volume_attachment.assert_not_called() - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_server_volume_update_with_preserve_on_termination_pre_v285( - self, sm_mock, - ): - sm_mock.return_value = False + def test_server_volume_update_with_preserve_on_termination_pre_v285(self): + self.set_compute_api_version('2.84') arglist = [ self.server.id, diff --git a/openstackclient/tests/unit/compute/v2/test_service.py b/openstackclient/tests/unit/compute/v2/test_service.py index 5b58431aba..a47ea7298f 100644 --- a/openstackclient/tests/unit/compute/v2/test_service.py +++ b/openstackclient/tests/unit/compute/v2/test_service.py @@ -11,37 +11,26 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# from unittest import mock -from unittest.mock import call -from novaclient import api_versions -from openstack import utils as sdk_utils +from openstack.compute.v2 import service as _service +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions from openstackclient.compute.v2 import service from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -class TestService(compute_fakes.TestComputev2): - +class TestServiceDelete(compute_fakes.TestComputev2): def setUp(self): - super(TestService, self).setUp() - - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute - - -class TestServiceDelete(TestService): + super().setUp() - services = compute_fakes.FakeService.create_services(count=2) - - def setUp(self): - super(TestServiceDelete, self).setUp() + self.services = list( + sdk_fakes.generate_fake_resources(_service.Service, count=2) + ) - self.sdk_client.delete_service.return_value = None + self.compute_client.delete_service.return_value = None # Get the command object to test self.cmd = service.DeleteService(self.app, None) @@ -57,9 +46,8 @@ def test_service_delete(self): result = self.cmd.take_action(parsed_args) - self.sdk_client.delete_service.assert_called_with( - self.services[0].binary, - ignore_missing=False + self.compute_client.delete_service.assert_called_with( + self.services[0].binary, ignore_missing=False ) self.assertIsNone(result) @@ -76,8 +64,8 @@ def test_multi_services_delete(self): calls = [] for s in self.services: - calls.append(call(s.binary, ignore_missing=False)) - self.sdk_client.delete_service.assert_has_calls(calls) + calls.append(mock.call(s.binary, ignore_missing=False)) + self.compute_client.delete_service.assert_has_calls(calls) self.assertIsNone(result) def test_multi_services_delete_with_exception(self): @@ -85,73 +73,45 @@ def test_multi_services_delete_with_exception(self): self.services[0].binary, 'unexist_service', ] - verifylist = [ - ('service', arglist) - ] + verifylist = [('service', arglist)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) delete_mock_result = [None, exceptions.CommandError] - self.sdk_client.delete_service = ( - mock.Mock(side_effect=delete_mock_result) - ) + self.compute_client.delete_service.side_effect = delete_mock_result try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual( - '1 of 2 compute services failed to delete.', str(e)) + '1 of 2 compute services 
failed to delete.', str(e) + ) - self.sdk_client.delete_service.assert_any_call( - self.services[0].binary, - ignore_missing=False + self.compute_client.delete_service.assert_any_call( + self.services[0].binary, ignore_missing=False ) - self.sdk_client.delete_service.assert_any_call( - 'unexist_service', - ignore_missing=False + self.compute_client.delete_service.assert_any_call( + 'unexist_service', ignore_missing=False ) -class TestServiceList(TestService): - - service = compute_fakes.FakeService.create_one_service() - - columns = ( - 'ID', - 'Binary', - 'Host', - 'Zone', - 'Status', - 'State', - 'Updated At', - ) - columns_long = columns + ( - 'Disabled Reason', - ) - - data = [( - service.id, - service.binary, - service.host, - service.availability_zone, - service.status, - service.state, - service.updated_at, - )] - data_long = [data[0] + (service.disabled_reason, )] - +class TestServiceList(compute_fakes.TestComputev2): def setUp(self): - super(TestServiceList, self).setUp() + super().setUp() + + self.service = sdk_fakes.generate_fake_resource(_service.Service) - self.sdk_client.services.return_value = [self.service] + self.compute_client.services.return_value = [self.service] # Get the command object to test self.cmd = service.ListService(self.app, None) def test_service_list(self): arglist = [ - '--host', self.service.host, - '--service', self.service.binary, + '--host', + self.service.host, + '--service', + self.service.binary, ] verifylist = [ ('host', self.service.host), @@ -164,27 +124,47 @@ def test_service_list(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.services.assert_called_with( + self.compute_client.services.assert_called_with( host=self.service.host, binary=self.service.binary, ) - self.assertEqual(self.columns, columns) - self.assertEqual(self.data, list(data)) + expected_columns = ( + 'ID', + 'Binary', + 'Host', + 'Zone', + 'Status', + 'State', + 'Updated At', + ) + expected_data = [ + ( + self.service.id, + self.service.binary, + self.service.host, + self.service.availability_zone, + self.service.status, + self.service.state, + self.service.updated_at, + ) + ] - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_list_with_long_option(self, sm_mock): - sm_mock.return_value = False + self.assertEqual(expected_columns, columns) + self.assertEqual(expected_data, list(data)) + def test_service_list_with_long_option(self): arglist = [ - '--host', self.service.host, - '--service', self.service.binary, - '--long' + '--host', + self.service.host, + '--service', + self.service.binary, + '--long', ] verifylist = [ ('host', self.service.host), ('service', self.service.binary), - ('long', True) + ('long', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -193,65 +173,106 @@ def test_service_list_with_long_option(self, sm_mock): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.services.assert_called_with( + self.compute_client.services.assert_called_with( host=self.service.host, binary=self.service.binary, ) - self.assertEqual(self.columns_long, columns) - self.assertEqual(self.data_long, list(data)) + expected_columns = ( + 'ID', + 'Binary', + 'Host', + 'Zone', + 'Status', + 'State', + 'Updated At', + 'Disabled Reason', + ) + expected_data = [ + ( + self.service.id, + self.service.binary, + self.service.host, + self.service.availability_zone, + self.service.status, + self.service.state, + self.service.updated_at, + self.service.disabled_reason, + ) + ] + + self.assertEqual(expected_columns, columns) + self.assertEqual(expected_data, list(data)) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_list_with_long_option_2_11(self, sm_mock): - sm_mock.return_value = True + def test_service_list_with_long_option_2_11(self): + self.set_compute_api_version('2.11') arglist = [ - '--host', self.service.host, - '--service', self.service.binary, - '--long' + '--host', + self.service.host, + '--service', + self.service.binary, + '--long', ] verifylist = [ ('host', self.service.host), ('service', self.service.binary), - ('long', True) + ('long', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.compute.api_version = api_versions.APIVersion( - '2.11') # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.services.assert_called_with( + self.compute_client.services.assert_called_with( host=self.service.host, binary=self.service.binary, ) # In 2.11 there is also a forced_down column. 
- columns_long = self.columns_long + ('Forced Down',) - data_long = [self.data_long[0] + (self.service.is_forced_down,)] - - self.assertEqual(columns_long, columns) - self.assertEqual(data_long, list(data)) + expected_columns = ( + 'ID', + 'Binary', + 'Host', + 'Zone', + 'Status', + 'State', + 'Updated At', + 'Disabled Reason', + 'Forced Down', + ) + expected_data = [ + ( + self.service.id, + self.service.binary, + self.service.host, + self.service.availability_zone, + self.service.status, + self.service.state, + self.service.updated_at, + self.service.disabled_reason, + self.service.is_forced_down, + ) + ] + self.assertEqual(expected_columns, columns) + self.assertEqual(expected_data, list(data)) -class TestServiceSet(TestService): +class TestServiceSet(compute_fakes.TestComputev2): def setUp(self): - super(TestServiceSet, self).setUp() + super().setUp() - self.service = compute_fakes.FakeService.create_one_service() + self.service = sdk_fakes.generate_fake_resource(_service.Service) - self.sdk_client.enable_service.return_value = self.service - self.sdk_client.disable_service.return_value = self.service + self.compute_client.enable_service.return_value = self.service + self.compute_client.disable_service.return_value = self.service self.cmd = service.SetService(self.app, None) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_set_nothing(self, sm_mock): - sm_mock.return_value = False + def test_set_nothing(self): arglist = [ self.service.host, self.service.binary, @@ -263,13 +284,11 @@ def test_set_nothing(self, sm_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.enable_service.assert_not_called() - self.sdk_client.disable_service.assert_not_called() + self.compute_client.enable_service.assert_not_called() + self.compute_client.disable_service.assert_not_called() self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_enable(self, sm_mock): - sm_mock.return_value = False + def test_service_set_enable(self): arglist = [ '--enable', self.service.host, @@ -284,16 +303,12 @@ def test_service_set_enable(self, sm_mock): result = self.cmd.take_action(parsed_args) - self.sdk_client.enable_service.assert_called_with( - None, - self.service.host, - self.service.binary + self.compute_client.enable_service.assert_called_with( + None, self.service.host, self.service.binary ) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_disable(self, sm_mock): - sm_mock.return_value = False + def test_service_set_disable(self): arglist = [ '--disable', self.service.host, @@ -308,21 +323,17 @@ def test_service_set_disable(self, sm_mock): result = self.cmd.take_action(parsed_args) - self.sdk_client.disable_service.assert_called_with( - None, - self.service.host, - self.service.binary, - None + self.compute_client.disable_service.assert_called_with( + None, self.service.host, self.service.binary, None ) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_disable_with_reason(self, sm_mock): - sm_mock.return_value = False + def test_service_set_disable_with_reason(self): reason = 'earthquake' arglist = [ '--disable', - '--disable-reason', reason, + '--disable-reason', + reason, self.service.host, self.service.binary, ] @@ -336,20 +347,16 @@ def test_service_set_disable_with_reason(self, sm_mock): result = self.cmd.take_action(parsed_args) - 
self.sdk_client.disable_service.assert_called_with( - None, - self.service.host, - self.service.binary, - reason + self.compute_client.disable_service.assert_called_with( + None, self.service.host, self.service.binary, reason ) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_only_with_disable_reason(self, sm_mock): - sm_mock.return_value = False + def test_service_set_only_with_disable_reason(self): reason = 'earthquake' arglist = [ - '--disable-reason', reason, + '--disable-reason', + reason, self.service.host, self.service.binary, ] @@ -363,16 +370,18 @@ def test_service_set_only_with_disable_reason(self, sm_mock): self.cmd.take_action(parsed_args) self.fail("CommandError should be raised.") except exceptions.CommandError as e: - self.assertEqual("Cannot specify option --disable-reason without " - "--disable specified.", str(e)) + self.assertEqual( + "Cannot specify option --disable-reason without " + "--disable specified.", + str(e), + ) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_enable_with_disable_reason(self, sm_mock): - sm_mock.return_value = False + def test_service_set_enable_with_disable_reason(self): reason = 'earthquake' arglist = [ '--enable', - '--disable-reason', reason, + '--disable-reason', + reason, self.service.host, self.service.binary, ] @@ -387,12 +396,15 @@ def test_service_set_enable_with_disable_reason(self, sm_mock): self.cmd.take_action(parsed_args) self.fail("CommandError should be raised.") except exceptions.CommandError as e: - self.assertEqual("Cannot specify option --disable-reason without " - "--disable specified.", str(e)) + self.assertEqual( + "Cannot specify option --disable-reason without " + "--disable specified.", + str(e), + ) + + def test_service_set_state_up(self): + self.set_compute_api_version('2.11') - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_state_up(self, sm_mock): - sm_mock.side_effect = [False, True] arglist = [ '--up', self.service.host, @@ -405,19 +417,16 @@ def test_service_set_state_up(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.update_service_forced_down.assert_called_once_with( - None, - self.service.host, - self.service.binary, - False + self.compute_client.update_service_forced_down.assert_called_once_with( + None, self.service.host, self.service.binary, False ) - self.assertNotCalled(self.sdk_client.enable_service) - self.assertNotCalled(self.sdk_client.disable_service) + self.assertNotCalled(self.compute_client.enable_service) + self.assertNotCalled(self.compute_client.disable_service) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_state_down(self, sm_mock): - sm_mock.side_effect = [False, True] + def test_service_set_state_down(self): + self.set_compute_api_version('2.11') + arglist = [ '--down', self.service.host, @@ -430,19 +439,16 @@ def test_service_set_state_down(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.update_service_forced_down.assert_called_once_with( - None, - self.service.host, - self.service.binary, - True + self.compute_client.update_service_forced_down.assert_called_once_with( + None, self.service.host, self.service.binary, True ) - self.assertNotCalled(self.sdk_client.enable_service) - 
self.assertNotCalled(self.sdk_client.disable_service) + self.assertNotCalled(self.compute_client.enable_service) + self.assertNotCalled(self.compute_client.disable_service) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_enable_and_state_down(self, sm_mock): - sm_mock.side_effect = [False, True] + def test_service_set_enable_and_state_down(self): + self.set_compute_api_version('2.11') + arglist = [ '--enable', '--down', @@ -457,22 +463,17 @@ def test_service_set_enable_and_state_down(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.sdk_client.enable_service.assert_called_once_with( - None, - self.service.host, - self.service.binary + self.compute_client.enable_service.assert_called_once_with( + None, self.service.host, self.service.binary ) - self.sdk_client.update_service_forced_down.assert_called_once_with( - None, - self.service.host, - self.service.binary, - True + self.compute_client.update_service_forced_down.assert_called_once_with( + None, self.service.host, self.service.binary, True ) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_enable_and_state_down_with_exception(self, sm_mock): - sm_mock.side_effect = [False, True] + def test_service_set_enable_and_state_down_with_exception(self): + self.set_compute_api_version('2.11') + arglist = [ '--enable', '--down', @@ -487,22 +488,21 @@ def test_service_set_enable_and_state_down_with_exception(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch.object(self.sdk_client, 'enable_service', - side_effect=Exception()): - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) - self.sdk_client.update_service_forced_down.assert_called_once_with( - None, - self.service.host, - self.service.binary, - True + with mock.patch.object( + self.compute_client, 'enable_service', side_effect=Exception() + ): + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.compute_client.update_service_forced_down.assert_called_once_with( + None, self.service.host, self.service.binary, True ) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_2_53_disable_down(self, sm_mock): + def test_service_set_disable_down(self): # Tests disabling and forcing down a compute service with microversion # 2.53 which requires looking up the service by host and binary. 
- sm_mock.return_value = True + self.set_compute_api_version('2.53') + arglist = [ '--disable', '--down', @@ -517,30 +517,26 @@ def test_service_set_2_53_disable_down(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) service_id = '339478d0-0b95-4a94-be63-d5be05dfeb1c' - self.sdk_client.services.return_value = [mock.Mock(id=service_id)] + self.compute_client.services.return_value = [mock.Mock(id=service_id)] result = self.cmd.take_action(parsed_args) - self.sdk_client.disable_service.assert_called_once_with( - service_id, - self.service.host, - self.service.binary, - None + self.compute_client.disable_service.assert_called_once_with( + service_id, self.service.host, self.service.binary, None + ) + self.compute_client.update_service_forced_down.assert_called_once_with( + service_id, self.service.host, self.service.binary, True ) - self.sdk_client.update_service_forced_down.assert_called_once_with( - service_id, - self.service.host, - self.service.binary, - True) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_2_53_disable_reason(self, sm_mock): + def test_service_set_disable_reason(self): # Tests disabling with reason a compute service with microversion # 2.53 which requires looking up the service by host and binary. - sm_mock.return_value = True + self.set_compute_api_version('2.53') + reason = 'earthquake' arglist = [ '--disable', - '--disable-reason', reason, + '--disable-reason', + reason, self.service.host, self.service.binary, ] @@ -552,21 +548,18 @@ def test_service_set_2_53_disable_reason(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) service_id = '339478d0-0b95-4a94-be63-d5be05dfeb1c' - self.sdk_client.services.return_value = [mock.Mock(id=service_id)] + self.compute_client.services.return_value = [mock.Mock(id=service_id)] result = self.cmd.take_action(parsed_args) - self.sdk_client.disable_service.assert_called_once_with( - service_id, - self.service.host, - self.service.binary, - reason + self.compute_client.disable_service.assert_called_once_with( + service_id, self.service.host, self.service.binary, reason ) self.assertIsNone(result) - @mock.patch.object(sdk_utils, 'supports_microversion') - def test_service_set_2_53_enable_up(self, sm_mock): + def test_service_set_enable_up(self): # Tests enabling and bringing up a compute service with microversion # 2.53 which requires looking up the service by host and binary. 
- sm_mock.return_value = True + self.set_compute_api_version('2.53') + arglist = [ '--enable', '--up', @@ -581,36 +574,47 @@ def test_service_set_2_53_enable_up(self, sm_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) service_id = '339478d0-0b95-4a94-be63-d5be05dfeb1c' - self.sdk_client.services.return_value = [mock.Mock(id=service_id)] + self.compute_client.services.return_value = [mock.Mock(id=service_id)] result = self.cmd.take_action(parsed_args) - self.sdk_client.enable_service.assert_called_once_with( - service_id, - self.service.host, - self.service.binary + self.compute_client.enable_service.assert_called_once_with( + service_id, self.service.host, self.service.binary ) - self.sdk_client.update_service_forced_down.assert_called_once_with( - service_id, - self.service.host, - self.service.binary, - False + self.compute_client.update_service_forced_down.assert_called_once_with( + service_id, self.service.host, self.service.binary, False ) self.assertIsNone(result) def test_service_set_find_service_by_host_and_binary_no_results(self): # Tests that no compute services are found by host and binary. - self.sdk_client.services.return_value = [] - ex = self.assertRaises(exceptions.CommandError, - self.cmd._find_service_by_host_and_binary, - self.sdk_client, 'fake-host', 'nova-compute') - self.assertIn('Compute service for host "fake-host" and binary ' - '"nova-compute" not found.', str(ex)) + self.compute_client.services.return_value = [] + ex = self.assertRaises( + exceptions.CommandError, + self.cmd._find_service_by_host_and_binary, + self.compute_client, + 'fake-host', + 'nova-compute', + ) + self.assertIn( + 'Compute service for host "fake-host" and binary ' + '"nova-compute" not found.', + str(ex), + ) def test_service_set_find_service_by_host_and_binary_many_results(self): # Tests that more than one compute service is found by host and binary. - self.sdk_client.services.return_value = [mock.Mock(), mock.Mock()] - ex = self.assertRaises(exceptions.CommandError, - self.cmd._find_service_by_host_and_binary, - self.sdk_client, 'fake-host', 'nova-compute') - self.assertIn('Multiple compute services found for host "fake-host" ' - 'and binary "nova-compute". Unable to proceed.', - str(ex)) + self.compute_client.services.return_value = [ + mock.Mock(), + mock.Mock(), + ] + ex = self.assertRaises( + exceptions.CommandError, + self.cmd._find_service_by_host_and_binary, + self.compute_client, + 'fake-host', + 'nova-compute', + ) + self.assertIn( + 'Multiple compute services found for host "fake-host" ' + 'and binary "nova-compute". Unable to proceed.', + str(ex), + ) diff --git a/openstackclient/tests/unit/compute/v2/test_usage.py b/openstackclient/tests/unit/compute/v2/test_usage.py index 85b45e1b28..6169678185 100644 --- a/openstackclient/tests/unit/compute/v2/test_usage.py +++ b/openstackclient/tests/unit/compute/v2/test_usage.py @@ -11,60 +11,60 @@ # under the License. 
# +import datetime from unittest import mock +from openstack.compute.v2 import usage as _usage +from openstack.identity.v3 import project as _project +from openstack.test import fakes as sdk_fakes + from openstackclient.compute.v2 import usage as usage_cmds from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes class TestUsage(compute_fakes.TestComputev2): - def setUp(self): - super(TestUsage, self).setUp() - - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.compute = mock.Mock() - self.sdk_client = self.app.client_manager.sdk_connection.compute + super().setUp() - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() class TestUsageList(TestUsage): - - project = identity_fakes.FakeProject.create_one_project() + project = sdk_fakes.generate_fake_resource(_project.Project) # Return value of self.usage_mock.list(). - usages = compute_fakes.FakeUsage.create_usages( - attrs={'project_id': project.name}, count=1) + usages = [ + sdk_fakes.generate_fake_resource(_usage.Usage, project_id=project.name) + ] columns = ( "Project", "Servers", "RAM MB-Hours", "CPU Hours", - "Disk GB-Hours" + "Disk GB-Hours", ) - data = [( - usage_cmds.ProjectColumn(usages[0].project_id), - usage_cmds.CountColumn(usages[0].server_usages), - usage_cmds.FloatColumn(usages[0].total_memory_mb_usage), - usage_cmds.FloatColumn(usages[0].total_vcpus_usage), - usage_cmds.FloatColumn(usages[0].total_local_gb_usage), - )] + data = [ + ( + usage_cmds.ProjectColumn(usages[0].project_id), + usage_cmds.CountColumn(usages[0].server_usages), + usage_cmds.FloatColumn(usages[0].total_memory_mb_usage), + usage_cmds.FloatColumn(usages[0].total_vcpus_usage), + usage_cmds.FloatColumn(usages[0].total_local_gb_usage), + ) + ] def setUp(self): - super(TestUsageList, self).setUp() + super().setUp() - self.sdk_client.usages.return_value = self.usages + self.compute_client.usages.return_value = self.usages self.projects_mock.list.return_value = [self.project] # Get the command object to test self.cmd = usage_cmds.ListUsage(self.app, None) def test_usage_list_no_options(self): - arglist = [] verifylist = [ ('start', None), @@ -82,8 +82,10 @@ def test_usage_list_no_options(self): def test_usage_list_with_options(self): arglist = [ - '--start', '2016-11-11', - '--end', '2016-12-20', + '--start', + '2016-11-11', + '--end', + '2016-12-20', ] verifylist = [ ('start', '2016-11-11'), @@ -95,10 +97,11 @@ def test_usage_list_with_options(self): columns, data = self.cmd.take_action(parsed_args) self.projects_mock.list.assert_called_with() - self.sdk_client.usages.assert_called_with( - start='2016-11-11T00:00:00', - end='2016-12-20T00:00:00', - detailed=True) + self.compute_client.usages.assert_called_with( + start=datetime.datetime(2016, 11, 11, 0, 0), + end=datetime.datetime(2016, 12, 20, 0, 0), + detailed=True, + ) self.assertCountEqual(self.columns, columns) self.assertCountEqual(tuple(self.data), tuple(data)) @@ -115,19 +118,19 @@ def test_usage_list_with_pagination(self): columns, data = self.cmd.take_action(parsed_args) self.projects_mock.list.assert_called_with() - self.sdk_client.usages.assert_has_calls([ - mock.call(start=mock.ANY, end=mock.ANY, detailed=True) - ]) + self.compute_client.usages.assert_has_calls( + [mock.call(start=mock.ANY, end=mock.ANY, detailed=True)] + ) self.assertCountEqual(self.columns, columns) 
self.assertCountEqual(tuple(self.data), tuple(data)) class TestUsageShow(TestUsage): - - project = identity_fakes.FakeProject.create_one_project() + project = sdk_fakes.generate_fake_resource(_project.Project) # Return value of self.usage_mock.list(). - usage = compute_fakes.FakeUsage.create_one_usage( - attrs={'project_id': project.name}) + usage = sdk_fakes.generate_fake_resource( + _usage.Usage, project_id=project.name + ) columns = ( 'Project', @@ -146,16 +149,15 @@ class TestUsageShow(TestUsage): ) def setUp(self): - super(TestUsageShow, self).setUp() + super().setUp() - self.sdk_client.get_usage.return_value = self.usage + self.compute_client.get_usage.return_value = self.usage self.projects_mock.get.return_value = self.project # Get the command object to test self.cmd = usage_cmds.ShowUsage(self.app, None) def test_usage_show_no_options(self): - self.app.client_manager.auth_ref = mock.Mock() self.app.client_manager.auth_ref.project_id = self.project.id @@ -174,11 +176,13 @@ def test_usage_show_no_options(self): self.assertEqual(self.data, data) def test_usage_show_with_options(self): - arglist = [ - '--project', self.project.id, - '--start', '2016-11-11', - '--end', '2016-12-20', + '--project', + self.project.id, + '--start', + '2016-11-11', + '--end', + '2016-12-20', ] verifylist = [ ('project', self.project.id), @@ -190,10 +194,11 @@ def test_usage_show_with_options(self): columns, data = self.cmd.take_action(parsed_args) - self.sdk_client.get_usage.assert_called_with( + self.compute_client.get_usage.assert_called_with( project=self.project.id, - start='2016-11-11T00:00:00', - end='2016-12-20T00:00:00') + start=datetime.datetime(2016, 11, 11, 0, 0), + end=datetime.datetime(2016, 12, 20, 0, 0), + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/fakes.py b/openstackclient/tests/unit/fakes.py index 086c246675..be9d7f218d 100644 --- a/openstackclient/tests/unit/fakes.py +++ b/openstackclient/tests/unit/fakes.py @@ -12,13 +12,43 @@ # License for the specific language governing permissions and limitations # under the License. 
+# TODO(stephenfin): Remove the contents of this module in favour of the osc_lib +# version once our min version is bumped to 4.3.0 + import json -import sys from unittest import mock from keystoneauth1 import fixture +from osc_lib.tests.fakes import ( + FakeApp, + FakeClientManager as BaseFakeClientManager, + FakeLog, + FakeOptions, + FakeResource as BaseFakeResource, + FakeStdout, +) import requests +__all__ = [ + 'AUTH_TOKEN', + 'AUTH_URL', + 'INTERFACE', + 'PASSWORD', + 'PROJECT_NAME', + 'REGION_NAME', + 'TEST_RESPONSE_DICT', + 'TEST_RESPONSE_DICT_V3', + 'TEST_VERSIONS', + 'USERNAME', + 'VERSION', + 'FakeApp', + 'FakeClientManager', + 'FakeLog', + 'FakeOptions', + 'FakeResource', + 'FakeResponse', + 'FakeStdout', +] AUTH_TOKEN = "foobar" AUTH_URL = "http://0.0.0.0" @@ -29,8 +59,7 @@ INTERFACE = "catchy" VERSION = "3" -TEST_RESPONSE_DICT = fixture.V2Token(token_id=AUTH_TOKEN, - user_name=USERNAME) +TEST_RESPONSE_DICT = fixture.V2Token(token_id=AUTH_TOKEN, user_name=USERNAME) _s = TEST_RESPONSE_DICT.add_service('identity', name='keystone') _s.add_endpoint(AUTH_URL + ':5000/v2.0') _s = TEST_RESPONSE_DICT.add_service('network', name='neutron') @@ -48,90 +77,25 @@ TEST_VERSIONS = fixture.DiscoveryList(href=AUTH_URL) -class FakeStdout(object): - - def __init__(self): - self.content = [] - - def write(self, text): - self.content.append(text) - - def make_string(self): - result = '' - for line in self.content: - result = result + line - return result - - -class FakeLog(object): - - def __init__(self): - self.messages = {} - - def debug(self, msg): - self.messages['debug'] = msg - - def info(self, msg): - self.messages['info'] = msg - - def warning(self, msg): - self.messages['warning'] = msg - - def error(self, msg): - self.messages['error'] = msg - - def critical(self, msg): - self.messages['critical'] = msg - - -class FakeApp(object): - - def __init__(self, _stdout, _log): - self.stdout = _stdout - self.client_manager = None - self.api_version = {} - self.stdin = sys.stdin - self.stdout = _stdout or sys.stdout - self.stderr = sys.stderr - self.log = _log - - -class FakeOptions(object): - def __init__(self, **kwargs): - self.os_beta_command = False - - -class FakeClient(object): - - def __init__(self, **kwargs): - self.endpoint = kwargs['endpoint'] - self.token = kwargs['token'] - - -class FakeClientManager(object): +class FakeClientManager(BaseFakeClientManager): _api_version = { 'image': '2', } def __init__(self): - self.compute = None - self.identity = None - self.image = None - self.object_store = None - self.volume = None - self.network = None - self.session = None - self.auth_ref = None - self.auth_plugin_name = None + super().__init__() + + self.sdk_connection = mock.Mock() + self.network_endpoint_enabled = True self.compute_endpoint_enabled = True self.volume_endpoint_enabled = True + # The source of configuration. 
This is either 'cloud_config' (a # clouds.yaml file) or 'global_env' ('OS_'-prefixed envvars) self.configuration_type = 'cloud_config' def get_configuration(self): - config = { 'region': REGION_NAME, 'identity_api_version': VERSION, @@ -156,69 +120,11 @@ def is_network_endpoint_enabled(self): def is_compute_endpoint_enabled(self): return self.compute_endpoint_enabled - def is_volume_endpoint_enabled(self, client): + def is_volume_endpoint_enabled(self, client=None): return self.volume_endpoint_enabled -class FakeModule(object): - - def __init__(self, name, version): - self.name = name - self.__version__ = version - # Workaround for openstacksdk case - self.version = mock.Mock() - self.version.__version__ = version - - -class FakeResource(object): - - def __init__(self, manager=None, info=None, loaded=False, methods=None): - """Set attributes and methods for a resource. - - :param manager: - The resource manager - :param Dictionary info: - A dictionary with all attributes - :param bool loaded: - True if the resource is loaded in memory - :param Dictionary methods: - A dictionary with all methods - """ - info = info or {} - methods = methods or {} - - self.__name__ = type(self).__name__ - self.manager = manager - self._info = info - self._add_details(info) - self._add_methods(methods) - self._loaded = loaded - - def _add_details(self, info): - for (k, v) in info.items(): - setattr(self, k, v) - - def _add_methods(self, methods): - """Fake methods with MagicMock objects. - - For each <@key, @value> pairs in methods, add an callable MagicMock - object named @key as an attribute, and set the mock's return_value to - @value. When users access the attribute with (), @value will be - returned, which looks like a function call. - """ - for (name, ret) in methods.items(): - method = mock.Mock(return_value=ret) - setattr(self, name, method) - - def __repr__(self): - reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and - k != 'manager') - info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys) - return "<%s %s>" % (self.__class__.__name__, info) - - def keys(self): - return self._info.keys() - +class FakeResource(BaseFakeResource): def to_dict(self): return self._info @@ -237,10 +143,10 @@ def pop(self, key, default_value=None): class FakeResponse(requests.Response): - - def __init__(self, headers=None, status_code=200, - data=None, encoding=None): - super(FakeResponse, self).__init__() + def __init__( + self, headers=None, status_code=200, data=None, encoding=None + ): + super().__init__() headers = headers or {} @@ -250,12 +156,3 @@ def __init__(self, headers=None, status_code=200, self._content = json.dumps(data) if not isinstance(self._content, bytes): self._content = self._content.encode() - - -class FakeModel(dict): - - def __getattr__(self, key): - try: - return self[key] - except KeyError: - raise AttributeError(key) diff --git a/openstackclient/tests/unit/identity/test_common.py b/openstackclient/tests/unit/identity/test_common.py new file mode 100644 index 0000000000..ae85262dc0 --- /dev/null +++ b/openstackclient/tests/unit/identity/test_common.py @@ -0,0 +1,100 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack import exceptions as sdk_exc +from openstack.identity.v3 import user as _user +from openstack.test import fakes as sdk_fakes +from osc_lib import exceptions + +from openstackclient.identity import common +from openstackclient.tests.unit import utils as test_utils + + +class TestFindSDKId(test_utils.TestCase): + def setUp(self): + super().setUp() + self.user = sdk_fakes.generate_fake_resource(_user.User) + self.identity_sdk_client = mock.Mock() + self.identity_sdk_client.find_user = mock.Mock() + + def test_find_sdk_id_validate(self): + self.identity_sdk_client.find_user.side_effect = [self.user] + + result = common._find_sdk_id( + self.identity_sdk_client.find_user, + name_or_id=self.user.id, + validate_actor_existence=True, + ) + self.assertEqual(self.user.id, result) + + def test_find_sdk_id_no_validate(self): + self.identity_sdk_client.find_user.side_effect = [self.user] + + result = common._find_sdk_id( + self.identity_sdk_client.find_user, + name_or_id=self.user.id, + validate_actor_existence=False, + ) + self.assertEqual(self.user.id, result) + + def test_find_sdk_id_not_found_validate(self): + self.identity_sdk_client.find_user.side_effect = [ + sdk_exc.ResourceNotFound, + ] + + self.assertRaises( + exceptions.CommandError, + common._find_sdk_id, + self.identity_sdk_client.find_user, + name_or_id=self.user.id, + validate_actor_existence=True, + ) + + def test_find_sdk_id_not_found_no_validate(self): + self.identity_sdk_client.find_user.side_effect = [ + sdk_exc.ResourceNotFound, + ] + + result = common._find_sdk_id( + self.identity_sdk_client.find_user, + name_or_id=self.user.id, + validate_actor_existence=False, + ) + self.assertEqual(self.user.id, result) + + def test_find_sdk_id_forbidden_validate(self): + self.identity_sdk_client.find_user.side_effect = [ + sdk_exc.ForbiddenException, + ] + + result = common._find_sdk_id( + self.identity_sdk_client.find_user, + name_or_id=self.user.id, + validate_actor_existence=True, + ) + + self.assertEqual(self.user.id, result) + + def test_find_sdk_id_forbidden_no_validate(self): + self.identity_sdk_client.find_user.side_effect = [ + sdk_exc.ForbiddenException, + ] + + result = common._find_sdk_id( + self.identity_sdk_client.find_user, + name_or_id=self.user.id, + validate_actor_existence=False, + ) + + self.assertEqual(self.user.id, result) diff --git a/openstackclient/tests/unit/identity/v2_0/fakes.py b/openstackclient/tests/unit/identity/v2_0/fakes.py index bd76a784a0..c5089d48b5 100644 --- a/openstackclient/tests/unit/identity/v2_0/fakes.py +++ b/openstackclient/tests/unit/identity/v2_0/fakes.py @@ -19,6 +19,7 @@ from keystoneauth1 import access from keystoneauth1 import fixture +from openstack.identity.v2 import _proxy from openstackclient.tests.unit import fakes from openstackclient.tests.unit import utils @@ -154,8 +155,7 @@ def fake_auth_ref(fake_token, fake_service=None): return auth_ref -class FakeIdentityv2Client(object): - +class FakeIdentityv2Client: def __init__(self, **kwargs): self.roles = mock.Mock() self.roles.resource_class = fakes.FakeResource(None, {}) @@ -184,18 
+184,33 @@ def __getattr__(self, name): raise AttributeError(name) -class TestIdentityv2(utils.TestCommand): - +class FakeClientMixin: def setUp(self): - super(TestIdentityv2, self).setUp() + super().setUp() self.app.client_manager.identity = FakeIdentityv2Client( endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN, ) + self.identity_client = self.app.client_manager.identity + + # TODO(stephenfin): Rename to 'identity_client' once all commands are + # migrated to SDK + self.app.client_manager.sdk_connection.identity = mock.Mock( + _proxy.Proxy + ) + self.identity_sdk_client = ( + self.app.client_manager.sdk_connection.identity + ) + + +class TestIdentityv2( + FakeClientMixin, + utils.TestCommand, +): ... -class FakeExtension(object): +class FakeExtension: """Fake one or more extension.""" @staticmethod @@ -212,26 +227,29 @@ def create_one_extension(attrs=None): # Set default attributes. extension_info = { 'name': 'name-' + uuid.uuid4().hex, - 'namespace': ('http://docs.openstack.org/identity/' - 'api/ext/OS-KSCRUD/v1.0'), + 'namespace': ( + 'http://docs.openstack.org/identity/api/ext/OS-KSCRUD/v1.0' + ), 'description': 'description-' + uuid.uuid4().hex, 'updated': '2013-07-07T12:00:0-00:00', 'alias': 'OS-KSCRUD', - 'links': ('[{"href":' - '"https://github.com/openstack/identity-api", "type":' - ' "text/html", "rel": "describedby"}]') + 'links': ( + '[{"href":' + '"https://github.com/openstack/identity-api", "type":' + ' "text/html", "rel": "describedby"}]' + ), } # Overwrite default attributes. extension_info.update(attrs) extension = fakes.FakeResource( - info=copy.deepcopy(extension_info), - loaded=True) + info=copy.deepcopy(extension_info), loaded=True + ) return extension -class FakeCatalog(object): +class FakeCatalog: """Fake one or more catalog.""" @staticmethod @@ -275,13 +293,13 @@ def create_catalog(attrs=None): catalog_info.update(attrs) catalog = fakes.FakeResource( - info=copy.deepcopy(catalog_info), - loaded=True) + info=copy.deepcopy(catalog_info), loaded=True + ) return catalog -class FakeProject(object): +class FakeProject: """Fake one or more project.""" @staticmethod @@ -305,8 +323,9 @@ def create_one_project(attrs=None): } project_info.update(attrs) - project = fakes.FakeResource(info=copy.deepcopy(project_info), - loaded=True) + project = fakes.FakeResource( + info=copy.deepcopy(project_info), loaded=True + ) return project @staticmethod @@ -327,7 +346,7 @@ def create_projects(attrs=None, count=2): return projects -class FakeEndpoint(object): +class FakeEndpoint: """Fake one or more endpoint.""" @staticmethod @@ -352,12 +371,12 @@ def create_one_endpoint(attrs=None): 'id': 'endpoint-id-' + uuid.uuid4().hex, 'publicurl': 'http://endpoint_publicurl', 'service_id': 'service-name-' + uuid.uuid4().hex, - } endpoint_info.update(attrs) - endpoint = fakes.FakeResource(info=copy.deepcopy(endpoint_info), - loaded=True) + endpoint = fakes.FakeResource( + info=copy.deepcopy(endpoint_info), loaded=True + ) return endpoint @staticmethod @@ -378,7 +397,7 @@ def create_endpoints(attrs=None, count=2): return endpoints -class FakeService(object): +class FakeService: """Fake one or more service.""" @staticmethod @@ -399,12 +418,12 @@ def create_one_service(attrs=None): 'name': 'service-name-' + uuid.uuid4().hex, 'description': 'service_description', 'type': 'service_type', - } service_info.update(attrs) - service = fakes.FakeResource(info=copy.deepcopy(service_info), - loaded=True) + service = fakes.FakeResource( + info=copy.deepcopy(service_info), loaded=True + ) return service @staticmethod 
@@ -425,7 +444,7 @@ def create_services(attrs=None, count=2): return services -class FakeRole(object): +class FakeRole: """Fake one or more role.""" @staticmethod @@ -447,8 +466,7 @@ def create_one_role(attrs=None): } role_info.update(attrs) - role = fakes.FakeResource(info=copy.deepcopy(role_info), - loaded=True) + role = fakes.FakeResource(info=copy.deepcopy(role_info), loaded=True) return role @staticmethod @@ -469,7 +487,7 @@ def create_roles(attrs=None, count=2): return roles -class FakeUser(object): +class FakeUser: """Fake one or more user.""" @staticmethod @@ -493,8 +511,7 @@ def create_one_user(attrs=None): } user_info.update(attrs) - user = fakes.FakeResource(info=copy.deepcopy(user_info), - loaded=True) + user = fakes.FakeResource(info=copy.deepcopy(user_info), loaded=True) return user @staticmethod diff --git a/openstackclient/tests/unit/identity/v2_0/test_catalog.py b/openstackclient/tests/unit/identity/v2_0/test_catalog.py index bfb28f6962..ef8b1cf9a1 100644 --- a/openstackclient/tests/unit/identity/v2_0/test_catalog.py +++ b/openstackclient/tests/unit/identity/v2_0/test_catalog.py @@ -19,11 +19,10 @@ class TestCatalog(utils.TestCommand): - service_catalog = identity_fakes.FakeCatalog.create_catalog() def setUp(self): - super(TestCatalog, self).setUp() + super().setUp() self.sc_mock = mock.Mock() self.sc_mock.service_catalog.catalog.return_value = [ @@ -37,7 +36,6 @@ def setUp(self): class TestCatalogList(TestCatalog): - columns = ( 'Name', 'Type', @@ -45,7 +43,7 @@ class TestCatalogList(TestCatalog): ) def setUp(self): - super(TestCatalogList, self).setUp() + super().setUp() # Get the command object to test self.cmd = catalog.ListCatalog(self.app, None) @@ -55,8 +53,7 @@ def test_catalog_list(self): identity_fakes.TOKEN, fake_service=self.service_catalog, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [] verifylist = [] @@ -68,12 +65,15 @@ def test_catalog_list(self): columns, data = self.cmd.take_action(parsed_args) self.assertEqual(self.columns, columns) - datalist = (( - 'supernova', - 'compute', - catalog.EndpointsColumn( - auth_ref.service_catalog.catalog[0]['endpoints']), - ), ) + datalist = ( + ( + 'supernova', + 'compute', + catalog.EndpointsColumn( + auth_ref.service_catalog.catalog[0]['endpoints'] + ), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_catalog_list_with_endpoint_url(self): @@ -98,8 +98,7 @@ def test_catalog_list_with_endpoint_url(self): identity_fakes.TOKEN, fake_service=service_catalog, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [] verifylist = [] @@ -111,19 +110,21 @@ def test_catalog_list_with_endpoint_url(self): columns, data = self.cmd.take_action(parsed_args) self.assertEqual(self.columns, columns) - datalist = (( - 'supernova', - 'compute', - catalog.EndpointsColumn( - auth_ref.service_catalog.catalog[0]['endpoints']), - ), ) + datalist = ( + ( + 'supernova', + 'compute', + catalog.EndpointsColumn( + auth_ref.service_catalog.catalog[0]['endpoints'] + ), + ), + ) self.assertCountEqual(datalist, tuple(data)) class TestCatalogShow(TestCatalog): - def setUp(self): - super(TestCatalogShow, self).setUp() + super().setUp() # Get the command object to test self.cmd = catalog.ShowCatalog(self.app, None) @@ -133,8 +134,7 @@ def test_catalog_show(self): identity_fakes.UNSCOPED_TOKEN, 
fake_service=self.service_catalog, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [ 'compute', @@ -153,7 +153,8 @@ def test_catalog_show(self): self.assertEqual(collist, columns) datalist = ( catalog.EndpointsColumn( - auth_ref.service_catalog.catalog[0]['endpoints']), + auth_ref.service_catalog.catalog[0]['endpoints'] + ), self.service_catalog.id, 'supernova', 'compute', @@ -174,7 +175,8 @@ def test_endpoints_column_human_readable(self): '\n publicURL: https://public.none.example.com\n ' 'internalURL: https://internal.none.example.com\n ' 'adminURL: https://admin.none.example.com\n', - col.human_readable()) + col.human_readable(), + ) def test_endpoints_column_human_readable_with_partial_endpoint_urls(self): endpoints = [ @@ -193,4 +195,5 @@ def test_endpoints_column_human_readable_with_partial_endpoint_urls(self): 'one\n publicURL: https://public.one.example.com\n' 'two\n publicURL: https://public.two.example.com\n ' 'internalURL: https://internal.two.example.com\n', - col.human_readable()) + col.human_readable(), + ) diff --git a/openstackclient/tests/unit/identity/v2_0/test_endpoint.py b/openstackclient/tests/unit/identity/v2_0/test_endpoint.py index 915e04a500..0bc82bc7ef 100644 --- a/openstackclient/tests/unit/identity/v2_0/test_endpoint.py +++ b/openstackclient/tests/unit/identity/v2_0/test_endpoint.py @@ -16,7 +16,6 @@ class TestEndpoint(identity_fakes.TestIdentityv2): - fake_service = identity_fakes.FakeService.create_one_service() attr = { 'service_name': fake_service.name, @@ -25,21 +24,20 @@ class TestEndpoint(identity_fakes.TestIdentityv2): fake_endpoint = identity_fakes.FakeEndpoint.create_one_endpoint(attr) def setUp(self): - super(TestEndpoint, self).setUp() + super().setUp() # Get a shortcut to the EndpointManager Mock - self.endpoints_mock = self.app.client_manager.identity.endpoints + self.endpoints_mock = self.identity_client.endpoints self.endpoints_mock.reset_mock() # Get a shortcut to the ServiceManager Mock - self.services_mock = self.app.client_manager.identity.services + self.services_mock = self.identity_client.services self.services_mock.reset_mock() class TestEndpointCreate(TestEndpoint): - def setUp(self): - super(TestEndpointCreate, self).setUp() + super().setUp() self.endpoints_mock.create.return_value = self.fake_endpoint @@ -50,10 +48,14 @@ def setUp(self): def test_endpoint_create(self): arglist = [ - '--publicurl', self.fake_endpoint.publicurl, - '--internalurl', self.fake_endpoint.internalurl, - '--adminurl', self.fake_endpoint.adminurl, - '--region', self.fake_endpoint.region, + '--publicurl', + self.fake_endpoint.publicurl, + '--internalurl', + self.fake_endpoint.internalurl, + '--adminurl', + self.fake_endpoint.adminurl, + '--region', + self.fake_endpoint.region, self.fake_service.id, ] verifylist = [ @@ -80,8 +82,16 @@ def test_endpoint_create(self): self.fake_endpoint.internalurl, ) - collist = ('adminurl', 'id', 'internalurl', 'publicurl', - 'region', 'service_id', 'service_name', 'service_type') + collist = ( + 'adminurl', + 'id', + 'internalurl', + 'publicurl', + 'region', + 'service_id', + 'service_name', + 'service_type', + ) self.assertEqual(collist, columns) datalist = ( self.fake_endpoint.adminurl, @@ -98,9 +108,8 @@ def test_endpoint_create(self): class TestEndpointDelete(TestEndpoint): - def setUp(self): - super(TestEndpointDelete, self).setUp() + super().setUp() self.endpoints_mock.get.return_value = 
self.fake_endpoint self.endpoints_mock.delete.return_value = None @@ -126,9 +135,8 @@ def test_endpoint_delete_no_options(self): class TestEndpointList(TestEndpoint): - def setUp(self): - super(TestEndpointList, self).setUp() + super().setUp() self.endpoints_mock.list.return_value = [self.fake_endpoint] @@ -151,12 +159,14 @@ def test_endpoint_list_no_options(self): collist = ('ID', 'Region', 'Service Name', 'Service Type') self.assertEqual(collist, columns) - datalist = (( - self.fake_endpoint.id, - self.fake_endpoint.region, - self.fake_endpoint.service_name, - self.fake_endpoint.service_type, - ), ) + datalist = ( + ( + self.fake_endpoint.id, + self.fake_endpoint.region, + self.fake_endpoint.service_name, + self.fake_endpoint.service_type, + ), + ) self.assertEqual(datalist, tuple(data)) def test_endpoint_list_long(self): @@ -175,25 +185,33 @@ def test_endpoint_list_long(self): self.endpoints_mock.list.assert_called_with() - collist = ('ID', 'Region', 'Service Name', 'Service Type', - 'PublicURL', 'AdminURL', 'InternalURL') + collist = ( + 'ID', + 'Region', + 'Service Name', + 'Service Type', + 'PublicURL', + 'AdminURL', + 'InternalURL', + ) self.assertEqual(collist, columns) - datalist = (( - self.fake_endpoint.id, - self.fake_endpoint.region, - self.fake_endpoint.service_name, - self.fake_endpoint.service_type, - self.fake_endpoint.publicurl, - self.fake_endpoint.adminurl, - self.fake_endpoint.internalurl, - ), ) + datalist = ( + ( + self.fake_endpoint.id, + self.fake_endpoint.region, + self.fake_endpoint.service_name, + self.fake_endpoint.service_type, + self.fake_endpoint.publicurl, + self.fake_endpoint.adminurl, + self.fake_endpoint.internalurl, + ), + ) self.assertEqual(datalist, tuple(data)) class TestEndpointShow(TestEndpoint): - def setUp(self): - super(TestEndpointShow, self).setUp() + super().setUp() self.endpoints_mock.list.return_value = [self.fake_endpoint] @@ -223,8 +241,16 @@ def test_endpoint_show(self): self.fake_endpoint.service_id, ) - collist = ('adminurl', 'id', 'internalurl', 'publicurl', - 'region', 'service_id', 'service_name', 'service_type') + collist = ( + 'adminurl', + 'id', + 'internalurl', + 'publicurl', + 'region', + 'service_id', + 'service_name', + 'service_type', + ) self.assertEqual(collist, columns) datalist = ( self.fake_endpoint.adminurl, diff --git a/openstackclient/tests/unit/identity/v2_0/test_project.py b/openstackclient/tests/unit/identity/v2_0/test_project.py index 496214aaee..bb6a643743 100644 --- a/openstackclient/tests/unit/identity/v2_0/test_project.py +++ b/openstackclient/tests/unit/identity/v2_0/test_project.py @@ -25,7 +25,6 @@ class TestProject(identity_fakes.TestIdentityv2): - fake_project = identity_fakes.FakeProject.create_one_project() fake_projects = identity_fakes.FakeProject.create_projects() @@ -42,24 +41,31 @@ class TestProject(identity_fakes.TestIdentityv2): fake_project.name, ) datalists = ( - (fake_projects[0].description, True, - fake_projects[0].id, fake_projects[0].name,), - (fake_projects[1].description, True, - fake_projects[1].id, fake_projects[1].name,), + ( + fake_projects[0].description, + True, + fake_projects[0].id, + fake_projects[0].name, + ), + ( + fake_projects[1].description, + True, + fake_projects[1].id, + fake_projects[1].name, + ), ) def setUp(self): - super(TestProject, self).setUp() + super().setUp() # Get a shortcut to the TenantManager Mock - self.projects_mock = self.app.client_manager.identity.tenants + self.projects_mock = self.identity_client.tenants self.projects_mock.reset_mock() class 
TestProjectCreate(TestProject): - def setUp(self): - super(TestProjectCreate, self).setUp() + super().setUp() self.projects_mock.create.return_value = self.fake_project @@ -88,15 +94,15 @@ def test_project_create_no_options(self): 'enabled': True, } self.projects_mock.create.assert_called_with( - self.fake_project.name, - **kwargs + self.fake_project.name, **kwargs ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_project_create_description(self): arglist = [ - '--description', 'new desc', + '--description', + 'new desc', self.fake_project.name, ] verifylist = [ @@ -116,8 +122,7 @@ def test_project_create_description(self): 'enabled': True, } self.projects_mock.create.assert_called_with( - self.fake_project.name, - **kwargs + self.fake_project.name, **kwargs ) self.assertEqual(self.columns, columns) @@ -146,8 +151,7 @@ def test_project_create_enable(self): 'enabled': True, } self.projects_mock.create.assert_called_with( - self.fake_project.name, - **kwargs + self.fake_project.name, **kwargs ) self.assertEqual(self.columns, columns) @@ -176,8 +180,7 @@ def test_project_create_disable(self): 'enabled': False, } self.projects_mock.create.assert_called_with( - self.fake_project.name, - **kwargs + self.fake_project.name, **kwargs ) self.assertEqual(self.columns, columns) @@ -185,12 +188,14 @@ def test_project_create_disable(self): def test_project_create_property(self): arglist = [ - '--property', 'fee=fi', - '--property', 'fo=fum', + '--property', + 'fee=fi', + '--property', + 'fo=fum', self.fake_project.name, ] verifylist = [ - ('property', {'fee': 'fi', 'fo': 'fum'}), + ('properties', {'fee': 'fi', 'fo': 'fum'}), ('name', self.fake_project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -208,8 +213,7 @@ def test_project_create_property(self): 'fo': 'fum', } self.projects_mock.create.assert_called_with( - self.fake_project.name, - **kwargs + self.fake_project.name, **kwargs ) self.assertEqual(self.columns, columns) @@ -248,8 +252,7 @@ def _raise_conflict(*args, **kwargs): 'enabled': True, } self.projects_mock.create.assert_called_with( - self.fake_project.name, - **kwargs + self.fake_project.name, **kwargs ) self.assertEqual(self.columns, columns) @@ -277,8 +280,7 @@ def test_project_create_or_show_not_exists(self): 'enabled': True, } self.projects_mock.create.assert_called_with( - self.fake_project.name, - **kwargs + self.fake_project.name, **kwargs ) self.assertEqual(self.columns, columns) @@ -286,9 +288,8 @@ def test_project_create_or_show_not_exists(self): class TestProjectDelete(TestProject): - def setUp(self): - super(TestProjectDelete, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.projects_mock.get.return_value = self.fake_project @@ -315,8 +316,7 @@ def test_project_delete_no_options(self): @mock.patch.object(utils, 'find_resource') def test_delete_multi_projects_with_exception(self, find_mock): - find_mock.side_effect = [self.fake_project, - exceptions.CommandError] + find_mock.side_effect = [self.fake_project, exceptions.CommandError] arglist = [ self.fake_project.id, 'unexist_project', @@ -330,8 +330,7 @@ def test_delete_multi_projects_with_exception(self, find_mock): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 projects failed to delete.', - str(e)) + self.assertEqual('1 of 2 projects failed to delete.', str(e)) find_mock.assert_any_call(self.projects_mock, 
self.fake_project.id) find_mock.assert_any_call(self.projects_mock, 'unexist_project') @@ -341,9 +340,8 @@ def test_delete_multi_projects_with_exception(self, find_mock): class TestProjectList(TestProject): - def setUp(self): - super(TestProjectList, self).setUp() + super().setUp() self.projects_mock.list.return_value = [self.fake_project] @@ -363,10 +361,12 @@ def test_project_list_no_options(self): collist = ('ID', 'Name') self.assertEqual(collist, columns) - datalist = (( - self.fake_project.id, - self.fake_project.name, - ), ) + datalist = ( + ( + self.fake_project.id, + self.fake_project.name, + ), + ) self.assertEqual(datalist, tuple(data)) def test_project_list_long(self): @@ -386,18 +386,23 @@ def test_project_list_long(self): collist = ('ID', 'Name', 'Description', 'Enabled') self.assertEqual(collist, columns) - datalist = (( - self.fake_project.id, - self.fake_project.name, - self.fake_project.description, - True, - ), ) + datalist = ( + ( + self.fake_project.id, + self.fake_project.name, + self.fake_project.description, + True, + ), + ) self.assertEqual(datalist, tuple(data)) def test_project_list_sort(self): self.projects_mock.list.return_value = self.fake_projects - arglist = ['--sort', 'name:asc', ] + arglist = [ + '--sort', + 'name:asc', + ] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -425,9 +430,8 @@ def test_project_list_sort(self): class TestProjectSet(TestProject): - def setUp(self): - super(TestProjectSet, self).setUp() + super().setUp() self.projects_mock.get.return_value = self.fake_project self.projects_mock.update.return_value = self.fake_project @@ -460,7 +464,7 @@ def test_project_set_unexist_project(self): ('description', None), ('enable', False), ('disable', False), - ('property', None), + ('properties', None), ] self.projects_mock.get.side_effect = exceptions.NotFound(None) self.projects_mock.find.side_effect = exceptions.NotFound(None) @@ -468,11 +472,13 @@ def test_project_set_unexist_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_project_set_name(self): arglist = [ - '--name', self.fake_project.name, + '--name', + self.fake_project.name, self.fake_project.name, ] verifylist = [ @@ -492,14 +498,14 @@ def test_project_set_name(self): 'tenant_name': self.fake_project.name, } self.projects_mock.update.assert_called_with( - self.fake_project.id, - **kwargs + self.fake_project.id, **kwargs ) self.assertIsNone(result) def test_project_set_description(self): arglist = [ - '--description', self.fake_project.description, + '--description', + self.fake_project.description, self.fake_project.name, ] verifylist = [ @@ -519,8 +525,7 @@ def test_project_set_description(self): 'tenant_name': self.fake_project.name, } self.projects_mock.update.assert_called_with( - self.fake_project.id, - **kwargs + self.fake_project.id, **kwargs ) self.assertIsNone(result) @@ -545,8 +550,7 @@ def test_project_set_enable(self): 'tenant_name': self.fake_project.name, } self.projects_mock.update.assert_called_with( - self.fake_project.id, - **kwargs + self.fake_project.id, **kwargs ) self.assertIsNone(result) @@ -571,19 +575,20 @@ def test_project_set_disable(self): 'tenant_name': self.fake_project.name, } self.projects_mock.update.assert_called_with( - self.fake_project.id, - **kwargs + self.fake_project.id, **kwargs ) self.assertIsNone(result) def 
test_project_set_property(self): arglist = [ - '--property', 'fee=fi', - '--property', 'fo=fum', + '--property', + 'fee=fi', + '--property', + 'fo=fum', self.fake_project.name, ] verifylist = [ - ('property', {'fee': 'fi', 'fo': 'fum'}), + ('properties', {'fee': 'fi', 'fo': 'fum'}), ('project', self.fake_project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -599,18 +604,16 @@ def test_project_set_property(self): 'fo': 'fum', } self.projects_mock.update.assert_called_with( - self.fake_project.id, - **kwargs + self.fake_project.id, **kwargs ) self.assertIsNone(result) class TestProjectShow(TestProject): - fake_proj_show = identity_fakes.FakeProject.create_one_project() def setUp(self): - super(TestProjectShow, self).setUp() + super().setUp() self.projects_mock.get.return_value = self.fake_proj_show @@ -647,12 +650,11 @@ def test_project_show(self): class TestProjectUnset(TestProject): - attr = {'fee': 'fi', 'fo': 'fum'} fake_proj = identity_fakes.FakeProject.create_one_project(attr) def setUp(self): - super(TestProjectUnset, self).setUp() + super().setUp() self.projects_mock.get.return_value = self.fake_proj @@ -674,12 +676,14 @@ def test_project_unset_no_options(self): def test_project_unset_key(self): arglist = [ - '--property', 'fee', - '--property', 'fo', + '--property', + 'fee', + '--property', + 'fo', self.fake_proj.name, ] verifylist = [ - ('property', ['fee', 'fo']), + ('properties', ['fee', 'fo']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -695,7 +699,6 @@ def test_project_unset_key(self): } self.projects_mock.update.assert_called_with( - self.fake_proj.id, - **kwargs + self.fake_proj.id, **kwargs ) self.assertIsNone(result) diff --git a/openstackclient/tests/unit/identity/v2_0/test_role.py b/openstackclient/tests/unit/identity/v2_0/test_role.py index 423884d9f5..117d2f0011 100644 --- a/openstackclient/tests/unit/identity/v2_0/test_role.py +++ b/openstackclient/tests/unit/identity/v2_0/test_role.py @@ -24,7 +24,6 @@ class TestRole(identity_fakes.TestIdentityv2): - attr = {} attr['endpoints'] = [ { @@ -41,32 +40,30 @@ class TestRole(identity_fakes.TestIdentityv2): fake_user = identity_fakes.FakeUser.create_one_user(attr) def setUp(self): - super(TestRole, self).setUp() + super().setUp() # Get a shortcut to the TenantManager Mock - self.projects_mock = self.app.client_manager.identity.tenants + self.projects_mock = self.identity_client.tenants self.projects_mock.reset_mock() # Get a shortcut to the UserManager Mock - self.users_mock = self.app.client_manager.identity.users + self.users_mock = self.identity_client.users self.users_mock.reset_mock() # Get a shortcut to the RoleManager Mock - self.roles_mock = self.app.client_manager.identity.roles + self.roles_mock = self.identity_client.roles self.roles_mock.reset_mock() auth_ref = identity_fakes.fake_auth_ref( identity_fakes.TOKEN, fake_service=self.fake_service, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref class TestRoleAdd(TestRole): - def setUp(self): - super(TestRoleAdd, self).setUp() + super().setUp() self.projects_mock.get.return_value = self.fake_project @@ -80,8 +77,10 @@ def setUp(self): def test_role_add(self): arglist = [ - '--project', self.fake_project.name, - '--user', self.fake_user.name, + '--project', + self.fake_project.name, + '--user', + self.fake_user.name, self.fake_role.name, ] verifylist = [ @@ -113,19 +112,15 @@ def test_role_add(self): 
class TestRoleCreate(TestRole): - fake_role_c = identity_fakes.FakeRole.create_one_role() - columns = ( - 'id', - 'name' - ) + columns = ('id', 'name') datalist = ( fake_role_c.id, fake_role_c.name, ) def setUp(self): - super(TestRoleCreate, self).setUp() + super().setUp() self.roles_mock.create.return_value = self.fake_role_c @@ -215,9 +210,8 @@ def test_role_create_or_show_not_exists(self): class TestRoleDelete(TestRole): - def setUp(self): - super(TestRoleDelete, self).setUp() + super().setUp() self.roles_mock.get.return_value = self.fake_role self.roles_mock.delete.return_value = None @@ -243,8 +237,7 @@ def test_role_delete_no_options(self): @mock.patch.object(utils, 'find_resource') def test_delete_multi_roles_with_exception(self, find_mock): - find_mock.side_effect = [self.fake_role, - exceptions.CommandError] + find_mock.side_effect = [self.fake_role, exceptions.CommandError] arglist = [ self.fake_role.id, 'unexist_role', @@ -258,8 +251,7 @@ def test_delete_multi_roles_with_exception(self, find_mock): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 roles failed to delete.', - str(e)) + self.assertEqual('1 of 2 roles failed to delete.', str(e)) find_mock.assert_any_call(self.roles_mock, self.fake_role.id) find_mock.assert_any_call(self.roles_mock, 'unexist_role') @@ -269,9 +261,8 @@ def test_delete_multi_roles_with_exception(self, find_mock): class TestRoleList(TestRole): - def setUp(self): - super(TestRoleList, self).setUp() + super().setUp() self.roles_mock.list.return_value = [self.fake_role] @@ -292,17 +283,18 @@ def test_role_list(self): collist = ('ID', 'Name') self.assertEqual(collist, columns) - datalist = (( - self.fake_role.id, - self.fake_role.name, - ), ) + datalist = ( + ( + self.fake_role.id, + self.fake_role.name, + ), + ) self.assertEqual(datalist, tuple(data)) class TestRoleRemove(TestRole): - def setUp(self): - super(TestRoleRemove, self).setUp() + super().setUp() self.projects_mock.get.return_value = self.fake_project @@ -316,8 +308,10 @@ def setUp(self): def test_role_remove(self): arglist = [ - '--project', self.fake_project.name, - '--user', self.fake_user.name, + '--project', + self.fake_project.name, + '--user', + self.fake_user.name, self.fake_role.name, ] verifylist = [ @@ -339,9 +333,8 @@ def test_role_remove(self): class TestRoleShow(TestRole): - def setUp(self): - super(TestRoleShow, self).setUp() + super().setUp() self.roles_mock.get.return_value = self.fake_role diff --git a/openstackclient/tests/unit/identity/v2_0/test_role_assignment.py b/openstackclient/tests/unit/identity/v2_0/test_role_assignment.py index 3e1231aa30..741b3ee193 100644 --- a/openstackclient/tests/unit/identity/v2_0/test_role_assignment.py +++ b/openstackclient/tests/unit/identity/v2_0/test_role_assignment.py @@ -22,13 +22,11 @@ class TestRoleAssignment(identity_fakes.TestIdentityv2): - def setUp(self): - super(TestRoleAssignment, self).setUp() + super().setUp() class TestRoleAssignmentList(TestRoleAssignment): - columns = ( 'Role', 'User', @@ -39,15 +37,15 @@ def setUp(self): super(TestRoleAssignment, self).setUp() # Get a shortcut to the UserManager Mock - self.users_mock = self.app.client_manager.identity.users + self.users_mock = self.identity_client.users self.users_mock.reset_mock() # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() # Get a 
shortcut to the RoleManager Mock - self.roles_mock = self.app.client_manager.identity.roles + self.roles_mock = self.identity_client.roles self.roles_mock.reset_mock() self.projects_mock.get.return_value = fakes.FakeResource( @@ -74,7 +72,6 @@ def setUp(self): self.cmd = role_assignment.ListRoleAssignment(self.app, None) def test_role_assignment_list_no_filters(self): - arglist = [] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -87,9 +84,9 @@ def test_role_assignment_list_no_filters(self): ) def test_role_assignment_list_only_project_filter(self): - arglist = [ - '--project', identity_fakes.project_name, + '--project', + identity_fakes.project_name, ] verifylist = [ ('project', identity_fakes.project_name), @@ -104,9 +101,9 @@ def test_role_assignment_list_only_project_filter(self): ) def test_role_assignment_list_only_user_filter(self): - arglist = [ - '--user', identity_fakes.user_name, + '--user', + identity_fakes.user_name, ] verifylist = [ ('user', identity_fakes.user_name), @@ -121,25 +118,24 @@ def test_role_assignment_list_only_user_filter(self): ) def test_role_assignment_list_project_and_user(self): - self.roles_mock.roles_for_user.return_value = [ fakes.FakeResource( None, - copy.deepcopy( - identity_fakes.ROLE), + copy.deepcopy(identity_fakes.ROLE), loaded=True, ), fakes.FakeResource( None, - copy.deepcopy( - identity_fakes.ROLE_2), + copy.deepcopy(identity_fakes.ROLE_2), loaded=True, ), ] arglist = [ - '--project', identity_fakes.project_name, - '--user', identity_fakes.user_name, + '--project', + identity_fakes.project_name, + '--user', + identity_fakes.user_name, ] verifylist = [ ('user', identity_fakes.user_name), @@ -158,33 +154,35 @@ def test_role_assignment_list_project_and_user(self): ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - identity_fakes.user_id, - identity_fakes.project_id, - ), (identity_fakes.ROLE_2['id'], - identity_fakes.user_id, - identity_fakes.project_id, - ),) + datalist = ( + ( + identity_fakes.role_id, + identity_fakes.user_id, + identity_fakes.project_id, + ), + ( + identity_fakes.ROLE_2['id'], + identity_fakes.user_id, + identity_fakes.project_id, + ), + ) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_def_creds(self): - - auth_ref = self.app.client_manager.auth_ref = mock.Mock() + self.app.client_manager.auth_ref = mock.Mock() + auth_ref = self.app.client_manager.auth_ref auth_ref.project_id.return_value = identity_fakes.project_id auth_ref.user_id.return_value = identity_fakes.user_id self.roles_mock.roles_for_user.return_value = [ fakes.FakeResource( None, - copy.deepcopy( - identity_fakes.ROLE), + copy.deepcopy(identity_fakes.ROLE), loaded=True, ), fakes.FakeResource( None, - copy.deepcopy( - identity_fakes.ROLE_2), + copy.deepcopy(identity_fakes.ROLE_2), loaded=True, ), ] @@ -210,37 +208,40 @@ def test_role_assignment_list_def_creds(self): ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - identity_fakes.user_id, - identity_fakes.project_id, - ), (identity_fakes.ROLE_2['id'], - identity_fakes.user_id, - identity_fakes.project_id, - ),) + datalist = ( + ( + identity_fakes.role_id, + identity_fakes.user_id, + identity_fakes.project_id, + ), + ( + identity_fakes.ROLE_2['id'], + identity_fakes.user_id, + identity_fakes.project_id, + ), + ) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_by_name_project_and_user(self): - self.roles_mock.roles_for_user.return_value = [ fakes.FakeResource( None, - 
copy.deepcopy( - identity_fakes.ROLE), + copy.deepcopy(identity_fakes.ROLE), loaded=True, ), fakes.FakeResource( None, - copy.deepcopy( - identity_fakes.ROLE_2), + copy.deepcopy(identity_fakes.ROLE_2), loaded=True, ), ] arglist = [ - '--project', identity_fakes.project_name, - '--user', identity_fakes.user_name, - '--names' + '--project', + identity_fakes.project_name, + '--user', + identity_fakes.user_name, + '--names', ] verifylist = [ ('user', identity_fakes.user_name), @@ -260,12 +261,16 @@ def test_role_assignment_list_by_name_project_and_user(self): ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_name, - identity_fakes.user_name, - identity_fakes.project_name, - ), (identity_fakes.ROLE_2['name'], - identity_fakes.user_name, - identity_fakes.project_name, - ),) + datalist = ( + ( + identity_fakes.role_name, + identity_fakes.user_name, + identity_fakes.project_name, + ), + ( + identity_fakes.ROLE_2['name'], + identity_fakes.user_name, + identity_fakes.project_name, + ), + ) self.assertEqual(datalist, tuple(data)) diff --git a/openstackclient/tests/unit/identity/v2_0/test_service.py b/openstackclient/tests/unit/identity/v2_0/test_service.py index 6c4374eff3..22e7b837af 100644 --- a/openstackclient/tests/unit/identity/v2_0/test_service.py +++ b/openstackclient/tests/unit/identity/v2_0/test_service.py @@ -24,15 +24,14 @@ class TestService(identity_fakes.TestIdentityv2): fake_service = identity_fakes.FakeService.create_one_service() def setUp(self): - super(TestService, self).setUp() + super().setUp() # Get a shortcut to the ServiceManager Mock - self.services_mock = self.app.client_manager.identity.services + self.services_mock = self.identity_client.services self.services_mock.reset_mock() class TestServiceCreate(TestService): - fake_service_c = identity_fakes.FakeService.create_one_service() columns = ( 'description', @@ -48,7 +47,7 @@ class TestServiceCreate(TestService): ) def setUp(self): - super(TestServiceCreate, self).setUp() + super().setUp() self.services_mock.create.return_value = self.fake_service_c @@ -83,7 +82,8 @@ def test_service_create(self): def test_service_create_with_name_option(self): arglist = [ - '--name', self.fake_service_c.name, + '--name', + self.fake_service_c.name, self.fake_service_c.type, ] verifylist = [ @@ -110,8 +110,10 @@ def test_service_create_with_name_option(self): def test_service_create_description(self): arglist = [ - '--name', self.fake_service_c.name, - '--description', self.fake_service_c.description, + '--name', + self.fake_service_c.name, + '--description', + self.fake_service_c.description, self.fake_service_c.type, ] verifylist = [ @@ -138,9 +140,8 @@ def test_service_create_description(self): class TestServiceDelete(TestService): - def setUp(self): - super(TestServiceDelete, self).setUp() + super().setUp() self.services_mock.get.side_effect = identity_exc.NotFound(None) self.services_mock.find.return_value = self.fake_service @@ -167,9 +168,8 @@ def test_service_delete_no_options(self): class TestServiceList(TestService): - def setUp(self): - super(TestServiceList, self).setUp() + super().setUp() self.services_mock.list.return_value = [self.fake_service] @@ -190,11 +190,13 @@ def test_service_list_no_options(self): collist = ('ID', 'Name', 'Type') self.assertEqual(collist, columns) - datalist = (( - self.fake_service.id, - self.fake_service.name, - self.fake_service.type, - ), ) + datalist = ( + ( + self.fake_service.id, + self.fake_service.name, + self.fake_service.type, + ), + ) self.assertEqual(datalist, 
tuple(data)) def test_service_list_long(self): @@ -215,21 +217,22 @@ def test_service_list_long(self): collist = ('ID', 'Name', 'Type', 'Description') self.assertEqual(collist, columns) - datalist = (( - self.fake_service.id, - self.fake_service.name, - self.fake_service.type, - self.fake_service.description, - ), ) + datalist = ( + ( + self.fake_service.id, + self.fake_service.name, + self.fake_service.type, + self.fake_service.description, + ), + ) self.assertEqual(datalist, tuple(data)) class TestServiceShow(TestService): - fake_service_s = identity_fakes.FakeService.create_one_service() def setUp(self): - super(TestServiceShow, self).setUp() + super().setUp() self.services_mock.get.side_effect = identity_exc.NotFound(None) self.services_mock.find.return_value = self.fake_service_s @@ -282,4 +285,6 @@ def test_service_show_nounique(self): except exceptions.CommandError as e: self.assertEqual( "Multiple service matches found for 'nounique_service'," - " use an ID to be more specific.", str(e)) + " use an ID to be more specific.", + str(e), + ) diff --git a/openstackclient/tests/unit/identity/v2_0/test_token.py b/openstackclient/tests/unit/identity/v2_0/test_token.py index c079ce6796..56a5a2c458 100644 --- a/openstackclient/tests/unit/identity/v2_0/test_token.py +++ b/openstackclient/tests/unit/identity/v2_0/test_token.py @@ -11,31 +11,17 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# - -from unittest import mock from openstackclient.identity.v2_0 import token from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes -class TestToken(identity_fakes.TestIdentityv2): - - fake_user = identity_fakes.FakeUser.create_one_user() - fake_project = identity_fakes.FakeProject.create_one_project() - +class TestTokenIssue(identity_fakes.TestIdentityv2): def setUp(self): - super(TestToken, self).setUp() - - # Get a shortcut to the Auth Ref Mock - self.ar_mock = mock.PropertyMock() - type(self.app.client_manager).auth_ref = self.ar_mock + super().setUp() - -class TestTokenIssue(TestToken): - - def setUp(self): - super(TestTokenIssue, self).setUp() + self.fake_user = identity_fakes.FakeUser.create_one_user() + self.fake_project = identity_fakes.FakeProject.create_one_project() self.cmd = token.IssueToken(self.app, None) @@ -43,8 +29,7 @@ def test_token_issue(self): auth_ref = identity_fakes.fake_auth_ref( identity_fakes.TOKEN, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [] verifylist = [] @@ -69,8 +54,7 @@ def test_token_issue_with_unscoped_token(self): auth_ref = identity_fakes.fake_auth_ref( identity_fakes.UNSCOPED_TOKEN, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [] verifylist = [] @@ -93,13 +77,12 @@ def test_token_issue_with_unscoped_token(self): self.assertEqual(datalist, data) -class TestTokenRevoke(TestToken): - +class TestTokenRevoke(identity_fakes.TestIdentityv2): TOKEN = 'fob' def setUp(self): - super(TestTokenRevoke, self).setUp() - self.tokens_mock = self.app.client_manager.identity.tokens + super().setUp() + self.tokens_mock = self.identity_client.tokens self.tokens_mock.reset_mock() self.tokens_mock.delete.return_value = True self.cmd = token.RevokeToken(self.app, None) diff --git 
a/openstackclient/tests/unit/identity/v2_0/test_user.py b/openstackclient/tests/unit/identity/v2_0/test_user.py index c3f5f1d7a1..725a821800 100644 --- a/openstackclient/tests/unit/identity/v2_0/test_user.py +++ b/openstackclient/tests/unit/identity/v2_0/test_user.py @@ -24,7 +24,6 @@ class TestUser(identity_fakes.TestIdentityv2): - fake_project = identity_fakes.FakeProject.create_one_project() attr = { 'tenantId': fake_project.id, @@ -32,19 +31,18 @@ class TestUser(identity_fakes.TestIdentityv2): fake_user = identity_fakes.FakeUser.create_one_user(attr) def setUp(self): - super(TestUser, self).setUp() + super().setUp() # Get a shortcut to the TenantManager Mock - self.projects_mock = self.app.client_manager.identity.tenants + self.projects_mock = self.identity_client.tenants self.projects_mock.reset_mock() # Get a shortcut to the UserManager Mock - self.users_mock = self.app.client_manager.identity.users + self.users_mock = self.identity_client.users self.users_mock.reset_mock() class TestUserCreate(TestUser): - fake_project_c = identity_fakes.FakeProject.create_one_project() attr = { 'tenantId': fake_project_c.id, @@ -67,7 +65,7 @@ class TestUserCreate(TestUser): ) def setUp(self): - super(TestUserCreate, self).setUp() + super().setUp() self.projects_mock.get.return_value = self.fake_project_c @@ -99,10 +97,7 @@ def test_user_create_no_options(self): } # UserManager.create(name, password, email, tenant_id=, enabled=) self.users_mock.create.assert_called_with( - self.fake_user_c.name, - None, - None, - **kwargs + self.fake_user_c.name, None, None, **kwargs ) self.assertEqual(self.columns, columns) @@ -110,13 +105,14 @@ def test_user_create_no_options(self): def test_user_create_password(self): arglist = [ - '--password', 'secret', + '--password', + 'secret', self.fake_user_c.name, ] verifylist = [ ('name', self.fake_user_c.name), ('password_prompt', False), - ('password', 'secret') + ('password', 'secret'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -132,10 +128,7 @@ def test_user_create_password(self): } # UserManager.create(name, password, email, tenant_id=, enabled=) self.users_mock.create.assert_called_with( - self.fake_user_c.name, - 'secret', - None, - **kwargs + self.fake_user_c.name, 'secret', None, **kwargs ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -147,7 +140,7 @@ def test_user_create_password_prompt(self): ] verifylist = [ ('name', self.fake_user_c.name), - ('password_prompt', True) + ('password_prompt', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -166,10 +159,7 @@ def test_user_create_password_prompt(self): } # UserManager.create(name, password, email, tenant_id=, enabled=) self.users_mock.create.assert_called_with( - self.fake_user_c.name, - 'abc123', - None, - **kwargs + self.fake_user_c.name, 'abc123', None, **kwargs ) self.assertEqual(self.columns, columns) @@ -177,7 +167,8 @@ def test_user_create_password_prompt(self): def test_user_create_email(self): arglist = [ - '--email', 'barney@example.com', + '--email', + 'barney@example.com', self.fake_user_c.name, ] verifylist = [ @@ -198,10 +189,7 @@ def test_user_create_email(self): } # UserManager.create(name, password, email, tenant_id=, enabled=) self.users_mock.create.assert_called_with( - self.fake_user_c.name, - None, - 'barney@example.com', - **kwargs + self.fake_user_c.name, None, 'barney@example.com', **kwargs ) self.assertEqual(self.columns, columns) @@ -219,7 +207,8 @@ def test_user_create_project(self): 
self.users_mock.create.return_value = user_2 arglist = [ - '--project', self.fake_project_c.name, + '--project', + self.fake_project_c.name, user_2.name, ] verifylist = [ @@ -240,10 +229,7 @@ def test_user_create_project(self): } # UserManager.create(name, password, email, tenant_id=, enabled=) self.users_mock.create.assert_called_with( - user_2.name, - None, - None, - **kwargs + user_2.name, None, None, **kwargs ) self.assertEqual(self.columns, columns) @@ -280,10 +266,7 @@ def test_user_create_enable(self): } # UserManager.create(name, password, email, tenant_id=, enabled=) self.users_mock.create.assert_called_with( - self.fake_user_c.name, - None, - None, - **kwargs + self.fake_user_c.name, None, None, **kwargs ) self.assertEqual(self.columns, columns) @@ -313,10 +296,7 @@ def test_user_create_disable(self): } # UserManager.create(name, password, email, tenant_id=, enabled=) self.users_mock.create.assert_called_with( - self.fake_user_c.name, - None, - None, - **kwargs + self.fake_user_c.name, None, None, **kwargs ) self.assertEqual(self.columns, columns) @@ -375,19 +355,15 @@ def test_user_create_or_show_not_exists(self): } # UserManager.create(name, password, email, tenant_id=, enabled=) self.users_mock.create.assert_called_with( - self.fake_user_c.name, - None, - None, - **kwargs + self.fake_user_c.name, None, None, **kwargs ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) class TestUserDelete(TestUser): - def setUp(self): - super(TestUserDelete, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.users_mock.get.return_value = self.fake_user @@ -414,8 +390,7 @@ def test_user_delete_no_options(self): @mock.patch.object(utils, 'find_resource') def test_delete_multi_users_with_exception(self, find_mock): - find_mock.side_effect = [self.fake_user, - exceptions.CommandError] + find_mock.side_effect = [self.fake_user, exceptions.CommandError] arglist = [ self.fake_user.id, 'unexist_user', @@ -429,8 +404,7 @@ def test_delete_multi_users_with_exception(self, find_mock): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 users failed to delete.', - str(e)) + self.assertEqual('1 of 2 users failed to delete.', str(e)) find_mock.assert_any_call(self.users_mock, self.fake_user.id) find_mock.assert_any_call(self.users_mock, 'unexist_user') @@ -440,7 +414,6 @@ def test_delete_multi_users_with_exception(self, find_mock): class TestUserList(TestUser): - fake_project_l = identity_fakes.FakeProject.create_one_project() attr = { 'tenantId': fake_project_l.id, @@ -459,7 +432,7 @@ class TestUserList(TestUser): ) def setUp(self): - super(TestUserList, self).setUp() + super().setUp() self.projects_mock.get.return_value = self.fake_project_l self.projects_mock.list.return_value = [self.fake_project_l] @@ -486,7 +459,8 @@ def test_user_list_no_options(self): def test_user_list_project(self): arglist = [ - '--project', self.fake_project_l.id, + '--project', + self.fake_project_l.id, ] verifylist = [ ('project', self.fake_project_l.id), @@ -522,22 +496,24 @@ def test_user_list_long(self): collist = ('ID', 'Name', 'Project', 'Email', 'Enabled') self.assertEqual(collist, columns) - datalist = (( - self.fake_user_l.id, - self.fake_user_l.name, - user.ProjectColumn( - self.fake_project_l.id, - {self.fake_project_l.id: self.fake_project_l}), - self.fake_user_l.email, - True, - ), ) + datalist = ( + ( + self.fake_user_l.id, + self.fake_user_l.name, + 
user.ProjectColumn( + self.fake_project_l.id, + {self.fake_project_l.id: self.fake_project_l}, + ), + self.fake_user_l.email, + True, + ), + ) self.assertCountEqual(datalist, tuple(data)) class TestUserSet(TestUser): - def setUp(self): - super(TestUserSet, self).setUp() + super().setUp() self.projects_mock.get.return_value = self.fake_project self.users_mock.get.return_value = self.fake_user @@ -583,11 +559,13 @@ def test_user_set_unexist_user(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_user_set_name(self): arglist = [ - '--name', 'qwerty', + '--name', + 'qwerty', self.fake_user.name, ] verifylist = [ @@ -609,15 +587,13 @@ def test_user_set_name(self): 'name': 'qwerty', } # UserManager.update(user, **kwargs) - self.users_mock.update.assert_called_with( - self.fake_user.id, - **kwargs - ) + self.users_mock.update.assert_called_with(self.fake_user.id, **kwargs) self.assertIsNone(result) def test_user_set_password(self): arglist = [ - '--password', 'secret', + '--password', + 'secret', self.fake_user.name, ] verifylist = [ @@ -672,7 +648,8 @@ def test_user_set_password_prompt(self): def test_user_set_email(self): arglist = [ - '--email', 'barney@example.com', + '--email', + 'barney@example.com', self.fake_user.name, ] verifylist = [ @@ -694,15 +671,13 @@ def test_user_set_email(self): 'enabled': True, } # UserManager.update(user, **kwargs) - self.users_mock.update.assert_called_with( - self.fake_user.id, - **kwargs - ) + self.users_mock.update.assert_called_with(self.fake_user.id, **kwargs) self.assertIsNone(result) def test_user_set_project(self): arglist = [ - '--project', self.fake_project.id, + '--project', + self.fake_project.id, self.fake_user.name, ] verifylist = [ @@ -748,10 +723,7 @@ def test_user_set_enable(self): 'enabled': True, } # UserManager.update(user, **kwargs) - self.users_mock.update.assert_called_with( - self.fake_user.id, - **kwargs - ) + self.users_mock.update.assert_called_with(self.fake_user.id, **kwargs) self.assertIsNone(result) def test_user_set_disable(self): @@ -777,17 +749,13 @@ def test_user_set_disable(self): 'enabled': False, } # UserManager.update(user, **kwargs) - self.users_mock.update.assert_called_with( - self.fake_user.id, - **kwargs - ) + self.users_mock.update.assert_called_with(self.fake_user.id, **kwargs) self.assertIsNone(result) class TestUserShow(TestUser): - def setUp(self): - super(TestUserShow, self).setUp() + super().setUp() self.users_mock.get.return_value = self.fake_user diff --git a/openstackclient/tests/unit/identity/v3/fakes.py b/openstackclient/tests/unit/identity/v3/fakes.py index 58d5d14d04..c42ed47315 100644 --- a/openstackclient/tests/unit/identity/v3/fakes.py +++ b/openstackclient/tests/unit/identity/v3/fakes.py @@ -20,6 +20,7 @@ from keystoneauth1 import access from keystoneauth1 import fixture +from openstack.identity.v3 import _proxy from osc_lib.cli import format_columns from openstackclient.tests.unit import fakes @@ -58,55 +59,24 @@ DEVELOPER_GROUP_ID = "xyz" MAPPING_RULES = [ { - "local": [ - { - "group": { - "id": EMPLOYEE_GROUP_ID - } - } - ], + "local": [{"group": {"id": EMPLOYEE_GROUP_ID}}], "remote": [ - { - "type": "orgPersonType", - "not_any_of": [ - "Contractor", - "Guest" - ] - } - ] + {"type": "orgPersonType", "not_any_of": ["Contractor", "Guest"]} + ], } ] MAPPING_RULES_2 = [ { - "local": [ - { - "group": { - "id": DEVELOPER_GROUP_ID - 
} - } - ], - "remote": [ - { - "type": "orgPersonType", - "any_one_of": [ - "Contractor" - ] - } - ] + "local": [{"group": {"id": DEVELOPER_GROUP_ID}}], + "remote": [{"type": "orgPersonType", "any_one_of": ["Contractor"]}], } ] -MAPPING_RESPONSE = { - "id": mapping_id, - "rules": MAPPING_RULES -} +MAPPING_RESPONSE = {"id": mapping_id, "rules": MAPPING_RULES} -MAPPING_RESPONSE_2 = { - "id": mapping_id, - "rules": MAPPING_RULES_2 -} +MAPPING_RESPONSE_2 = {"id": mapping_id, "rules": MAPPING_RULES_2} mfa_opt1 = 'password,totp' mfa_opt2 = 'password' @@ -270,7 +240,7 @@ trust_id = 't-456' trust_expires = None trust_impersonation = False -trust_roles = {"id": role_id, "name": role_name}, +trust_roles = ({"id": role_id, "name": role_name},) TRUST = { 'expires_at': trust_expires, @@ -327,32 +297,31 @@ sp_id = 'BETA' sp_description = 'Service Provider to burst into' service_provider_url = 'https://beta.example.com/Shibboleth.sso/POST/SAML' -sp_auth_url = ('https://beta.example.com/v3/OS-FEDERATION/identity_providers/' - 'idp/protocol/saml2/auth') +sp_auth_url = ( + 'https://beta.example.com/v3/OS-FEDERATION/identity_providers/' + 'idp/protocol/saml2/auth' +) SERVICE_PROVIDER = { 'id': sp_id, 'enabled': True, 'description': sp_description, 'sp_url': service_provider_url, - 'auth_url': sp_auth_url + 'auth_url': sp_auth_url, } -PROTOCOL_ID_MAPPING = { - 'id': protocol_id, - 'mapping': mapping_id -} +PROTOCOL_ID_MAPPING = {'id': protocol_id, 'mapping': mapping_id} PROTOCOL_OUTPUT = { 'id': protocol_id, 'mapping_id': mapping_id, - 'identity_provider': idp_id + 'identity_provider': idp_id, } PROTOCOL_OUTPUT_UPDATED = { 'id': protocol_id, 'mapping_id': mapping_id_updated, - 'identity_provider': idp_id + 'identity_provider': idp_id, } # Assignments @@ -366,22 +335,24 @@ ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID_INCLUDE_NAMES = { 'scope': { 'project': { - 'domain': {'id': domain_id, - 'name': domain_name}, + 'domain': {'id': domain_id, 'name': domain_name}, 'id': project_id, - 'name': project_name}}, + 'name': project_name, + } + }, 'user': { - 'domain': {'id': domain_id, - 'name': domain_name}, + 'domain': {'id': domain_id, 'name': domain_name}, 'id': user_id, - 'name': user_name}, - 'role': {'id': role_id, - 'name': role_name}, + 'name': user_name, + }, + 'role': {'id': role_id, 'name': role_name}, } ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID_INHERITED = { - 'scope': {'project': {'id': project_id}, - 'OS-INHERIT:inherited_to': 'projects'}, + 'scope': { + 'project': {'id': project_id}, + 'OS-INHERIT:inherited_to': 'projects', + }, 'user': {'id': user_id}, 'role': {'id': role_id}, } @@ -405,21 +376,20 @@ } ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID_INCLUDE_NAMES = { - 'scope': { - 'domain': {'id': domain_id, - 'name': domain_name}}, + 'scope': {'domain': {'id': domain_id, 'name': domain_name}}, 'user': { - 'domain': {'id': domain_id, - 'name': domain_name}, + 'domain': {'id': domain_id, 'name': domain_name}, 'id': user_id, - 'name': user_name}, - 'role': {'id': role_id, - 'name': role_name}, + 'name': user_name, + }, + 'role': {'id': role_id, 'name': role_name}, } ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID_INHERITED = { - 'scope': {'domain': {'id': domain_id}, - 'OS-INHERIT:inherited_to': 'projects'}, + 'scope': { + 'domain': {'id': domain_id}, + 'OS-INHERIT:inherited_to': 'projects', + }, 'user': {'id': user_id}, 'role': {'id': role_id}, } @@ -437,7 +407,7 @@ OAUTH_CONSUMER = { 'id': consumer_id, 'secret': consumer_secret, - 'description': consumer_description + 'description': consumer_description, } access_token_id = 'test 
access token id' @@ -448,7 +418,7 @@ 'id': access_token_id, 'expires': access_token_expires, 'key': access_token_id, - 'secret': access_token_secret + 'secret': access_token_secret, } request_token_id = 'test request token id' @@ -459,17 +429,15 @@ 'id': request_token_id, 'expires': request_token_expires, 'key': request_token_id, - 'secret': request_token_secret + 'secret': request_token_secret, } oauth_verifier_pin = '6d74XaDS' -OAUTH_VERIFIER = { - 'oauth_verifier': oauth_verifier_pin -} +OAUTH_VERIFIER = {'oauth_verifier': oauth_verifier_pin} app_cred_id = 'app-cred-id' app_cred_name = 'testing_app_cred' -app_cred_role = {"id": role_id, "name": role_name, "domain": None}, +app_cred_role = ({"id": role_id, "name": role_name, "domain": None},) app_cred_description = 'app credential for testing' app_cred_expires = datetime.datetime(2022, 1, 1, 0, 0) app_cred_expires_str = app_cred_expires.strftime('%Y-%m-%dT%H:%M:%S%z') @@ -491,7 +459,7 @@ 'expires_at': None, 'unrestricted': False, 'secret': app_cred_secret, - 'access_rules': None + 'access_rules': None, } APP_CRED_OPTIONS = { 'id': app_cred_id, @@ -519,7 +487,7 @@ 'expires_at': None, 'unrestricted': False, 'secret': app_cred_secret, - 'access_rules': app_cred_access_rules + 'access_rules': app_cred_access_rules, } registered_limit_id = 'registered-limit-id' @@ -532,7 +500,7 @@ 'resource_name': registered_limit_resource_name, 'service_id': service_id, 'description': None, - 'region_id': None + 'region_id': None, } REGISTERED_LIMIT_OPTIONS = { 'id': registered_limit_id, @@ -540,7 +508,7 @@ 'resource_name': registered_limit_resource_name, 'service_id': service_id, 'description': registered_limit_description, - 'region_id': region_id + 'region_id': region_id, } limit_id = 'limit-id' @@ -554,7 +522,7 @@ 'resource_name': limit_resource_name, 'service_id': service_id, 'description': None, - 'region_id': None + 'region_id': None, } LIMIT_OPTIONS = { 'id': limit_id, @@ -563,7 +531,7 @@ 'resource_name': limit_resource_name, 'service_id': service_id, 'description': limit_description, - 'region_id': region_id + 'region_id': region_id, } @@ -597,8 +565,7 @@ def fake_auth_ref(fake_token, fake_service=None): return auth_ref -class FakeAuth(object): - +class FakeAuth: def __init__(self, auth_method_class=None): self._auth_method_class = auth_method_class @@ -606,14 +573,12 @@ def get_token(self, *args, **kwargs): return token_id -class FakeSession(object): - +class FakeSession: def __init__(self, **kwargs): self.auth = FakeAuth() -class FakeIdentityv3Client(object): - +class FakeIdentityv3Client: def __init__(self, **kwargs): self.domains = mock.Mock() self.domains.resource_class = fakes.FakeResource(None, {}) @@ -638,8 +603,9 @@ def __init__(self, **kwargs): self.services = mock.Mock() self.services.resource_class = fakes.FakeResource(None, {}) self.session = mock.Mock() - self.session.auth.auth_ref.service_catalog.resource_class = \ + self.session.auth.auth_ref.service_catalog.resource_class = ( fakes.FakeResource(None, {}) + ) self.tokens = mock.Mock() self.tokens.resource_class = fakes.FakeResource(None, {}) self.trusts = mock.Mock() @@ -654,8 +620,9 @@ def __init__(self, **kwargs): self.auth.client = mock.Mock() self.auth.client.resource_class = fakes.FakeResource(None, {}) self.application_credentials = mock.Mock() - self.application_credentials.resource_class = fakes.FakeResource(None, - {}) + self.application_credentials.resource_class = fakes.FakeResource( + None, {} + ) self.access_rules = mock.Mock() self.access_rules.resource_class = 
fakes.FakeResource(None, {}) self.inference_rules = mock.Mock() @@ -666,8 +633,7 @@ def __init__(self, **kwargs): self.limits.resource_class = fakes.FakeResource(None, {}) -class FakeFederationManager(object): - +class FakeFederationManager: def __init__(self, **kwargs): self.identity_providers = mock.Mock() self.identity_providers.resource_class = fakes.FakeResource(None, {}) @@ -684,16 +650,14 @@ def __init__(self, **kwargs): class FakeFederatedClient(FakeIdentityv3Client): - def __init__(self, **kwargs): - super(FakeFederatedClient, self).__init__(**kwargs) + super().__init__(**kwargs) self.federation = FakeFederationManager() class FakeOAuth1Client(FakeIdentityv3Client): - def __init__(self, **kwargs): - super(FakeOAuth1Client, self).__init__(**kwargs) + super().__init__(**kwargs) self.access_tokens = mock.Mock() self.access_tokens.resource_class = fakes.FakeResource(None, {}) @@ -703,40 +667,81 @@ def __init__(self, **kwargs): self.request_tokens.resource_class = fakes.FakeResource(None, {}) -class TestIdentityv3(utils.TestCommand): - +class FakeClientMixin: def setUp(self): - super(TestIdentityv3, self).setUp() + super().setUp() self.app.client_manager.identity = FakeIdentityv3Client( endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN, ) + self.identity_client = self.app.client_manager.identity + + # TODO(stephenfin): Rename to 'identity_client' once all commands are + # migrated to SDK + self.app.client_manager.sdk_connection.identity = mock.Mock( + _proxy.Proxy + ) + self.identity_sdk_client = ( + self.app.client_manager.sdk_connection.identity + ) -class TestFederatedIdentity(utils.TestCommand): +class TestIdentityv3( + FakeClientMixin, + utils.TestCommand, +): ... + +class FakeModel(dict): + def __getattr__(self, key): + try: + return self[key] + except KeyError: + raise AttributeError(key) + + +# We don't use FakeClientMixin since we want a different fake legacy client +class TestFederatedIdentity(utils.TestCommand): def setUp(self): - super(TestFederatedIdentity, self).setUp() + super().setUp() self.app.client_manager.identity = FakeFederatedClient( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN + endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN + ) + self.identity_client = self.app.client_manager.identity + + # TODO(stephenfin): Rename to 'identity_client' once all commands are + # migrated to SDK + self.app.client_manager.sdk_connection.identity = mock.Mock( + _proxy.Proxy + ) + self.identity_sdk_client = ( + self.app.client_manager.sdk_connection.identity ) +# We don't use FakeClientMixin since we want a different fake legacy client class TestOAuth1(utils.TestCommand): - def setUp(self): - super(TestOAuth1, self).setUp() + super().setUp() self.app.client_manager.identity = FakeOAuth1Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN + endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN + ) + self.identity_client = self.app.client_manager.identity + + # TODO(stephenfin): Rename to 'identity_client' once all commands are + # migrated to SDK + self.app.client_manager.sdk_connection.identity = mock.Mock( + _proxy.Proxy + ) + self.identity_sdk_client = ( + self.app.client_manager.sdk_connection.identity ) -class FakeProject(object): +class FakeProject: """Fake one or more project.""" @staticmethod @@ -765,8 +770,9 @@ def create_one_project(attrs=None): } project_info.update(attrs) - project = fakes.FakeResource(info=copy.deepcopy(project_info), - loaded=True) + project = fakes.FakeResource( + info=copy.deepcopy(project_info), loaded=True + ) return project @staticmethod @@ 
-787,7 +793,7 @@ def create_projects(attrs=None, count=2): return projects -class FakeDomain(object): +class FakeDomain: """Fake one or more domain.""" @staticmethod @@ -813,12 +819,13 @@ def create_one_domain(attrs=None): } domain_info.update(attrs) - domain = fakes.FakeResource(info=copy.deepcopy(domain_info), - loaded=True) + domain = fakes.FakeResource( + info=copy.deepcopy(domain_info), loaded=True + ) return domain -class FakeCredential(object): +class FakeCredential: """Fake one or more credential.""" @staticmethod @@ -845,7 +852,8 @@ def create_one_credential(attrs=None): credential_info.update(attrs) credential = fakes.FakeResource( - info=copy.deepcopy(credential_info), loaded=True) + info=copy.deepcopy(credential_info), loaded=True + ) return credential @staticmethod @@ -887,7 +895,7 @@ def get_credentials(credentials=None, count=2): return mock.Mock(side_effect=credentials) -class FakeUser(object): +class FakeUser: """Fake one or more user.""" @staticmethod @@ -914,8 +922,7 @@ def create_one_user(attrs=None): } user_info.update(attrs) - user = fakes.FakeResource(info=copy.deepcopy(user_info), - loaded=True) + user = fakes.FakeResource(info=copy.deepcopy(user_info), loaded=True) return user @staticmethod @@ -957,7 +964,7 @@ def get_users(users=None, count=2): return mock.Mock(side_effect=users) -class FakeGroup(object): +class FakeGroup: """Fake one or more group.""" @staticmethod @@ -982,8 +989,7 @@ def create_one_group(attrs=None): } group_info.update(attrs) - group = fakes.FakeResource(info=copy.deepcopy(group_info), - loaded=True) + group = fakes.FakeResource(info=copy.deepcopy(group_info), loaded=True) return group @staticmethod @@ -1025,7 +1031,7 @@ def get_groups(groups=None, count=2): return mock.Mock(side_effect=groups) -class FakeEndpoint(object): +class FakeEndpoint: """Fake one or more endpoint.""" @staticmethod @@ -1052,8 +1058,9 @@ def create_one_endpoint(attrs=None): } endpoint_info.update(attrs) - endpoint = fakes.FakeResource(info=copy.deepcopy(endpoint_info), - loaded=True) + endpoint = fakes.FakeResource( + info=copy.deepcopy(endpoint_info), loaded=True + ) return endpoint @staticmethod @@ -1076,13 +1083,12 @@ def create_one_endpoint_filter(attrs=None): # Overwrite default attributes if there are some attributes set endpoint_filter_info.update(attrs) - endpoint_filter = fakes.FakeModel( - copy.deepcopy(endpoint_filter_info)) + endpoint_filter = FakeModel(copy.deepcopy(endpoint_filter_info)) return endpoint_filter -class FakeEndpointGroup(object): +class FakeEndpointGroup: """Fake one or more endpoint group.""" @staticmethod @@ -1110,8 +1116,9 @@ def create_one_endpointgroup(attrs=None): } endpointgroup_info.update(attrs) - endpoint = fakes.FakeResource(info=copy.deepcopy(endpointgroup_info), - loaded=True) + endpoint = fakes.FakeResource( + info=copy.deepcopy(endpointgroup_info), loaded=True + ) return endpoint @staticmethod @@ -1134,13 +1141,14 @@ def create_one_endpointgroup_filter(attrs=None): # Overwrite default attributes if there are some attributes set endpointgroup_filter_info.update(attrs) - endpointgroup_filter = fakes.FakeModel( - copy.deepcopy(endpointgroup_filter_info)) + endpointgroup_filter = FakeModel( + copy.deepcopy(endpointgroup_filter_info) + ) return endpointgroup_filter -class FakeService(object): +class FakeService: """Fake one or more service.""" @staticmethod @@ -1166,12 +1174,13 @@ def create_one_service(attrs=None): } service_info.update(attrs) - service = fakes.FakeResource(info=copy.deepcopy(service_info), - loaded=True) + service 
= fakes.FakeResource( + info=copy.deepcopy(service_info), loaded=True + ) return service -class FakeRoleAssignment(object): +class FakeRoleAssignment: """Fake one or more role assignment.""" @staticmethod @@ -1195,13 +1204,15 @@ def create_one_role_assignment(attrs=None): role_assignment_info.update(attrs) role_assignment = fakes.FakeResource( - info=copy.deepcopy(role_assignment_info), loaded=True) + info=copy.deepcopy(role_assignment_info), loaded=True + ) return role_assignment -class FakeImpliedRoleResponse(object): +class FakeImpliedRoleResponse: """Fake one or more role assignment.""" + def __init__(self, prior_role, implied_roles): self.prior_role = prior_role self.implies = [role for role in implied_roles] @@ -1215,8 +1226,6 @@ def create_list(): """ # set default attributes. - implied_roles = [ - FakeImpliedRoleResponse(ROLES[0], [ROLES[1]]) - ] + implied_roles = [FakeImpliedRoleResponse(ROLES[0], [ROLES[1]])] return implied_roles diff --git a/openstackclient/tests/unit/identity/v3/test_access_rule.py b/openstackclient/tests/unit/identity/v3/test_access_rule.py index 904fe323d9..0fc68dd366 100644 --- a/openstackclient/tests/unit/identity/v3/test_access_rule.py +++ b/openstackclient/tests/unit/identity/v3/test_access_rule.py @@ -13,104 +13,93 @@ # under the License. # -import copy -from unittest import mock +from unittest.mock import call +from openstack import exceptions as sdk_exceptions +from openstack.identity.v3 import access_rule as _access_rule +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions -from osc_lib import utils from openstackclient.identity.v3 import access_rule -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestAccessRule(identity_fakes.TestIdentityv3): +class TestAccessRuleDelete(identity_fakes.TestIdentityv3): + access_rule = sdk_fakes.generate_fake_resource(_access_rule.AccessRule) def setUp(self): - super(TestAccessRule, self).setUp() + super().setUp() - identity_manager = self.app.client_manager.identity - self.access_rules_mock = identity_manager.access_rules - self.access_rules_mock.reset_mock() - self.roles_mock = identity_manager.roles - self.roles_mock.reset_mock() - - -class TestAccessRuleDelete(TestAccessRule): - - def setUp(self): - super(TestAccessRuleDelete, self).setUp() - - # This is the return value for utils.find_resource() - self.access_rules_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ACCESS_RULE), - loaded=True, + self.identity_sdk_client.get_access_rule.return_value = ( + self.access_rule ) - self.access_rules_mock.delete.return_value = None + self.identity_sdk_client.delete_access_rule.return_value = None # Get the command object to test - self.cmd = access_rule.DeleteAccessRule( - self.app, None) + self.cmd = access_rule.DeleteAccessRule(self.app, None) def test_access_rule_delete(self): - arglist = [ - identity_fakes.access_rule_id, - ] - verifylist = [ - ('access_rule', [identity_fakes.access_rule_id]) - ] + arglist = [self.access_rule.id] + verifylist = [('access_rule', [self.access_rule.id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + result = self.cmd.take_action(parsed_args) - self.access_rules_mock.delete.assert_called_with( - identity_fakes.access_rule_id, + self.identity_sdk_client.delete_access_rule.assert_called_with( + user_id, + self.access_rule.id, ) 
self.assertIsNone(result) - @mock.patch.object(utils, 'find_resource') - def test_delete_multi_access_rules_with_exception(self, find_mock): - find_mock.side_effect = [self.access_rules_mock.get.return_value, - exceptions.CommandError] + def test_delete_multi_access_rules_with_exception(self): + self.identity_sdk_client.get_access_rule.side_effect = [ + self.access_rule, + sdk_exceptions.NotFoundException, + ] + arglist = [ - identity_fakes.access_rule_id, + self.access_rule.id, 'nonexistent_access_rule', ] verifylist = [ ('access_rule', arglist), ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 access rules failed to' - ' delete.', str(e)) + self.assertEqual('1 of 2 access rules failed to delete.', str(e)) + + calls = [] + for a in arglist: + calls.append(call(user_id, a)) - find_mock.assert_any_call(self.access_rules_mock, - identity_fakes.access_rule_id) - find_mock.assert_any_call(self.access_rules_mock, - 'nonexistent_access_rule') + self.identity_sdk_client.get_access_rule.assert_has_calls(calls) - self.assertEqual(2, find_mock.call_count) - self.access_rules_mock.delete.assert_called_once_with( - identity_fakes.access_rule_id) + self.assertEqual( + 2, self.identity_sdk_client.get_access_rule.call_count + ) + self.identity_sdk_client.delete_access_rule.assert_called_once_with( + user_id, self.access_rule.id + ) -class TestAccessRuleList(TestAccessRule): +class TestAccessRuleList(identity_fakes.TestIdentityv3): + access_rule = sdk_fakes.generate_fake_resource(_access_rule.AccessRule) def setUp(self): - super(TestAccessRuleList, self).setUp() + super().setUp() - self.access_rules_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ACCESS_RULE), - loaded=True, - ), - ] + self.identity_sdk_client.access_rules.return_value = [self.access_rule] # Get the command object to test self.cmd = access_rule.ListAccessRule(self.app, None) @@ -120,30 +109,34 @@ def test_access_rule_list(self): verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + columns, data = self.cmd.take_action(parsed_args) - self.access_rules_mock.list.assert_called_with(user=None) + self.identity_sdk_client.access_rules.assert_called_with(user=user_id) collist = ('ID', 'Service', 'Method', 'Path') self.assertEqual(collist, columns) - datalist = (( - identity_fakes.access_rule_id, - identity_fakes.access_rule_service, - identity_fakes.access_rule_method, - identity_fakes.access_rule_path, - ), ) + datalist = ( + ( + self.access_rule.id, + self.access_rule.service, + self.access_rule.method, + self.access_rule.path, + ), + ) self.assertEqual(datalist, tuple(data)) -class TestAccessRuleShow(TestAccessRule): +class TestAccessRuleShow(identity_fakes.TestIdentityv3): + access_rule = sdk_fakes.generate_fake_resource(_access_rule.AccessRule) def setUp(self): - super(TestAccessRuleShow, self).setUp() + super().setUp() - self.access_rules_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ACCESS_RULE), - loaded=True, + self.identity_sdk_client.get_access_rule.return_value = ( + self.access_rule ) # Get the command object to test @@ -151,24 +144,28 @@ def 
setUp(self): def test_access_rule_show(self): arglist = [ - identity_fakes.access_rule_id, + self.access_rule.id, ] verifylist = [ - ('access_rule', identity_fakes.access_rule_id), + ('access_rule', self.access_rule.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + columns, data = self.cmd.take_action(parsed_args) - self.access_rules_mock.get.assert_called_with( - identity_fakes.access_rule_id) + self.identity_sdk_client.get_access_rule.assert_called_with( + user_id, self.access_rule.id + ) - collist = ('id', 'method', 'path', 'service') + collist = ('ID', 'Method', 'Path', 'Service') self.assertEqual(collist, columns) datalist = ( - identity_fakes.access_rule_id, - identity_fakes.access_rule_method, - identity_fakes.access_rule_path, - identity_fakes.access_rule_service, + self.access_rule.id, + self.access_rule.method, + self.access_rule.path, + self.access_rule.service, ) self.assertEqual(datalist, data) diff --git a/openstackclient/tests/unit/identity/v3/test_application_credential.py b/openstackclient/tests/unit/identity/v3/test_application_credential.py index 24bafc9f5f..3a3a80e4a5 100644 --- a/openstackclient/tests/unit/identity/v3/test_application_credential.py +++ b/openstackclient/tests/unit/identity/v3/test_application_credential.py @@ -13,61 +13,76 @@ # under the License. # -import copy -import json +import datetime from unittest import mock +from unittest.mock import call from osc_lib import exceptions -from osc_lib import utils +from openstack import exceptions as sdk_exceptions +from openstack.identity.v3 import ( + application_credential as _application_credential, +) +from openstack.identity.v3 import role as _role +from openstack.identity.v3 import user as _user +from openstack.test import fakes as sdk_fakes from openstackclient.identity.v3 import application_credential -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestApplicationCredential(identity_fakes.TestIdentityv3): - +class TestApplicationCredentialCreate(identity_fakes.TestIdentityv3): def setUp(self): - super(TestApplicationCredential, self).setUp() - - identity_manager = self.app.client_manager.identity - self.app_creds_mock = identity_manager.application_credentials - self.app_creds_mock.reset_mock() - self.roles_mock = identity_manager.roles - self.roles_mock.reset_mock() - + super().setUp() -class TestApplicationCredentialCreate(TestApplicationCredential): + self.roles = sdk_fakes.generate_fake_resource(_role.Role) + self.application_credential = sdk_fakes.generate_fake_resource( + resource_type=_application_credential.ApplicationCredential, + roles=[], + ) - def setUp(self): - super(TestApplicationCredentialCreate, self).setUp() + self.columns = ( + 'ID', + 'Name', + 'Description', + 'Project ID', + 'Roles', + 'Unrestricted', + 'Access Rules', + 'Expires At', + 'Secret', + ) + self.data = ( + self.application_credential.id, + self.application_credential.name, + self.application_credential.description, + self.application_credential.project_id, + application_credential.RolesColumn( + self.application_credential.roles + ), + self.application_credential.unrestricted, + self.application_credential.access_rules, + self.application_credential.expires_at, + self.application_credential.secret, + ) - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - 
loaded=True, + self.identity_sdk_client.create_application_credential.return_value = ( + self.application_credential ) # Get the command object to test self.cmd = application_credential.CreateApplicationCredential( - self.app, None) - - def test_application_credential_create_basic(self): - self.app_creds_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.APP_CRED_BASIC), - loaded=True, + self.app, None ) - name = identity_fakes.app_cred_name - arglist = [ - name - ] - verifylist = [ - ('name', identity_fakes.app_cred_name) - ] + def test_application_credential_create_basic(self): + name = self.application_credential.name + arglist = [name] + verifylist = [('name', self.application_credential.name)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. @@ -75,58 +90,45 @@ def test_application_credential_create_basic(self): # Set expected values kwargs = { - 'secret': None, 'roles': [], 'expires_at': None, 'description': None, + 'secret': None, 'unrestricted': False, - 'access_rules': None, + 'access_rules': [], } - self.app_creds_mock.create.assert_called_with( - name, - **kwargs + self.identity_sdk_client.create_application_credential.assert_called_with( + user_id, name, **kwargs ) - collist = ('access_rules', 'description', 'expires_at', 'id', 'name', - 'project_id', 'roles', 'secret', 'unrestricted') - self.assertEqual(collist, columns) - datalist = ( - None, - None, - None, - identity_fakes.app_cred_id, - identity_fakes.app_cred_name, - identity_fakes.project_id, - identity_fakes.role_name, - identity_fakes.app_cred_secret, - False, - ) - self.assertEqual(datalist, data) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) def test_application_credential_create_with_options(self): - name = identity_fakes.app_cred_name - self.app_creds_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.APP_CRED_OPTIONS), - loaded=True, - ) - + name = self.application_credential.name arglist = [ name, - '--secret', 'moresecuresecret', - '--role', identity_fakes.role_id, - '--expiration', identity_fakes.app_cred_expires_str, - '--description', 'credential for testing' + '--secret', + 'moresecuresecret', + '--role', + self.roles.id, + '--expiration', + '2024-01-01T00:00:00', + '--description', + 'credential for testing', ] verifylist = [ - ('name', identity_fakes.app_cred_name), + ('name', self.application_credential.name), ('secret', 'moresecuresecret'), - ('role', [identity_fakes.role_id]), - ('expiration', identity_fakes.app_cred_expires_str), - ('description', 'credential for testing') + ('roles', [self.roles.id]), + ('expiration', '2024-01-01T00:00:00'), + ('description', 'credential for testing'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
@@ -134,179 +136,152 @@ def test_application_credential_create_with_options(self): # Set expected values kwargs = { - 'secret': 'moresecuresecret', - 'roles': [identity_fakes.role_id], - 'expires_at': identity_fakes.app_cred_expires, + 'roles': [{'id': self.roles.id}], + 'expires_at': datetime.datetime(2024, 1, 1, 0, 0), 'description': 'credential for testing', + 'secret': 'moresecuresecret', 'unrestricted': False, - 'access_rules': None, + 'access_rules': [], } - self.app_creds_mock.create.assert_called_with( - name, - **kwargs + self.identity_sdk_client.create_application_credential.assert_called_with( + user_id, name, **kwargs ) - collist = ('access_rules', 'description', 'expires_at', 'id', 'name', - 'project_id', 'roles', 'secret', 'unrestricted') - self.assertEqual(collist, columns) - datalist = ( - None, - identity_fakes.app_cred_description, - identity_fakes.app_cred_expires_str, - identity_fakes.app_cred_id, - identity_fakes.app_cred_name, - identity_fakes.project_id, - identity_fakes.role_name, - identity_fakes.app_cred_secret, - False, - ) - self.assertEqual(datalist, data) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) def test_application_credential_create_with_access_rules_string(self): - name = identity_fakes.app_cred_name - self.app_creds_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.APP_CRED_ACCESS_RULES), - loaded=True, - ) + name = self.application_credential.name arglist = [ name, - '--access-rules', identity_fakes.app_cred_access_rules, + '--access-rules', + '[{"path": "/v2.1/servers", "method": "GET", "service": "compute"}]', ] verifylist = [ - ('name', identity_fakes.app_cred_name), - ('access_rules', identity_fakes.app_cred_access_rules), + ('name', self.application_credential.name), + ( + 'access_rules', + '[{"path": "/v2.1/servers", "method": "GET", "service": "compute"}]', + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'secret': None, 'roles': [], 'expires_at': None, 'description': None, + 'secret': None, 'unrestricted': False, - 'access_rules': json.loads(identity_fakes.app_cred_access_rules) + 'access_rules': [ + { + "path": "/v2.1/servers", + "method": "GET", + "service": "compute", + } + ], } - self.app_creds_mock.create.assert_called_with( - name, - **kwargs + self.identity_sdk_client.create_application_credential.assert_called_with( + user_id, name, **kwargs ) - collist = ('access_rules', 'description', 'expires_at', 'id', 'name', - 'project_id', 'roles', 'secret', 'unrestricted') - self.assertEqual(collist, columns) - datalist = ( - identity_fakes.app_cred_access_rules, - None, - None, - identity_fakes.app_cred_id, - identity_fakes.app_cred_name, - identity_fakes.project_id, - identity_fakes.role_name, - identity_fakes.app_cred_secret, - False, - ) - self.assertEqual(datalist, data) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) @mock.patch('openstackclient.identity.v3.application_credential.json.load') @mock.patch('openstackclient.identity.v3.application_credential.open') def test_application_credential_create_with_access_rules_file( - self, _, mock_json_load): - mock_json_load.return_value = identity_fakes.app_cred_access_rules - - name = identity_fakes.app_cred_name - self.app_creds_mock.create.return_value = fakes.FakeResource( - None, 
- copy.deepcopy(identity_fakes.APP_CRED_ACCESS_RULES), - loaded=True, - ) + self, _, mock_json_load + ): + mock_json_load.return_value = '/tmp/access_rules.json' + name = self.application_credential.name arglist = [ name, - '--access-rules', identity_fakes.app_cred_access_rules_path, + '--access-rules', + '/tmp/access_rules.json', ] verifylist = [ - ('name', identity_fakes.app_cred_name), - ('access_rules', identity_fakes.app_cred_access_rules_path), + ('name', self.application_credential.name), + ('access_rules', '/tmp/access_rules.json'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'secret': None, 'roles': [], 'expires_at': None, 'description': None, + 'secret': None, 'unrestricted': False, - 'access_rules': identity_fakes.app_cred_access_rules + 'access_rules': '/tmp/access_rules.json', } - self.app_creds_mock.create.assert_called_with( - name, - **kwargs + self.identity_sdk_client.create_application_credential.assert_called_with( + user_id, name, **kwargs ) - collist = ('access_rules', 'description', 'expires_at', 'id', 'name', - 'project_id', 'roles', 'secret', 'unrestricted') - self.assertEqual(collist, columns) - datalist = ( - identity_fakes.app_cred_access_rules, - None, - None, - identity_fakes.app_cred_id, - identity_fakes.app_cred_name, - identity_fakes.project_id, - identity_fakes.role_name, - identity_fakes.app_cred_secret, - False, - ) - self.assertEqual(datalist, data) - + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) -class TestApplicationCredentialDelete(TestApplicationCredential): +class TestApplicationCredentialDelete(identity_fakes.TestIdentityv3): def setUp(self): - super(TestApplicationCredentialDelete, self).setUp() + super().setUp() - # This is the return value for utils.find_resource() - self.app_creds_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.APP_CRED_BASIC), - loaded=True, + self.application_credential = sdk_fakes.generate_fake_resource( + resource_type=_application_credential.ApplicationCredential, + roles=[], + ) + self.identity_sdk_client.find_application_credential.return_value = ( + self.application_credential + ) + self.identity_sdk_client.delete_application_credential.return_value = ( + None ) - self.app_creds_mock.delete.return_value = None # Get the command object to test self.cmd = application_credential.DeleteApplicationCredential( - self.app, None) + self.app, None + ) def test_application_credential_delete(self): arglist = [ - identity_fakes.app_cred_id, + self.application_credential.id, ] verifylist = [ - ('application_credential', [identity_fakes.app_cred_id]) + ('application_credential', [self.application_credential.id]) ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + result = self.cmd.take_action(parsed_args) - self.app_creds_mock.delete.assert_called_with( - identity_fakes.app_cred_id, + self.identity_sdk_client.delete_application_credential.assert_called_with( + user_id, + self.application_credential.id, ) self.assertIsNone(result) - @mock.patch.object(utils, 'find_resource') - def test_delete_multi_app_creds_with_exception(self, find_mock): - find_mock.side_effect = [self.app_creds_mock.get.return_value, - exceptions.CommandError] + 
def test_delete_multi_app_creds_with_exception(self): + self.identity_sdk_client.find_application_credential.side_effect = [ + self.application_credential, + sdk_exceptions.NotFoundException, + ] arglist = [ - identity_fakes.app_cred_id, + self.application_credential.id, 'nonexistent_app_cred', ] verifylist = [ @@ -314,107 +289,176 @@ def test_delete_multi_app_creds_with_exception(self, find_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 application credentials failed to' - ' delete.', str(e)) + self.assertEqual( + '1 of 2 application credentials failed to delete.', str(e) + ) - find_mock.assert_any_call(self.app_creds_mock, - identity_fakes.app_cred_id) - find_mock.assert_any_call(self.app_creds_mock, - 'nonexistent_app_cred') + calls = [] + for a in arglist: + calls.append(call(user_id, a, ignore_missing=False)) - self.assertEqual(2, find_mock.call_count) - self.app_creds_mock.delete.assert_called_once_with( - identity_fakes.app_cred_id) + self.identity_sdk_client.find_application_credential.assert_has_calls( + calls + ) + self.assertEqual( + 2, self.identity_sdk_client.find_application_credential.call_count + ) + self.identity_sdk_client.delete_application_credential.assert_called_once_with( + user_id, self.application_credential.id + ) -class TestApplicationCredentialList(TestApplicationCredential): +class TestApplicationCredentialList(identity_fakes.TestIdentityv3): def setUp(self): - super(TestApplicationCredentialList, self).setUp() + super().setUp() - self.app_creds_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.APP_CRED_BASIC), - loaded=True, - ), + self.application_credential = sdk_fakes.generate_fake_resource( + resource_type=_application_credential.ApplicationCredential, + roles=[], + ) + self.identity_sdk_client.application_credentials.return_value = [ + self.application_credential ] + self.user = sdk_fakes.generate_fake_resource(resource_type=_user.User) + self.identity_sdk_client.find_user.return_value = self.user + + self.columns = ( + 'ID', + 'Name', + 'Description', + 'Project ID', + 'Roles', + 'Unrestricted', + 'Access Rules', + 'Expires At', + ) + self.data = ( + ( + self.application_credential.id, + self.application_credential.name, + self.application_credential.description, + self.application_credential.project_id, + application_credential.RolesColumn( + self.application_credential.roles + ), + self.application_credential.unrestricted, + self.application_credential.access_rules, + self.application_credential.expires_at, + ), + ) # Get the command object to test - self.cmd = application_credential.ListApplicationCredential(self.app, - None) + self.cmd = application_credential.ListApplicationCredential( + self.app, None + ) def test_application_credential_list(self): arglist = [] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class Lister in cliff, abstract method take_action() - # returns a tuple containing the column names and an iterable - # containing the data to be listed. 
+ conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + columns, data = self.cmd.take_action(parsed_args) - self.app_creds_mock.list.assert_called_with(user=None) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, tuple(data)) - collist = ('ID', 'Name', 'Project ID', 'Description', 'Expires At') - self.assertEqual(collist, columns) - datalist = (( - identity_fakes.app_cred_id, - identity_fakes.app_cred_name, - identity_fakes.project_id, - None, - None - ), ) - self.assertEqual(datalist, tuple(data)) + self.identity_sdk_client.find_user.assert_not_called() + self.identity_sdk_client.application_credentials.assert_called_with( + user=user_id + ) + + def test_application_credential_list_user(self): + arglist = ['--user', self.user.name] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + conn.config.get_auth().get_user_id(conn.identity) -class TestApplicationCredentialShow(TestApplicationCredential): + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, tuple(data)) + + self.identity_sdk_client.find_user.assert_called_once_with( + name_or_id=self.user.name, ignore_missing=False + ) + self.identity_sdk_client.application_credentials.assert_called_with( + user=self.user.id + ) + +class TestApplicationCredentialShow(identity_fakes.TestIdentityv3): def setUp(self): - super(TestApplicationCredentialShow, self).setUp() + super().setUp() - self.app_creds_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.APP_CRED_BASIC), - loaded=True, + self.application_credential = sdk_fakes.generate_fake_resource( + resource_type=_application_credential.ApplicationCredential, + roles=[], + ) + self.identity_sdk_client.find_application_credential.return_value = ( + self.application_credential + ) + + self.columns = ( + 'ID', + 'Name', + 'Description', + 'Project ID', + 'Roles', + 'Unrestricted', + 'Access Rules', + 'Expires At', + ) + self.data = ( + self.application_credential.id, + self.application_credential.name, + self.application_credential.description, + self.application_credential.project_id, + application_credential.RolesColumn( + self.application_credential.roles + ), + self.application_credential.unrestricted, + self.application_credential.access_rules, + self.application_credential.expires_at, ) # Get the command object to test - self.cmd = application_credential.ShowApplicationCredential(self.app, - None) + self.cmd = application_credential.ShowApplicationCredential( + self.app, None + ) def test_application_credential_show(self): arglist = [ - identity_fakes.app_cred_id, + self.application_credential.id, ] verifylist = [ - ('application_credential', identity_fakes.app_cred_id), + ('application_credential', self.application_credential.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.app_creds_mock.get.assert_called_with(identity_fakes.app_cred_id) - - collist = ('access_rules', 'description', 'expires_at', 'id', 'name', - 'project_id', 'roles', 'secret', 'unrestricted') - self.assertEqual(collist, columns) - datalist = ( - None, - None, - None, - identity_fakes.app_cred_id, - identity_fakes.app_cred_name, - identity_fakes.project_id, - identity_fakes.role_name, - identity_fakes.app_cred_secret, - False, + self.identity_sdk_client.find_application_credential.assert_called_with( + user_id, self.application_credential.id, ignore_missing=False ) - self.assertEqual(datalist, data) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/identity/v3/test_catalog.py b/openstackclient/tests/unit/identity/v3/test_catalog.py index 802a9017e2..df292b1689 100644 --- a/openstackclient/tests/unit/identity/v3/test_catalog.py +++ b/openstackclient/tests/unit/identity/v3/test_catalog.py @@ -9,7 +9,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# from unittest import mock @@ -19,7 +18,6 @@ class TestCatalog(utils.TestCommand): - fake_service = { 'id': 'qwertyuiop', 'type': 'compute', @@ -48,7 +46,7 @@ class TestCatalog(utils.TestCommand): } def setUp(self): - super(TestCatalog, self).setUp() + super().setUp() self.sc_mock = mock.Mock() self.sc_mock.service_catalog.catalog.return_value = [ @@ -62,9 +60,8 @@ def setUp(self): class TestCatalogList(TestCatalog): - def setUp(self): - super(TestCatalogList, self).setUp() + super().setUp() # Get the command object to test self.cmd = catalog.ListCatalog(self.app, None) @@ -74,8 +71,7 @@ def test_catalog_list(self): identity_fakes.TOKEN_WITH_PROJECT_ID, fake_service=self.fake_service, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [] verifylist = [] @@ -88,19 +84,21 @@ def test_catalog_list(self): collist = ('Name', 'Type', 'Endpoints') self.assertEqual(collist, columns) - datalist = (( - 'supernova', - 'compute', - catalog.EndpointsColumn( - auth_ref.service_catalog.catalog[0]['endpoints']), - ), ) + datalist = ( + ( + 'supernova', + 'compute', + catalog.EndpointsColumn( + auth_ref.service_catalog.catalog[0]['endpoints'] + ), + ), + ) self.assertCountEqual(datalist, tuple(data)) class TestCatalogShow(TestCatalog): - def setUp(self): - super(TestCatalogShow, self).setUp() + super().setUp() # Get the command object to test self.cmd = catalog.ShowCatalog(self.app, None) @@ -110,8 +108,7 @@ def test_catalog_show(self): identity_fakes.TOKEN_WITH_PROJECT_ID, fake_service=self.fake_service, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [ 'compute', @@ -130,7 +127,8 @@ def test_catalog_show(self): self.assertEqual(collist, columns) datalist = ( catalog.EndpointsColumn( - auth_ref.service_catalog.catalog[0]['endpoints']), + auth_ref.service_catalog.catalog[0]['endpoints'] + ), 'qwertyuiop', 'supernova', 'compute', @@ -146,4 +144,5 @@ def test_endpoints_column_human_readabale(self): 'onlyone\n admin: https://admin.example.com\n' '\n internal: https://internal.example.com\n' '\n none: https://none.example.com\n', - col.human_readable()) + col.human_readable(), + ) 
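Note: the hunks above and below all follow one migration pattern: keystoneclient manager mocks (self.app_creds_mock, self.domains_mock, ...) are replaced by fake SDK resource objects from openstack.test.fakes plus call assertions against the mocked identity proxy (self.identity_sdk_client). A minimal stand-alone sketch of that pattern is shown here for reference; the TestSdkFakePattern class and the plain mock.Mock() proxy are illustrative assumptions and not part of this patch — only generate_fake_resource and the Domain resource come from openstacksdk, as used in these hunks.

import unittest
from unittest import mock

from openstack.identity.v3 import domain as _domain
from openstack.test import fakes as sdk_fakes


class TestSdkFakePattern(unittest.TestCase):
    def test_fake_resource_round_trip(self):
        # Build a fake Domain; keyword overrides pin the attributes the
        # test cares about, every other field is auto-generated.
        fake_domain = sdk_fakes.generate_fake_resource(
            _domain.Domain, is_enabled=True
        )

        # Stand-in for the mocked identity proxy the commands call.
        proxy = mock.Mock()
        proxy.find_domain.return_value = fake_domain

        result = proxy.find_domain(fake_domain.id, ignore_missing=False)

        # Assert the proxy call and the attributes pinned above.
        proxy.find_domain.assert_called_once_with(
            fake_domain.id, ignore_missing=False
        )
        self.assertTrue(result.is_enabled)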
diff --git a/openstackclient/tests/unit/identity/v3/test_consumer.py b/openstackclient/tests/unit/identity/v3/test_consumer.py index 403250ef56..94b81dd941 100644 --- a/openstackclient/tests/unit/identity/v3/test_consumer.py +++ b/openstackclient/tests/unit/identity/v3/test_consumer.py @@ -18,18 +18,15 @@ class TestOAuth1(identity_fakes.TestOAuth1): - def setUp(self): - super(TestOAuth1, self).setUp() - identity_client = self.app.client_manager.identity - self.consumers_mock = identity_client.oauth1.consumers + super().setUp() + self.consumers_mock = self.identity_client.oauth1.consumers self.consumers_mock.reset_mock() class TestConsumerCreate(TestOAuth1): - def setUp(self): - super(TestConsumerCreate, self).setUp() + super().setUp() self.consumers_mock.create.return_value = fakes.FakeResource( None, @@ -41,7 +38,8 @@ def setUp(self): def test_create_consumer(self): arglist = [ - '--description', identity_fakes.consumer_description, + '--description', + identity_fakes.consumer_description, ] verifylist = [ ('description', identity_fakes.consumer_description), @@ -64,9 +62,8 @@ def test_create_consumer(self): class TestConsumerDelete(TestOAuth1): - def setUp(self): - super(TestConsumerDelete, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.consumers_mock.get.return_value = fakes.FakeResource( @@ -96,9 +93,8 @@ def test_delete_consumer(self): class TestConsumerList(TestOAuth1): - def setUp(self): - super(TestConsumerList, self).setUp() + super().setUp() self.consumers_mock.get.return_value = fakes.FakeResource( None, @@ -129,17 +125,18 @@ def test_consumer_list(self): collist = ('ID', 'Description') self.assertEqual(collist, columns) - datalist = (( - identity_fakes.consumer_id, - identity_fakes.consumer_description, - ), ) + datalist = ( + ( + identity_fakes.consumer_id, + identity_fakes.consumer_description, + ), + ) self.assertEqual(datalist, tuple(data)) class TestConsumerSet(TestOAuth1): - def setUp(self): - super(TestConsumerSet, self).setUp() + super().setUp() self.consumers_mock.get.return_value = fakes.FakeResource( None, @@ -161,7 +158,8 @@ def test_consumer_update(self): new_description = "consumer new description" arglist = [ - '--description', new_description, + '--description', + new_description, identity_fakes.consumer_id, ] verifylist = [ @@ -174,16 +172,14 @@ def test_consumer_update(self): kwargs = {'description': new_description} self.consumers_mock.update.assert_called_with( - identity_fakes.consumer_id, - **kwargs + identity_fakes.consumer_id, **kwargs ) self.assertIsNone(result) class TestConsumerShow(TestOAuth1): - def setUp(self): - super(TestConsumerShow, self).setUp() + super().setUp() consumer_no_secret = copy.deepcopy(identity_fakes.OAUTH_CONSUMER) del consumer_no_secret['secret'] diff --git a/openstackclient/tests/unit/identity/v3/test_credential.py b/openstackclient/tests/unit/identity/v3/test_credential.py index 40596d5874..f21fd71a0f 100644 --- a/openstackclient/tests/unit/identity/v3/test_credential.py +++ b/openstackclient/tests/unit/identity/v3/test_credential.py @@ -10,9 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. 
-from unittest import mock from unittest.mock import call +from openstack import exceptions as sdk_exceptions +from openstack.identity.v3 import credential as _credential +from openstack.identity.v3 import project as _project +from openstack.identity.v3 import user as _user +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions from openstackclient.identity.v3 import credential @@ -20,28 +24,9 @@ from openstackclient.tests.unit import utils -class TestCredential(identity_fakes.TestIdentityv3): - - def setUp(self): - super(TestCredential, self).setUp() - - # Get a shortcut to the CredentialManager Mock - self.credentials_mock = self.app.client_manager.identity.credentials - self.credentials_mock.reset_mock() - - # Get a shortcut to the UserManager Mock - self.users_mock = self.app.client_manager.identity.users - self.users_mock.reset_mock() - - # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects - self.projects_mock.reset_mock() - - -class TestCredentialCreate(TestCredential): - - user = identity_fakes.FakeUser.create_one_user() - project = identity_fakes.FakeProject.create_one_project() +class TestCredentialCreate(identity_fakes.TestIdentityv3): + user = sdk_fakes.generate_fake_resource(_user.User) + project = sdk_fakes.generate_fake_resource(_project.Project) columns = ( 'blob', 'id', @@ -51,13 +36,19 @@ class TestCredentialCreate(TestCredential): ) def setUp(self): - super(TestCredentialCreate, self).setUp() + super().setUp() - self.credential = identity_fakes.FakeCredential.create_one_credential( - attrs={'user_id': self.user.id, 'project_id': self.project.id}) - self.credentials_mock.create.return_value = self.credential - self.users_mock.get.return_value = self.user - self.projects_mock.get.return_value = self.project + self.credential = sdk_fakes.generate_fake_resource( + resource_type=_credential.Credential, + user_id=self.user.id, + project_id=self.project.id, + type='cert', + ) + self.identity_sdk_client.create_credential.return_value = ( + self.credential + ) + self.identity_sdk_client.find_user.return_value = self.user + self.identity_sdk_client.find_project.return_value = self.project self.data = ( self.credential.blob, self.credential.id, @@ -82,12 +73,12 @@ def test_credential_create_no_options(self): columns, data = self.cmd.take_action(parsed_args) kwargs = { - 'user': self.credential.user_id, + 'user_id': self.credential.user_id, 'type': self.credential.type, 'blob': self.credential.blob, - 'project': None, + 'project_id': None, } - self.credentials_mock.create.assert_called_once_with( + self.identity_sdk_client.create_credential.assert_called_once_with( **kwargs ) @@ -98,8 +89,10 @@ def test_credential_create_with_options(self): arglist = [ self.credential.user_id, self.credential.blob, - '--type', self.credential.type, - '--project', self.credential.project_id, + '--type', + self.credential.type, + '--project', + self.credential.project_id, ] verifylist = [ ('user', self.credential.user_id), @@ -112,12 +105,12 @@ def test_credential_create_with_options(self): columns, data = self.cmd.take_action(parsed_args) kwargs = { - 'user': self.credential.user_id, + 'user_id': self.credential.user_id, 'type': self.credential.type, 'blob': self.credential.blob, - 'project': self.credential.project_id, + 'project_id': self.credential.project_id, } - self.credentials_mock.create.assert_called_once_with( + self.identity_sdk_client.create_credential.assert_called_once_with( **kwargs ) @@ -125,37 +118,40 @@ 
def test_credential_create_with_options(self): self.assertEqual(self.data, data) -class TestCredentialDelete(TestCredential): - - credentials = identity_fakes.FakeCredential.create_credentials(count=2) - +class TestCredentialDelete(identity_fakes.TestIdentityv3): def setUp(self): - super(TestCredentialDelete, self).setUp() + super().setUp() - self.credentials_mock.delete.return_value = None + self.identity_sdk_client.delete_credential.return_value = None # Get the command object to test self.cmd = credential.DeleteCredential(self.app, None) def test_credential_delete(self): + credential = sdk_fakes.generate_fake_resource( + _credential.Credential, + ) arglist = [ - self.credentials[0].id, + credential.id, ] verifylist = [ - ('credential', [self.credentials[0].id]), + ('credential', [credential.id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.credentials_mock.delete.assert_called_with( - self.credentials[0].id, + self.identity_sdk_client.delete_credential.assert_called_with( + credential.id, ) self.assertIsNone(result) def test_credential_multi_delete(self): + credentials = sdk_fakes.generate_fake_resources( + _credential.Credential, count=2 + ) arglist = [] - for c in self.credentials: + for c in credentials: arglist.append(c.id) verifylist = [ ('credential', arglist), @@ -165,25 +161,26 @@ def test_credential_multi_delete(self): result = self.cmd.take_action(parsed_args) calls = [] - for c in self.credentials: + for c in credentials: calls.append(call(c.id)) - self.credentials_mock.delete.assert_has_calls(calls) + self.identity_sdk_client.delete_credential.assert_has_calls(calls) self.assertIsNone(result) def test_credential_multi_delete_with_exception(self): + credential = sdk_fakes.generate_fake_resource( + _credential.Credential, + ) arglist = [ - self.credentials[0].id, + credential.id, 'unexist_credential', ] - verifylist = [ - ('credential', [self.credentials[0].id, 'unexist_credential']) - ] + verifylist = [('credential', [credential.id, 'unexist_credential'])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - delete_mock_result = [None, exceptions.CommandError] - self.credentials_mock.delete = ( - mock.Mock(side_effect=delete_mock_result) - ) + self.identity_sdk_client.delete_credential.side_effect = [ + None, + sdk_exceptions.NotFoundException, + ] try: self.cmd.take_action(parsed_args) @@ -191,30 +188,35 @@ def test_credential_multi_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 credential failed to delete.', str(e)) - self.credentials_mock.delete.assert_any_call(self.credentials[0].id) - self.credentials_mock.delete.assert_any_call('unexist_credential') - + self.identity_sdk_client.delete_credential.assert_any_call( + credential.id + ) + self.identity_sdk_client.delete_credential.assert_any_call( + 'unexist_credential' + ) -class TestCredentialList(TestCredential): - credential = identity_fakes.FakeCredential.create_one_credential() +class TestCredentialList(identity_fakes.TestIdentityv3): + credential = sdk_fakes.generate_fake_resource(_credential.Credential) columns = ('ID', 'Type', 'User ID', 'Data', 'Project ID') - data = (( - credential.id, - credential.type, - credential.user_id, - credential.blob, - credential.project_id, - ), ) + data = ( + ( + credential.id, + credential.type, + credential.user_id, + credential.blob, + credential.project_id, + ), + ) def setUp(self): - super(TestCredentialList, self).setUp() + super().setUp() - self.user = 
identity_fakes.FakeUser.create_one_user() - self.users_mock.get.return_value = self.user + self.user = sdk_fakes.generate_fake_resource(_user.User) + self.identity_sdk_client.find_user.return_value = self.user - self.credentials_mock.list.return_value = [self.credential] + self.identity_sdk_client.credentials.return_value = [self.credential] # Get the command object to test self.cmd = credential.ListCredential(self.app, None) @@ -226,14 +228,16 @@ def test_credential_list_no_options(self): columns, data = self.cmd.take_action(parsed_args) - self.credentials_mock.list.assert_called_with() + self.identity_sdk_client.credentials.assert_called_with() self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) def test_credential_list_with_options(self): arglist = [ - '--user', self.credential.user_id, - '--type', self.credential.type, + '--user', + self.credential.user_id, + '--type', + self.credential.type, ] verifylist = [ ('user', self.credential.user_id), @@ -247,21 +251,20 @@ def test_credential_list_with_options(self): 'user_id': self.user.id, 'type': self.credential.type, } - self.users_mock.get.assert_called_with(self.credential.user_id) - self.credentials_mock.list.assert_called_with( - **kwargs + self.identity_sdk_client.find_user.assert_called_with( + self.credential.user_id, domain_id=None, ignore_missing=False ) + self.identity_sdk_client.credentials.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.data, tuple(data)) -class TestCredentialSet(TestCredential): - - credential = identity_fakes.FakeCredential.create_one_credential() +class TestCredentialSet(identity_fakes.TestIdentityv3): + credential = sdk_fakes.generate_fake_resource(_credential.Credential) def setUp(self): - super(TestCredentialSet, self).setUp() + super().setUp() self.cmd = credential.SetCredential(self.app, None) def test_credential_set_no_options(self): @@ -269,44 +272,57 @@ def test_credential_set_no_options(self): self.credential.id, ] - self.assertRaises(utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + utils.ParserException, self.check_parser, self.cmd, arglist, [] + ) def test_credential_set_missing_user(self): arglist = [ - '--type', 'ec2', - '--data', self.credential.blob, + '--type', + 'ec2', + '--data', + self.credential.blob, self.credential.id, ] - self.assertRaises(utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + utils.ParserException, self.check_parser, self.cmd, arglist, [] + ) def test_credential_set_missing_type(self): arglist = [ - '--user', self.credential.user_id, - '--data', self.credential.blob, + '--user', + self.credential.user_id, + '--data', + self.credential.blob, self.credential.id, ] - self.assertRaises(utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + utils.ParserException, self.check_parser, self.cmd, arglist, [] + ) def test_credential_set_missing_data(self): arglist = [ - '--user', self.credential.user_id, - '--type', 'ec2', + '--user', + self.credential.user_id, + '--type', + 'ec2', self.credential.id, ] - self.assertRaises(utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + utils.ParserException, self.check_parser, self.cmd, arglist, [] + ) def test_credential_set_valid(self): arglist = [ - '--user', self.credential.user_id, - '--type', 'ec2', - '--data', self.credential.blob, + '--user', + self.credential.user_id, + '--type', + 'ec2', + '--data', + 
self.credential.blob, self.credential.id, ] parsed_args = self.check_parser(self.cmd, arglist, []) @@ -317,10 +333,14 @@ def test_credential_set_valid(self): def test_credential_set_valid_with_project(self): arglist = [ - '--user', self.credential.user_id, - '--type', 'ec2', - '--data', self.credential.blob, - '--project', self.credential.project_id, + '--user', + self.credential.user_id, + '--type', + 'ec2', + '--data', + self.credential.blob, + '--project', + self.credential.project_id, self.credential.id, ] parsed_args = self.check_parser(self.cmd, arglist, []) @@ -330,8 +350,7 @@ def test_credential_set_valid_with_project(self): self.assertIsNone(result) -class TestCredentialShow(TestCredential): - +class TestCredentialShow(identity_fakes.TestIdentityv3): columns = ( 'blob', 'id', @@ -341,10 +360,12 @@ class TestCredentialShow(TestCredential): ) def setUp(self): - super(TestCredentialShow, self).setUp() + super().setUp() - self.credential = identity_fakes.FakeCredential.create_one_credential() - self.credentials_mock.get.return_value = self.credential + self.credential = sdk_fakes.generate_fake_resource( + _credential.Credential + ) + self.identity_sdk_client.get_credential.return_value = self.credential self.data = ( self.credential.blob, self.credential.id, @@ -366,6 +387,8 @@ def test_credential_show(self): columns, data = self.cmd.take_action(parsed_args) - self.credentials_mock.get.assert_called_once_with(self.credential.id) + self.identity_sdk_client.get_credential.assert_called_once_with( + self.credential.id + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/identity/v3/test_domain.py b/openstackclient/tests/unit/identity/v3/test_domain.py index c39f1bd3d7..cc0593d1fe 100644 --- a/openstackclient/tests/unit/identity/v3/test_domain.py +++ b/openstackclient/tests/unit/identity/v3/test_domain.py @@ -10,41 +10,32 @@ # License for the specific language governing permissions and limitations # under the License. 
+from openstack.identity.v3 import domain as _domain +from openstack.test import fakes as sdk_fakes from openstackclient.identity.v3 import domain from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestDomain(identity_fakes.TestIdentityv3): - - def setUp(self): - super(TestDomain, self).setUp() - - # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains - self.domains_mock.reset_mock() - - -class TestDomainCreate(TestDomain): - +class TestDomainCreate(identity_fakes.TestIdentityv3): columns = ( - 'description', - 'enabled', 'id', 'name', - 'tags' + 'enabled', + 'description', + 'options', ) def setUp(self): - super(TestDomainCreate, self).setUp() + super().setUp() - self.domain = identity_fakes.FakeDomain.create_one_domain() - self.domains_mock.create.return_value = self.domain + self.domain = sdk_fakes.generate_fake_resource(_domain.Domain) + self.identity_sdk_client.create_domain.return_value = self.domain self.datalist = ( - self.domain.description, - True, self.domain.id, self.domain.name, - self.domain.tags + self.domain.is_enabled, + self.domain.description, + self.domain.options, ) # Get the command object to test @@ -69,18 +60,17 @@ def test_domain_create_no_options(self): 'name': self.domain.name, 'description': None, 'options': {}, - 'enabled': True, + 'is_enabled': True, } - self.domains_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_domain.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_domain_create_description(self): arglist = [ - '--description', 'new desc', + '--description', + 'new desc', self.domain.name, ] verifylist = [ @@ -99,11 +89,9 @@ def test_domain_create_description(self): 'name': self.domain.name, 'description': 'new desc', 'options': {}, - 'enabled': True, + 'is_enabled': True, } - self.domains_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_domain.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -114,7 +102,7 @@ def test_domain_create_enable(self): self.domain.name, ] verifylist = [ - ('enable', True), + ('is_enabled', True), ('name', self.domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -129,11 +117,9 @@ def test_domain_create_enable(self): 'name': self.domain.name, 'description': None, 'options': {}, - 'enabled': True, + 'is_enabled': True, } - self.domains_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_domain.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -144,7 +130,7 @@ def test_domain_create_disable(self): self.domain.name, ] verifylist = [ - ('disable', True), + ('is_enabled', False), ('name', self.domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -159,11 +145,9 @@ def test_domain_create_disable(self): 'name': self.domain.name, 'description': None, 'options': {}, - 'enabled': False, + 'is_enabled': False, } - self.domains_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_domain.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -189,11 +173,9 @@ def test_domain_create_with_immutable(self): 'name': self.domain.name, 'description': None, 'options': {'immutable': True}, - 'enabled': True, + 'is_enabled': True, } - 
self.domains_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_domain.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -204,7 +186,7 @@ def test_domain_create_with_no_immutable(self): self.domain.name, ] verifylist = [ - ('no_immutable', True), + ('immutable', False), ('name', self.domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -219,26 +201,23 @@ def test_domain_create_with_no_immutable(self): 'name': self.domain.name, 'description': None, 'options': {'immutable': False}, - 'enabled': True, + 'is_enabled': True, } - self.domains_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_domain.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) -class TestDomainDelete(TestDomain): - - domain = identity_fakes.FakeDomain.create_one_domain() +class TestDomainDelete(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource(_domain.Domain) def setUp(self): - super(TestDomainDelete, self).setUp() + super().setUp() # This is the return value for utils.find_resource() - self.domains_mock.get.return_value = self.domain - self.domains_mock.delete.return_value = None + self.identity_sdk_client.find_domain.return_value = self.domain + self.identity_sdk_client.delete_domain.return_value = None # Get the command object to test self.cmd = domain.DeleteDomain(self.app, None) @@ -254,20 +233,35 @@ def test_domain_delete(self): result = self.cmd.take_action(parsed_args) - self.domains_mock.delete.assert_called_with( + self.identity_sdk_client.delete_domain.assert_called_with( self.domain.id, ) self.assertIsNone(result) -class TestDomainList(TestDomain): - - domain = identity_fakes.FakeDomain.create_one_domain() +class TestDomainList(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource( + resource_type=_domain.Domain, is_enabled=True + ) + columns = ( + 'ID', + 'Name', + 'Enabled', + 'Description', + ) def setUp(self): - super(TestDomainList, self).setUp() + super().setUp() - self.domains_mock.list.return_value = [self.domain] + self.identity_sdk_client.domains.return_value = [self.domain] + self.datalist = ( + ( + self.domain.id, + self.domain.name, + self.domain.is_enabled, + self.domain.description, + ), + ) # Get the command object to test self.cmd = domain.ListDomain(self.app, None) @@ -281,24 +275,14 @@ def test_domain_list_no_options(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.domains_mock.list.assert_called_with() + self.identity_sdk_client.domains.assert_called_with() - collist = ('ID', 'Name', 'Enabled', 'Description') - self.assertEqual(collist, columns) - datalist = (( - self.domain.id, - self.domain.name, - True, - self.domain.description, - ), ) - self.assertEqual(datalist, tuple(data)) + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, tuple(data)) def test_domain_list_with_option_name(self): - arglist = ['--name', - self.domain.name] - verifylist = [ - ('name', self.domain.name) - ] + arglist = ['--name', self.domain.name] + verifylist = [('name', self.domain.name)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() @@ -306,26 +290,15 @@ def test_domain_list_with_option_name(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - kwargs = { - 'name': self.domain.name - } - self.domains_mock.list.assert_called_with(**kwargs) + kwargs = {'name': self.domain.name} + self.identity_sdk_client.domains.assert_called_with(**kwargs) - collist = ('ID', 'Name', 'Enabled', 'Description') - self.assertEqual(collist, columns) - datalist = (( - self.domain.id, - self.domain.name, - True, - self.domain.description, - ), ) - self.assertEqual(datalist, tuple(data)) + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, tuple(data)) def test_domain_list_with_option_enabled(self): arglist = ['--enabled'] - verifylist = [ - ('enabled', True) - ] + verifylist = [('is_enabled', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() @@ -333,32 +306,22 @@ def test_domain_list_with_option_enabled(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - kwargs = { - 'enabled': True - } - self.domains_mock.list.assert_called_with(**kwargs) - - collist = ('ID', 'Name', 'Enabled', 'Description') - self.assertEqual(collist, columns) - datalist = (( - self.domain.id, - self.domain.name, - True, - self.domain.description, - ), ) - self.assertEqual(datalist, tuple(data)) + kwargs = {'is_enabled': True} + self.identity_sdk_client.domains.assert_called_with(**kwargs) + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, tuple(data)) -class TestDomainSet(TestDomain): - domain = identity_fakes.FakeDomain.create_one_domain() +class TestDomainSet(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource(_domain.Domain) def setUp(self): - super(TestDomainSet, self).setUp() + super().setUp() - self.domains_mock.get.return_value = self.domain + self.identity_sdk_client.find_domain.return_value = self.domain - self.domains_mock.update.return_value = self.domain + self.identity_sdk_client.update_domain.return_value = self.domain # Get the command object to test self.cmd = domain.SetDomain(self.app, None) @@ -375,15 +338,15 @@ def test_domain_set_no_options(self): result = self.cmd.take_action(parsed_args) kwargs = {} - self.domains_mock.update.assert_called_with( - self.domain.id, - **kwargs + self.identity_sdk_client.update_domain.assert_called_with( + self.domain.id, **kwargs ) self.assertIsNone(result) def test_domain_set_name(self): arglist = [ - '--name', 'qwerty', + '--name', + 'qwerty', self.domain.id, ] verifylist = [ @@ -398,15 +361,15 @@ def test_domain_set_name(self): kwargs = { 'name': 'qwerty', } - self.domains_mock.update.assert_called_with( - self.domain.id, - **kwargs + self.identity_sdk_client.update_domain.assert_called_with( + self.domain.id, **kwargs ) self.assertIsNone(result) def test_domain_set_description(self): arglist = [ - '--description', 'new desc', + '--description', + 'new desc', self.domain.id, ] verifylist = [ @@ -421,9 +384,8 @@ def test_domain_set_description(self): kwargs = { 'description': 'new desc', } - self.domains_mock.update.assert_called_with( - self.domain.id, - **kwargs + self.identity_sdk_client.update_domain.assert_called_with( + self.domain.id, **kwargs ) self.assertIsNone(result) @@ -433,7 +395,7 @@ def test_domain_set_enable(self): self.domain.id, ] verifylist = [ - ('enable', True), + ('is_enabled', True), ('domain', self.domain.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -442,11 +404,10 @@ def test_domain_set_enable(self): # Set expected values 
kwargs = { - 'enabled': True, + 'is_enabled': True, } - self.domains_mock.update.assert_called_with( - self.domain.id, - **kwargs + self.identity_sdk_client.update_domain.assert_called_with( + self.domain.id, **kwargs ) self.assertIsNone(result) @@ -456,7 +417,7 @@ def test_domain_set_disable(self): self.domain.id, ] verifylist = [ - ('disable', True), + ('is_enabled', False), ('domain', self.domain.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -465,11 +426,10 @@ def test_domain_set_disable(self): # Set expected values kwargs = { - 'enabled': False, + 'is_enabled': False, } - self.domains_mock.update.assert_called_with( - self.domain.id, - **kwargs + self.identity_sdk_client.update_domain.assert_called_with( + self.domain.id, **kwargs ) self.assertIsNone(result) @@ -490,9 +450,8 @@ def test_domain_set_immutable_option(self): kwargs = { 'options': {'immutable': True}, } - self.domains_mock.update.assert_called_with( - self.domain.id, - **kwargs + self.identity_sdk_client.update_domain.assert_called_with( + self.domain.id, **kwargs ) self.assertIsNone(result) @@ -502,7 +461,7 @@ def test_domain_set_no_immutable_option(self): self.domain.id, ] verifylist = [ - ('no_immutable', True), + ('immutable', False), ('domain', self.domain.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -513,20 +472,34 @@ def test_domain_set_no_immutable_option(self): kwargs = { 'options': {'immutable': False}, } - self.domains_mock.update.assert_called_with( - self.domain.id, - **kwargs + self.identity_sdk_client.update_domain.assert_called_with( + self.domain.id, **kwargs ) self.assertIsNone(result) -class TestDomainShow(TestDomain): +class TestDomainShow(identity_fakes.TestIdentityv3): + columns = ( + 'id', + 'name', + 'enabled', + 'description', + 'options', + ) def setUp(self): - super(TestDomainShow, self).setUp() + super().setUp() + + self.domain = sdk_fakes.generate_fake_resource(_domain.Domain) + self.identity_sdk_client.find_domain.return_value = self.domain + self.datalist = ( + self.domain.id, + self.domain.name, + self.domain.is_enabled, + self.domain.description, + self.domain.options, + ) - self.domain = identity_fakes.FakeDomain.create_one_domain() - self.domains_mock.get.return_value = self.domain # Get the command object to test self.cmd = domain.ShowDomain(self.app, None) @@ -538,32 +511,17 @@ def test_domain_show(self): ('domain', self.domain.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.identity.tokens.get_token_data.return_value = \ - {'token': - {'project': - {'domain': - {'id': 'd1', - 'name': 'd1' - } - } - } - } + self.identity_client.tokens.get_token_data.return_value = { + 'token': {'project': {'domain': {'id': 'd1', 'name': 'd1'}}} + } # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.domains_mock.get.assert_called_with( - self.domain.id, + self.identity_sdk_client.find_domain.assert_called_with( + self.domain.id, ignore_missing=False ) - collist = ('description', 'enabled', 'id', 'name', 'tags') - self.assertEqual(collist, columns) - datalist = ( - self.domain.description, - True, - self.domain.id, - self.domain.name, - self.domain.tags - ) - self.assertEqual(datalist, data) + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) diff --git a/openstackclient/tests/unit/identity/v3/test_endpoint.py b/openstackclient/tests/unit/identity/v3/test_endpoint.py index 62dcf58d5e..ba69317f5d 100644 --- a/openstackclient/tests/unit/identity/v3/test_endpoint.py +++ b/openstackclient/tests/unit/identity/v3/test_endpoint.py @@ -10,60 +10,68 @@ # License for the specific language governing permissions and limitations # under the License. +from openstack.identity.v3 import domain as _domain +from openstack.identity.v3 import endpoint as _endpoint +from openstack.identity.v3 import project as _project +from openstack.identity.v3 import region as _region +from openstack.identity.v3 import service as _service +from openstack.test import fakes as sdk_fakes + from openstackclient.identity.v3 import endpoint from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes class TestEndpoint(identity_fakes.TestIdentityv3): - def setUp(self): - super(TestEndpoint, self).setUp() + super().setUp() # Get a shortcut to the EndpointManager Mock - self.endpoints_mock = self.app.client_manager.identity.endpoints + self.endpoints_mock = self.identity_client.endpoints self.endpoints_mock.reset_mock() - self.ep_filter_mock = ( - self.app.client_manager.identity.endpoint_filter - ) + self.ep_filter_mock = self.identity_client.endpoint_filter self.ep_filter_mock.reset_mock() # Get a shortcut to the ServiceManager Mock - self.services_mock = self.app.client_manager.identity.services + self.services_mock = self.identity_client.services self.services_mock.reset_mock() # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains self.domains_mock.reset_mock() # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() -class TestEndpointCreate(TestEndpoint): - - service = identity_fakes.FakeService.create_one_service() - +class TestEndpointCreate(identity_fakes.TestIdentityv3): columns = ( 'enabled', 'id', 'interface', 'region', + 'region_id', 'service_id', + 'url', 'service_name', 'service_type', - 'url', ) def setUp(self): - super(TestEndpointCreate, self).setUp() - - self.endpoint = identity_fakes.FakeEndpoint.create_one_endpoint( - attrs={'service_id': self.service.id}) - self.endpoints_mock.create.return_value = self.endpoint + super().setUp() + + self.service = sdk_fakes.generate_fake_resource(_service.Service) + self.region = sdk_fakes.generate_fake_resource(_region.Region) + self.endpoint = sdk_fakes.generate_fake_resource( + resource_type=_endpoint.Endpoint, + service_id=self.service.id, + interface='admin', + region_id=self.region.id, + ) - # This is the return value for common.find_resource(service) - self.services_mock.get.return_value = self.service + self.identity_sdk_client.create_endpoint.return_value = self.endpoint + self.identity_sdk_client.find_service.return_value = 
self.service + self.identity_sdk_client.get_region.return_value = self.region # Get the command object to test self.cmd = endpoint.CreateEndpoint(self.app, None) @@ -82,6 +90,9 @@ def test_endpoint_create_no_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + # Fake endpoints come with a region ID by default, so set it to None + setattr(self.endpoint, "region_id", None) + # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. @@ -89,27 +100,25 @@ def test_endpoint_create_no_options(self): # Set expected values kwargs = { - 'service': self.service.id, + 'service_id': self.service.id, 'url': self.endpoint.url, 'interface': self.endpoint.interface, - 'enabled': True, - 'region': None, + 'is_enabled': True, } - self.endpoints_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_endpoint.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( True, self.endpoint.id, self.endpoint.interface, - self.endpoint.region, + None, + None, self.service.id, + self.endpoint.url, self.service.name, self.service.type, - self.endpoint.url, ) self.assertEqual(datalist, data) @@ -118,14 +127,15 @@ def test_endpoint_create_region(self): self.service.id, self.endpoint.interface, self.endpoint.url, - '--region', self.endpoint.region, + '--region', + self.region.id, ] verifylist = [ ('enabled', True), ('service', self.service.id), ('interface', self.endpoint.interface), ('url', self.endpoint.url), - ('region', self.endpoint.region), + ('region', self.region.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -136,27 +146,26 @@ def test_endpoint_create_region(self): # Set expected values kwargs = { - 'service': self.service.id, + 'service_id': self.service.id, 'url': self.endpoint.url, 'interface': self.endpoint.interface, - 'enabled': True, - 'region': self.endpoint.region, + 'is_enabled': True, + 'region_id': self.region.id, } - self.endpoints_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_endpoint.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( True, self.endpoint.id, self.endpoint.interface, - self.endpoint.region, + self.region.id, + self.region.id, self.service.id, + self.endpoint.url, self.service.name, self.service.type, - self.endpoint.url, ) self.assertEqual(datalist, data) @@ -165,7 +174,7 @@ def test_endpoint_create_enable(self): self.service.id, self.endpoint.interface, self.endpoint.url, - '--enable' + '--enable', ] verifylist = [ ('enabled', True), @@ -175,6 +184,9 @@ def test_endpoint_create_enable(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + # Fake endpoints come with a region ID by default, so set it to None + setattr(self.endpoint, "region_id", None) + # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
@@ -182,27 +194,25 @@ def test_endpoint_create_enable(self): # Set expected values kwargs = { - 'service': self.service.id, + 'service_id': self.service.id, 'url': self.endpoint.url, 'interface': self.endpoint.interface, - 'enabled': True, - 'region': None, + 'is_enabled': True, } - self.endpoints_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_endpoint.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( True, self.endpoint.id, self.endpoint.interface, - self.endpoint.region, + None, + None, self.service.id, + self.endpoint.url, self.service.name, self.service.type, - self.endpoint.url, ) self.assertEqual(datalist, data) @@ -221,6 +231,10 @@ def test_endpoint_create_disable(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + # Fake endpoints come with a region ID by default, so set it to None + setattr(self.endpoint, "region_id", None) + setattr(self.endpoint, "is_enabled", False) + # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. @@ -228,41 +242,37 @@ def test_endpoint_create_disable(self): # Set expected values kwargs = { - 'service': self.service.id, + 'service_id': self.service.id, 'url': self.endpoint.url, 'interface': self.endpoint.interface, - 'enabled': False, - 'region': None, + 'is_enabled': False, } - self.endpoints_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_endpoint.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( - True, + False, self.endpoint.id, self.endpoint.interface, - self.endpoint.region, + None, + None, self.service.id, + self.endpoint.url, self.service.name, self.service.type, - self.endpoint.url, ) self.assertEqual(datalist, data) -class TestEndpointDelete(TestEndpoint): - - endpoint = identity_fakes.FakeEndpoint.create_one_endpoint() - +class TestEndpointDelete(identity_fakes.TestIdentityv3): def setUp(self): - super(TestEndpointDelete, self).setUp() + super().setUp() - # This is the return value for utils.find_resource(endpoint) - self.endpoints_mock.get.return_value = self.endpoint - self.endpoints_mock.delete.return_value = None + self.endpoint = sdk_fakes.generate_fake_resource(_endpoint.Endpoint) + + self.identity_sdk_client.find_endpoint.return_value = self.endpoint + self.identity_sdk_client.delete_endpoint.return_value = None # Get the command object to test self.cmd = endpoint.DeleteEndpoint(self.app, None) @@ -278,18 +288,13 @@ def test_endpoint_delete(self): result = self.cmd.take_action(parsed_args) - self.endpoints_mock.delete.assert_called_with( + self.identity_sdk_client.delete_endpoint.assert_called_with( self.endpoint.id, ) self.assertIsNone(result) -class TestEndpointList(TestEndpoint): - - service = identity_fakes.FakeService.create_one_service() - endpoint = identity_fakes.FakeEndpoint.create_one_endpoint( - attrs={'service_id': service.id}) - +class TestEndpointList(identity_fakes.TestIdentityv3): columns = ( 'ID', 'Region', @@ -301,13 +306,21 @@ class TestEndpointList(TestEndpoint): ) def setUp(self): - super(TestEndpointList, self).setUp() - - self.endpoints_mock.list.return_value = [self.endpoint] + super().setUp() + + self.service = sdk_fakes.generate_fake_resource(_service.Service) + self.region = sdk_fakes.generate_fake_resource(_region.Region) + self.endpoint = sdk_fakes.generate_fake_resource( + resource_type=_endpoint.Endpoint, + service_id=self.service.id, + 
interface='admin', + region_id=self.region.id, + ) - # This is the return value for common.find_resource(service) - self.services_mock.get.return_value = self.service - self.services_mock.list.return_value = [self.service] + self.identity_sdk_client.endpoints.return_value = [self.endpoint] + self.identity_sdk_client.find_service.return_value = self.service + self.identity_sdk_client.services.return_value = [self.service] + self.identity_sdk_client.get_region.return_value = self.region # Get the command object to test self.cmd = endpoint.ListEndpoint(self.app, None) @@ -321,13 +334,13 @@ def test_endpoint_list_no_options(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.endpoints_mock.list.assert_called_with() + self.identity_sdk_client.endpoints.assert_called_with() self.assertEqual(self.columns, columns) datalist = ( ( self.endpoint.id, - self.endpoint.region, + self.region.id, self.service.name, self.service.type, True, @@ -339,7 +352,8 @@ def test_endpoint_list_no_options(self): def test_endpoint_list_service(self): arglist = [ - '--service', self.service.id, + '--service', + self.service.id, ] verifylist = [ ('service', self.service.id), @@ -353,15 +367,15 @@ def test_endpoint_list_service(self): # Set expected values kwargs = { - 'service': self.service.id, + 'service_id': self.service.id, } - self.endpoints_mock.list.assert_called_with(**kwargs) + self.identity_sdk_client.endpoints.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( ( self.endpoint.id, - self.endpoint.region, + self.region.id, self.service.name, self.service.type, True, @@ -373,7 +387,8 @@ def test_endpoint_list_service(self): def test_endpoint_list_interface(self): arglist = [ - '--interface', self.endpoint.interface, + '--interface', + self.endpoint.interface, ] verifylist = [ ('interface', self.endpoint.interface), @@ -389,13 +404,13 @@ def test_endpoint_list_interface(self): kwargs = { 'interface': self.endpoint.interface, } - self.endpoints_mock.list.assert_called_with(**kwargs) + self.identity_sdk_client.endpoints.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( ( self.endpoint.id, - self.endpoint.region, + self.region.id, self.service.name, self.service.type, True, @@ -407,10 +422,11 @@ def test_endpoint_list_interface(self): def test_endpoint_list_region(self): arglist = [ - '--region', self.endpoint.region, + '--region', + self.region.id, ] verifylist = [ - ('region', self.endpoint.region), + ('region', self.region.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -421,15 +437,15 @@ def test_endpoint_list_region(self): # Set expected values kwargs = { - 'region': self.endpoint.region, + 'region_id': self.region.id, } - self.endpoints_mock.list.assert_called_with(**kwargs) + self.identity_sdk_client.endpoints.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( ( self.endpoint.id, - self.endpoint.region, + self.region.id, self.service.name, self.service.type, True, @@ -440,18 +456,15 @@ def test_endpoint_list_region(self): self.assertEqual(datalist, tuple(data)) def test_endpoint_list_project_with_project_domain(self): - project = identity_fakes.FakeProject.create_one_project() - domain = identity_fakes.FakeDomain.create_one_domain() + project = sdk_fakes.generate_fake_resource(_project.Project) + domain = sdk_fakes.generate_fake_resource(_domain.Domain) - 
self.ep_filter_mock.list_endpoints_for_project.return_value = [ + self.identity_sdk_client.project_endpoints.return_value = [ self.endpoint ] - self.projects_mock.get.return_value = project + self.identity_sdk_client.find_project.return_value = project - arglist = [ - '--project', project.name, - '--project-domain', domain.name - ] + arglist = ['--project', project.name, '--project-domain', domain.name] verifylist = [ ('project', project.name), ('project_domain', domain.name), @@ -462,7 +475,7 @@ def test_endpoint_list_project_with_project_domain(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.ep_filter_mock.list_endpoints_for_project.assert_called_with( + self.identity_sdk_client.project_endpoints.assert_called_with( project=project.id ) @@ -470,7 +483,7 @@ def test_endpoint_list_project_with_project_domain(self): datalist = ( ( self.endpoint.id, - self.endpoint.region, + self.region.id, self.service.name, self.service.type, True, @@ -481,22 +494,20 @@ def test_endpoint_list_project_with_project_domain(self): self.assertEqual(datalist, tuple(data)) -class TestEndpointSet(TestEndpoint): - - service = identity_fakes.FakeService.create_one_service() - endpoint = identity_fakes.FakeEndpoint.create_one_endpoint( - attrs={'service_id': service.id}) - +class TestEndpointSet(identity_fakes.TestIdentityv3): def setUp(self): - super(TestEndpointSet, self).setUp() - - # This is the return value for utils.find_resource(endpoint) - self.endpoints_mock.get.return_value = self.endpoint + super().setUp() - self.endpoints_mock.update.return_value = self.endpoint + self.service = sdk_fakes.generate_fake_resource(_service.Service) + self.endpoint = sdk_fakes.generate_fake_resource( + resource_type=_endpoint.Endpoint, + service_id=self.service.id, + interface='admin', + ) - # This is the return value for common.find_resource(service) - self.services_mock.get.return_value = self.service + self.identity_sdk_client.find_endpoint.return_value = self.endpoint + self.identity_sdk_client.update_endpoint.return_value = self.endpoint + self.identity_sdk_client.find_service.return_value = self.service # Get the command object to test self.cmd = endpoint.SetEndpoint(self.app, None) @@ -512,24 +523,13 @@ def test_endpoint_set_no_options(self): result = self.cmd.take_action(parsed_args) - kwargs = { - 'enabled': None, - 'interface': None, - 'region': None, - 'service': None, - 'url': None, - } - self.endpoints_mock.update.assert_called_with( - self.endpoint.id, - **kwargs + self.identity_sdk_client.update_endpoint.assert_called_with( + self.endpoint.id ) self.assertIsNone(result) def test_endpoint_set_interface(self): - arglist = [ - '--interface', 'public', - self.endpoint.id - ] + arglist = ['--interface', 'public', self.endpoint.id] verifylist = [ ('interface', 'public'), ('endpoint', self.endpoint.id), @@ -540,23 +540,15 @@ def test_endpoint_set_interface(self): # Set expected values kwargs = { - 'enabled': None, 'interface': 'public', - 'url': None, - 'region': None, - 'service': None, } - self.endpoints_mock.update.assert_called_with( - self.endpoint.id, - **kwargs + self.identity_sdk_client.update_endpoint.assert_called_with( + self.endpoint.id, **kwargs ) self.assertIsNone(result) def test_endpoint_set_url(self): - arglist = [ - '--url', 'http://localhost:5000', - self.endpoint.id - ] + arglist = ['--url', 'http://localhost:5000', self.endpoint.id] verifylist = [ ('url', 'http://localhost:5000'), 
('endpoint', self.endpoint.id), @@ -567,23 +559,15 @@ def test_endpoint_set_url(self): # Set expected values kwargs = { - 'enabled': None, - 'interface': None, 'url': 'http://localhost:5000', - 'region': None, - 'service': None, } - self.endpoints_mock.update.assert_called_with( - self.endpoint.id, - **kwargs + self.identity_sdk_client.update_endpoint.assert_called_with( + self.endpoint.id, **kwargs ) self.assertIsNone(result) def test_endpoint_set_service(self): - arglist = [ - '--service', self.service.id, - self.endpoint.id - ] + arglist = ['--service', self.service.id, self.endpoint.id] verifylist = [ ('service', self.service.id), ('endpoint', self.endpoint.id), @@ -594,23 +578,15 @@ def test_endpoint_set_service(self): # Set expected values kwargs = { - 'enabled': None, - 'interface': None, - 'url': None, - 'region': None, - 'service': self.service.id, + 'service_id': self.service.id, } - self.endpoints_mock.update.assert_called_with( - self.endpoint.id, - **kwargs + self.identity_sdk_client.update_endpoint.assert_called_with( + self.endpoint.id, **kwargs ) self.assertIsNone(result) def test_endpoint_set_region(self): - arglist = [ - '--region', 'e-rzzz', - self.endpoint.id - ] + arglist = ['--region', 'e-rzzz', self.endpoint.id] verifylist = [ ('region', 'e-rzzz'), ('endpoint', self.endpoint.id), @@ -621,23 +597,15 @@ def test_endpoint_set_region(self): # Set expected values kwargs = { - 'enabled': None, - 'interface': None, - 'url': None, - 'region': 'e-rzzz', - 'service': None, + 'region_id': 'e-rzzz', } - self.endpoints_mock.update.assert_called_with( - self.endpoint.id, - **kwargs + self.identity_sdk_client.update_endpoint.assert_called_with( + self.endpoint.id, **kwargs ) self.assertIsNone(result) def test_endpoint_set_enable(self): - arglist = [ - '--enable', - self.endpoint.id - ] + arglist = ['--enable', self.endpoint.id] verifylist = [ ('enabled', True), ('endpoint', self.endpoint.id), @@ -648,23 +616,15 @@ def test_endpoint_set_enable(self): # Set expected values kwargs = { - 'enabled': True, - 'interface': None, - 'url': None, - 'region': None, - 'service': None, + 'is_enabled': True, } - self.endpoints_mock.update.assert_called_with( - self.endpoint.id, - **kwargs + self.identity_sdk_client.update_endpoint.assert_called_with( + self.endpoint.id, **kwargs ) self.assertIsNone(result) def test_endpoint_set_disable(self): - arglist = [ - '--disable', - self.endpoint.id - ] + arglist = ['--disable', self.endpoint.id] verifylist = [ ('disabled', True), ('endpoint', self.endpoint.id), @@ -675,32 +635,31 @@ def test_endpoint_set_disable(self): # Set expected values kwargs = { - 'enabled': False, - 'interface': None, - 'url': None, - 'region': None, - 'service': None, + 'is_enabled': False, } - self.endpoints_mock.update.assert_called_with( - self.endpoint.id, - **kwargs + self.identity_sdk_client.update_endpoint.assert_called_with( + self.endpoint.id, **kwargs ) self.assertIsNone(result) -class TestEndpointShow(TestEndpoint): - - service = identity_fakes.FakeService.create_one_service() - endpoint = identity_fakes.FakeEndpoint.create_one_endpoint( - attrs={'service_id': service.id}) - +class TestEndpointShow(identity_fakes.TestIdentityv3): def setUp(self): - super(TestEndpointShow, self).setUp() + super().setUp() + + self.service = sdk_fakes.generate_fake_resource(_service.Service) + self.region = sdk_fakes.generate_fake_resource(_region.Region) + self.endpoint = sdk_fakes.generate_fake_resource( + resource_type=_endpoint.Endpoint, + service_id=self.service.id, + 
interface='admin', + region_id=self.region.id, + ) - self.endpoints_mock.get.return_value = self.endpoint + self.identity_sdk_client.find_endpoint.return_value = self.endpoint - # This is the return value for common.find_resource(service) - self.services_mock.get.return_value = self.service + self.identity_sdk_client.find_service.return_value = self.service + self.identity_sdk_client.get_region.return_value = self.region # Get the command object to test self.cmd = endpoint.ShowEndpoint(self.app, None) @@ -718,8 +677,8 @@ def test_endpoint_show(self): # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. columns, data = self.cmd.take_action(parsed_args) - self.endpoints_mock.get.assert_called_with( - self.endpoint.id, + self.identity_sdk_client.find_endpoint.assert_called_with( + self.endpoint.id, ignore_missing=False ) collist = ( @@ -727,99 +686,101 @@ def test_endpoint_show(self): 'id', 'interface', 'region', + 'region_id', 'service_id', + 'url', 'service_name', 'service_type', - 'url', ) self.assertEqual(collist, columns) datalist = ( True, self.endpoint.id, self.endpoint.interface, - self.endpoint.region, + self.region.id, + self.region.id, self.service.id, + self.endpoint.url, self.service.name, self.service.type, - self.endpoint.url, ) self.assertEqual(datalist, data) class TestEndpointCreateServiceWithoutName(TestEndpointCreate): - - service = identity_fakes.FakeService.create_one_service( - attrs={'service_name': ''}) + service = sdk_fakes.generate_fake_resource( + resource_type=_service.Service, + name='', + ) + region = sdk_fakes.generate_fake_resource(_region.Region) + endpoint = sdk_fakes.generate_fake_resource( + resource_type=_endpoint.Endpoint, + service_id=service.id, + interface='admin', + region_id=region.id, + ) def setUp(self): - super(TestEndpointCreate, self).setUp() - - self.endpoint = identity_fakes.FakeEndpoint.create_one_endpoint( - attrs={'service_id': self.service.id}) - - self.endpoints_mock.create.return_value = self.endpoint - - # This is the return value for common.find_resource(service) - self.services_mock.get.return_value = self.service + super().setUp() # Get the command object to test self.cmd = endpoint.CreateEndpoint(self.app, None) class TestEndpointListServiceWithoutName(TestEndpointList): - - service = identity_fakes.FakeService.create_one_service( - attrs={'service_name': ''}) - endpoint = identity_fakes.FakeEndpoint.create_one_endpoint( - attrs={'service_id': service.id}) + service = sdk_fakes.generate_fake_resource( + resource_type=_service.Service, + name='', + ) + region = sdk_fakes.generate_fake_resource(_region.Region) + endpoint = sdk_fakes.generate_fake_resource( + resource_type=_endpoint.Endpoint, + service_id=service.id, + interface='admin', + region_id=region.id, + ) def setUp(self): - super(TestEndpointList, self).setUp() - - self.endpoints_mock.list.return_value = [self.endpoint] - - # This is the return value for common.find_resource(service) - self.services_mock.get.return_value = self.service - self.services_mock.list.return_value = [self.service] + super().setUp() # Get the command object to test self.cmd = endpoint.ListEndpoint(self.app, None) class TestEndpointShowServiceWithoutName(TestEndpointShow): - - service = identity_fakes.FakeService.create_one_service( - attrs={'service_name': ''}) - endpoint = identity_fakes.FakeEndpoint.create_one_endpoint( - attrs={'service_id': service.id}) + service = sdk_fakes.generate_fake_resource( + resource_type=_service.Service, + name='', + ) + region = 
sdk_fakes.generate_fake_resource(_region.Region) + endpoint = sdk_fakes.generate_fake_resource( + resource_type=_endpoint.Endpoint, + service_id=service.id, + interface='admin', + region_id=region.id, + ) def setUp(self): - super(TestEndpointShow, self).setUp() - - self.endpoints_mock.get.return_value = self.endpoint - - # This is the return value for common.find_resource(service) - self.services_mock.get.return_value = self.service + super().setUp() # Get the command object to test self.cmd = endpoint.ShowEndpoint(self.app, None) class TestAddProjectToEndpoint(TestEndpoint): - project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() service = identity_fakes.FakeService.create_one_service() endpoint = identity_fakes.FakeEndpoint.create_one_endpoint( - attrs={'service_id': service.id}) + attrs={'service_id': service.id} + ) new_ep_filter = identity_fakes.FakeEndpoint.create_one_endpoint_filter( - attrs={'endpoint': endpoint.id, - 'project': project.id} + attrs={'endpoint': endpoint.id, 'project': project.id} ) def setUp(self): - super(TestAddProjectToEndpoint, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.endpoints_mock.get.return_value = self.endpoint @@ -844,8 +805,7 @@ def test_add_project_to_endpoint_no_option(self): result = self.cmd.take_action(parsed_args) self.ep_filter_mock.add_endpoint_to_project.assert_called_with( - project=self.project.id, - endpoint=self.endpoint.id + project=self.project.id, endpoint=self.endpoint.id ) self.assertIsNone(result) @@ -853,7 +813,8 @@ def test_add_project_to_endpoint_with_option(self): arglist = [ self.endpoint.id, self.project.id, - '--project-domain', self.domain.id, + '--project-domain', + self.domain.id, ] verifylist = [ ('endpoint', self.endpoint.id), @@ -864,22 +825,21 @@ def test_add_project_to_endpoint_with_option(self): result = self.cmd.take_action(parsed_args) self.ep_filter_mock.add_endpoint_to_project.assert_called_with( - project=self.project.id, - endpoint=self.endpoint.id + project=self.project.id, endpoint=self.endpoint.id ) self.assertIsNone(result) class TestRemoveProjectEndpoint(TestEndpoint): - project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() service = identity_fakes.FakeService.create_one_service() endpoint = identity_fakes.FakeEndpoint.create_one_endpoint( - attrs={'service_id': service.id}) + attrs={'service_id': service.id} + ) def setUp(self): - super(TestRemoveProjectEndpoint, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.endpoints_mock.get.return_value = self.endpoint @@ -914,7 +874,8 @@ def test_remove_project_endpoint_with_options(self): arglist = [ self.endpoint.id, self.project.id, - '--project-domain', self.domain.id, + '--project-domain', + self.domain.id, ] verifylist = [ ('endpoint', self.endpoint.id), diff --git a/openstackclient/tests/unit/identity/v3/test_endpoint_group.py b/openstackclient/tests/unit/identity/v3/test_endpoint_group.py index c081fa1f11..e739a7d92a 100644 --- a/openstackclient/tests/unit/identity/v3/test_endpoint_group.py +++ b/openstackclient/tests/unit/identity/v3/test_endpoint_group.py @@ -18,35 +18,29 @@ class TestEndpointGroup(identity_fakes.TestIdentityv3): - def setUp(self): - super(TestEndpointGroup, self).setUp() + super().setUp() # Get a shortcut to the EndpointManager Mock - self.endpoint_groups_mock = ( - self.app.client_manager.identity.endpoint_groups - ) + 
self.endpoint_groups_mock = self.identity_client.endpoint_groups self.endpoint_groups_mock.reset_mock() - self.epf_mock = ( - self.app.client_manager.identity.endpoint_filter - ) + self.epf_mock = self.identity_client.endpoint_filter self.epf_mock.reset_mock() # Get a shortcut to the ServiceManager Mock - self.services_mock = self.app.client_manager.identity.services + self.services_mock = self.identity_client.services self.services_mock.reset_mock() # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains self.domains_mock.reset_mock() # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() class TestEndpointGroupCreate(TestEndpointGroup): - columns = ( 'description', 'filters', @@ -55,11 +49,13 @@ class TestEndpointGroupCreate(TestEndpointGroup): ) def setUp(self): - super(TestEndpointGroupCreate, self).setUp() + super().setUp() self.endpoint_group = ( identity_fakes.FakeEndpointGroup.create_one_endpointgroup( - attrs={'filters': identity_fakes.endpoint_group_filters})) + attrs={'filters': identity_fakes.endpoint_group_filters} + ) + ) self.endpoint_groups_mock.create.return_value = self.endpoint_group @@ -68,7 +64,8 @@ def setUp(self): def test_endpointgroup_create_no_options(self): arglist = [ - '--description', self.endpoint_group.description, + '--description', + self.endpoint_group.description, self.endpoint_group.name, identity_fakes.endpoint_group_file_path, ] @@ -81,8 +78,11 @@ def test_endpointgroup_create_no_options(self): mocker = mock.Mock() mocker.return_value = identity_fakes.endpoint_group_filters - with mock.patch("openstackclient.identity.v3.endpoint_group." - "CreateEndpointGroup._read_filters", mocker): + with mock.patch( + "openstackclient.identity.v3.endpoint_group." 
+ "CreateEndpointGroup._read_filters", + mocker, + ): columns, data = self.cmd.take_action(parsed_args) # Set expected values @@ -92,9 +92,7 @@ def test_endpointgroup_create_no_options(self): 'description': self.endpoint_group.description, } - self.endpoint_groups_mock.create.assert_called_with( - **kwargs - ) + self.endpoint_groups_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( @@ -107,12 +105,12 @@ def test_endpointgroup_create_no_options(self): class TestEndpointGroupDelete(TestEndpointGroup): - endpoint_group = ( - identity_fakes.FakeEndpointGroup.create_one_endpointgroup()) + identity_fakes.FakeEndpointGroup.create_one_endpointgroup() + ) def setUp(self): - super(TestEndpointGroupDelete, self).setUp() + super().setUp() # This is the return value for utils.find_resource(endpoint) self.endpoint_groups_mock.get.return_value = self.endpoint_group @@ -139,9 +137,9 @@ def test_endpointgroup_delete(self): class TestEndpointGroupList(TestEndpointGroup): - endpoint_group = ( - identity_fakes.FakeEndpointGroup.create_one_endpointgroup()) + identity_fakes.FakeEndpointGroup.create_one_endpointgroup() + ) project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() @@ -152,14 +150,16 @@ class TestEndpointGroupList(TestEndpointGroup): ) def setUp(self): - super(TestEndpointGroupList, self).setUp() + super().setUp() self.endpoint_groups_mock.list.return_value = [self.endpoint_group] self.endpoint_groups_mock.get.return_value = self.endpoint_group self.epf_mock.list_projects_for_endpoint_group.return_value = [ - self.project] + self.project + ] self.epf_mock.list_endpoint_groups_for_project.return_value = [ - self.endpoint_group] + self.endpoint_group + ] # Get the command object to test self.cmd = endpoint_group.ListEndpointGroup(self.app, None) @@ -187,7 +187,8 @@ def test_endpoint_group_list_no_options(self): def test_endpoint_group_list_projects_by_endpoint_group(self): arglist = [ - '--endpointgroup', self.endpoint_group.id, + '--endpointgroup', + self.endpoint_group.id, ] verifylist = [ ('endpointgroup', self.endpoint_group.id), @@ -219,8 +220,10 @@ def test_endpoint_group_list_by_project(self): self.projects_mock.get.return_value = self.project arglist = [ - '--project', self.project.name, - '--domain', self.domain.name + '--project', + self.project.name, + '--domain', + self.domain.name, ] verifylist = [ ('project', self.project.name), @@ -248,12 +251,12 @@ def test_endpoint_group_list_by_project(self): class TestEndpointGroupSet(TestEndpointGroup): - endpoint_group = ( - identity_fakes.FakeEndpointGroup.create_one_endpointgroup()) + identity_fakes.FakeEndpointGroup.create_one_endpointgroup() + ) def setUp(self): - super(TestEndpointGroupSet, self).setUp() + super().setUp() # This is the return value for utils.find_resource(endpoint) self.endpoint_groups_mock.get.return_value = self.endpoint_group @@ -274,22 +277,14 @@ def test_endpoint_group_set_no_options(self): result = self.cmd.take_action(parsed_args) - kwargs = { - 'name': None, - 'filters': None, - 'description': '' - } + kwargs = {'name': None, 'filters': None, 'description': ''} self.endpoint_groups_mock.update.assert_called_with( - self.endpoint_group.id, - **kwargs + self.endpoint_group.id, **kwargs ) self.assertIsNone(result) def test_endpoint_group_set_name(self): - arglist = [ - '--name', 'qwerty', - self.endpoint_group.id - ] + arglist = ['--name', 'qwerty', self.endpoint_group.id] verifylist = [ ('name', 'qwerty'), 
('endpointgroup', self.endpoint_group.id), @@ -299,20 +294,16 @@ def test_endpoint_group_set_name(self): result = self.cmd.take_action(parsed_args) # Set expected values - kwargs = { - 'name': 'qwerty', - 'filters': None, - 'description': '' - } + kwargs = {'name': 'qwerty', 'filters': None, 'description': ''} self.endpoint_groups_mock.update.assert_called_with( - self.endpoint_group.id, - **kwargs + self.endpoint_group.id, **kwargs ) self.assertIsNone(result) def test_endpoint_group_set_filters(self): arglist = [ - '--filters', identity_fakes.endpoint_group_file_path, + '--filters', + identity_fakes.endpoint_group_file_path, self.endpoint_group.id, ] verifylist = [ @@ -324,8 +315,11 @@ def test_endpoint_group_set_filters(self): mocker = mock.Mock() mocker.return_value = identity_fakes.endpoint_group_filters_2 - with mock.patch("openstackclient.identity.v3.endpoint_group." - "SetEndpointGroup._read_filters", mocker): + with mock.patch( + "openstackclient.identity.v3.endpoint_group." + "SetEndpointGroup._read_filters", + mocker, + ): result = self.cmd.take_action(parsed_args) # Set expected values @@ -336,17 +330,13 @@ def test_endpoint_group_set_filters(self): } self.endpoint_groups_mock.update.assert_called_with( - self.endpoint_group.id, - **kwargs + self.endpoint_group.id, **kwargs ) self.assertIsNone(result) def test_endpoint_group_set_description(self): - arglist = [ - '--description', 'qwerty', - self.endpoint_group.id - ] + arglist = ['--description', 'qwerty', self.endpoint_group.id] verifylist = [ ('description', 'qwerty'), ('endpointgroup', self.endpoint_group.id), @@ -362,26 +352,26 @@ def test_endpoint_group_set_description(self): 'description': 'qwerty', } self.endpoint_groups_mock.update.assert_called_with( - self.endpoint_group.id, - **kwargs + self.endpoint_group.id, **kwargs ) self.assertIsNone(result) class TestAddProjectToEndpointGroup(TestEndpointGroup): - project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() endpoint_group = ( - identity_fakes.FakeEndpointGroup.create_one_endpointgroup()) + identity_fakes.FakeEndpointGroup.create_one_endpointgroup() + ) new_ep_filter = ( identity_fakes.FakeEndpointGroup.create_one_endpointgroup_filter( - attrs={'endpointgroup': endpoint_group.id, - 'project': project.id})) + attrs={'endpointgroup': endpoint_group.id, 'project': project.id} + ) + ) def setUp(self): - super(TestAddProjectToEndpointGroup, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.endpoint_groups_mock.get.return_value = self.endpoint_group @@ -416,7 +406,8 @@ def test_add_project_to_endpoint_with_option(self): arglist = [ self.endpoint_group.id, self.project.id, - '--project-domain', self.domain.id, + '--project-domain', + self.domain.id, ] verifylist = [ ('endpointgroup', self.endpoint_group.id), @@ -434,14 +425,14 @@ def test_add_project_to_endpoint_with_option(self): class TestRemoveProjectEndpointGroup(TestEndpointGroup): - project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() endpoint_group = ( - identity_fakes.FakeEndpointGroup.create_one_endpointgroup()) + identity_fakes.FakeEndpointGroup.create_one_endpointgroup() + ) def setUp(self): - super(TestRemoveProjectEndpointGroup, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.endpoint_groups_mock.get.return_value = self.endpoint_group @@ -452,7 +443,8 @@ def setUp(self): # Get the command object to test self.cmd = 
endpoint_group.RemoveProjectFromEndpointGroup( - self.app, None) + self.app, None + ) def test_remove_project_endpoint_no_options(self): arglist = [ @@ -477,7 +469,8 @@ def test_remove_project_endpoint_with_options(self): arglist = [ self.endpoint_group.id, self.project.id, - '--project-domain', self.domain.id, + '--project-domain', + self.domain.id, ] verifylist = [ ('endpointgroup', self.endpoint_group.id), diff --git a/openstackclient/tests/unit/identity/v3/test_group.py b/openstackclient/tests/unit/identity/v3/test_group.py index 04ba0dbe03..598402e070 100644 --- a/openstackclient/tests/unit/identity/v3/test_group.py +++ b/openstackclient/tests/unit/identity/v3/test_group.py @@ -14,48 +14,33 @@ from unittest import mock from unittest.mock import call -from keystoneauth1 import exceptions as ks_exc +from openstack import exceptions as sdk_exc +from openstack.identity.v3 import domain as _domain +from openstack.identity.v3 import group as _group +from openstack.identity.v3 import user as _user +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions -from osc_lib import utils from openstackclient.identity.v3 import group from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestGroup(identity_fakes.TestIdentityv3): - +class TestGroupAddUser(identity_fakes.TestIdentityv3): def setUp(self): - super(TestGroup, self).setUp() - - # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains - self.domains_mock.reset_mock() - - # Get a shortcut to the GroupManager Mock - self.groups_mock = self.app.client_manager.identity.groups - self.groups_mock.reset_mock() - - # Get a shortcut to the UserManager Mock - self.users_mock = self.app.client_manager.identity.users - self.users_mock.reset_mock() - + super().setUp() -class TestGroupAddUser(TestGroup): - - _group = identity_fakes.FakeGroup.create_one_group() - users = identity_fakes.FakeUser.create_users(count=2) - - def setUp(self): - super(TestGroupAddUser, self).setUp() + self._group = sdk_fakes.generate_fake_resource(_group.Group) + self.users = tuple( + sdk_fakes.generate_fake_resources(_user.User, count=2) + ) - self.groups_mock.get.return_value = self._group - self.users_mock.get = ( - identity_fakes.FakeUser.get_users(self.users)) - self.users_mock.add_to_group.return_value = None + self.identity_sdk_client.find_group.return_value = self._group + self.identity_sdk_client.add_user_to_group.return_value = None self.cmd = group.AddUserToGroup(self.app, None) def test_group_add_user(self): + self.identity_sdk_client.find_user.return_value = self.users[0] arglist = [ self._group.name, self.users[0].name, @@ -67,11 +52,16 @@ def test_group_add_user(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.users_mock.add_to_group.assert_called_once_with( - self.users[0].id, self._group.id) + self.identity_sdk_client.add_user_to_group.assert_called_once_with( + self.users[0].id, self._group.id + ) self.assertIsNone(result) def test_group_add_multi_users(self): + self.identity_sdk_client.find_user.side_effect = [ + self.users[0], + self.users[1], + ] arglist = [ self._group.name, self.users[0].name, @@ -84,15 +74,19 @@ def test_group_add_multi_users(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - calls = [call(self.users[0].id, self._group.id), - call(self.users[1].id, self._group.id)] - 
self.users_mock.add_to_group.assert_has_calls(calls) + calls = [ + call(self.users[0].id, self._group.id), + call(self.users[1].id, self._group.id), + ] + self.identity_sdk_client.add_user_to_group.assert_has_calls(calls) self.assertIsNone(result) @mock.patch.object(group.LOG, 'error') def test_group_add_user_with_error(self, mock_error): - self.users_mock.add_to_group.side_effect = [ - exceptions.CommandError(), None] + self.identity_sdk_client.add_user_to_group.side_effect = [ + sdk_exc.ResourceNotFound, + None, + ] arglist = [ self._group.name, self.users[0].name, @@ -107,26 +101,22 @@ def test_group_add_user_with_error(self, mock_error): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - msg = "1 of 2 users not added to group %s." % self._group.name + msg = f"1 of 2 users not added to group {self._group.name}." self.assertEqual(msg, str(e)) - msg = ("%(user)s not added to group %(group)s: ") % { - 'user': self.users[0].name, - 'group': self._group.name, - } + msg = f"{self.users[0].name} not added to group {self._group.name}: {str(sdk_exc.ResourceNotFound())}" mock_error.assert_called_once_with(msg) -class TestGroupCheckUser(TestGroup): - - group = identity_fakes.FakeGroup.create_one_group() - user = identity_fakes.FakeUser.create_one_user() - +class TestGroupCheckUser(identity_fakes.TestIdentityv3): def setUp(self): - super(TestGroupCheckUser, self).setUp() + super().setUp() + + self.group = sdk_fakes.generate_fake_resource(_group.Group) + self.user = sdk_fakes.generate_fake_resource(_user.User) - self.groups_mock.get.return_value = self.group - self.users_mock.get.return_value = self.user - self.users_mock.check_in_group.return_value = None + self.identity_sdk_client.find_group.return_value = self.group + self.identity_sdk_client.find_user.return_value = self.user + self.identity_sdk_client.check_user_in_group.return_value = True self.cmd = group.CheckUserInGroup(self.app, None) @@ -142,14 +132,15 @@ def test_group_check_user(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.users_mock.check_in_group.assert_called_once_with( - self.user.id, self.group.id) + self.identity_sdk_client.check_user_in_group.assert_called_once_with( + self.user.id, self.group.id + ) self.assertIsNone(result) def test_group_check_user_server_error(self): - def server_error(*args): - raise ks_exc.http.InternalServerError - self.users_mock.check_in_group.side_effect = server_error + self.identity_sdk_client.check_user_in_group.side_effect = ( + sdk_exc.SDKException + ) arglist = [ self.group.name, self.user.name, @@ -160,13 +151,13 @@ def server_error(*args): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(ks_exc.http.InternalServerError, - self.cmd.take_action, parsed_args) - + self.assertRaises( + sdk_exc.SDKException, self.cmd.take_action, parsed_args + ) -class TestGroupCreate(TestGroup): - domain = identity_fakes.FakeDomain.create_one_domain() +class TestGroupCreate(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource(_domain.Domain) columns = ( 'description', @@ -176,23 +167,21 @@ class TestGroupCreate(TestGroup): ) def setUp(self): - super(TestGroupCreate, self).setUp() - self.group = identity_fakes.FakeGroup.create_one_group( - attrs={'domain_id': self.domain.id}) - self.data = ( - self.group.description, - self.group.domain_id, - self.group.id, - self.group.name, + super().setUp() + self.group = 
sdk_fakes.generate_fake_resource( + _group.Group, description=None, domain_id=None + ) + self.group_with_options = sdk_fakes.generate_fake_resource( + _group.Group, domain_id=self.domain.id ) - self.groups_mock.create.return_value = self.group - self.groups_mock.get.return_value = self.group - self.domains_mock.get.return_value = self.domain + self.identity_sdk_client.find_group.return_value = self.group + self.identity_sdk_client.find_domain.return_value = self.domain self.cmd = group.CreateGroup(self.app, None) def test_group_create(self): + self.identity_sdk_client.create_group.return_value = self.group arglist = [ self.group.name, ] @@ -202,38 +191,56 @@ def test_group_create(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.groups_mock.create.assert_called_once_with( + self.identity_sdk_client.create_group.assert_called_once_with( name=self.group.name, - domain=None, - description=None, ) self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) + datalist = ( + self.group.description, + None, + self.group.id, + self.group.name, + ) + self.assertEqual(datalist, data) def test_group_create_with_options(self): + self.identity_sdk_client.create_group.return_value = ( + self.group_with_options + ) arglist = [ - '--domain', self.domain.name, - '--description', self.group.description, - self.group.name, + '--domain', + self.domain.name, + '--description', + self.group_with_options.description, + self.group_with_options.name, ] verifylist = [ ('domain', self.domain.name), - ('description', self.group.description), - ('name', self.group.name), + ('description', self.group_with_options.description), + ('name', self.group_with_options.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.groups_mock.create.assert_called_once_with( - name=self.group.name, - domain=self.domain.id, - description=self.group.description, + self.identity_sdk_client.create_group.assert_called_once_with( + name=self.group_with_options.name, + domain_id=self.domain.id, + description=self.group_with_options.description, ) self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) + datalist = ( + self.group_with_options.description, + self.domain.id, + self.group_with_options.id, + self.group_with_options.name, + ) + self.assertEqual(datalist, data) def test_group_create_or_show(self): - self.groups_mock.create.side_effect = ks_exc.Conflict() + self.identity_sdk_client.find_group.return_value = self.group + self.identity_sdk_client.create_group.side_effect = ( + sdk_exc.ConflictException + ) arglist = [ '--or-show', self.group.name, @@ -245,47 +252,99 @@ def test_group_create_or_show(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.groups_mock.get.assert_called_once_with(self.group.name) + self.identity_sdk_client.find_group.assert_called_once_with( + self.group.name, ignore_missing=False + ) self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) + datalist = ( + self.group.description, + None, + self.group.id, + self.group.name, + ) + self.assertEqual(datalist, data) + def test_group_create_or_show_with_domain(self): + self.identity_sdk_client.find_group.return_value = ( + self.group_with_options + ) + self.identity_sdk_client.create_group.side_effect = ( + sdk_exc.ConflictException + ) + arglist = [ + '--or-show', + 
self.group_with_options.name, + '--domain', + self.domain.id, + ] + verifylist = [ + ('or_show', True), + ('name', self.group_with_options.name), + ('domain', self.domain.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.identity_sdk_client.find_group.assert_called_once_with( + self.group_with_options.name, + domain_id=self.domain.id, + ignore_missing=False, + ) + self.assertEqual(self.columns, columns) + datalist = ( + self.group_with_options.description, + self.domain.id, + self.group_with_options.id, + self.group_with_options.name, + ) + self.assertEqual(datalist, data) -class TestGroupDelete(TestGroup): - domain = identity_fakes.FakeDomain.create_one_domain() - groups = identity_fakes.FakeGroup.create_groups( - attrs={'domain_id': domain.id}, count=2) +class TestGroupDelete(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource(_domain.Domain) def setUp(self): - super(TestGroupDelete, self).setUp() + super().setUp() - self.groups_mock.get = ( - identity_fakes.FakeGroup.get_groups(self.groups)) - self.groups_mock.delete.return_value = None - self.domains_mock.get.return_value = self.domain + self.group = sdk_fakes.generate_fake_resource( + _group.Group, + domain_id=None, + ) + self.group_with_domain = sdk_fakes.generate_fake_resource( + _group.Group, + name=self.group.name, + domain_id=self.domain.id, + ) + self.identity_sdk_client.delete_group.return_value = None + self.identity_sdk_client.find_domain.return_value = self.domain self.cmd = group.DeleteGroup(self.app, None) def test_group_delete(self): + self.identity_sdk_client.find_group.return_value = self.group arglist = [ - self.groups[0].id, + self.group.id, ] verifylist = [ - ('groups', [self.groups[0].id]), + ('groups', [self.group.id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.groups_mock.get.assert_called_once_with(self.groups[0].id) - self.groups_mock.delete.assert_called_once_with(self.groups[0].id) + self.identity_sdk_client.find_group.assert_called_once_with( + name_or_id=self.group.id, ignore_missing=False + ) + self.identity_sdk_client.delete_group.assert_called_once_with( + self.group.id + ) self.assertIsNone(result) def test_group_multi_delete(self): - arglist = [] - verifylist = [] - - for g in self.groups: - arglist.append(g.id) + self.identity_sdk_client.find_group.side_effect = [ + self.group, + self.group_with_domain, + ] + arglist = [self.group.id, self.group_with_domain.id] verifylist = [ ('groups', arglist), ] @@ -293,39 +352,50 @@ def test_group_multi_delete(self): result = self.cmd.take_action(parsed_args) - calls = [] - for g in self.groups: - calls.append(call(g.id)) - self.groups_mock.delete.assert_has_calls(calls) + self.identity_sdk_client.delete_group.assert_has_calls( + [mock.call(self.group.id), mock.call(self.group_with_domain.id)] + ) self.assertIsNone(result) def test_group_delete_with_domain(self): - get_mock_result = [exceptions.CommandError, self.groups[0]] - self.groups_mock.get = ( - mock.Mock(side_effect=get_mock_result)) + self.identity_sdk_client.find_domain.side_effect = [ + sdk_exc.ForbiddenException + ] + self.identity_sdk_client.find_group.return_value = ( + self.group_with_domain + ) arglist = [ - '--domain', self.domain.id, - self.groups[0].id, + '--domain', + self.group_with_domain.domain_id, + self.group_with_domain.name, ] verifylist = [ - ('domain', self.groups[0].domain_id), - ('groups', 
[self.groups[0].id]), + ('domain', self.domain.id), + ('groups', [self.group_with_domain.name]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.groups_mock.get.assert_any_call( - self.groups[0].id, domain_id=self.domain.id) - self.groups_mock.delete.assert_called_once_with(self.groups[0].id) + self.identity_sdk_client.find_group.assert_called_with( + name_or_id=self.group_with_domain.name, + ignore_missing=False, + domain_id=self.domain.id, + ) + self.identity_sdk_client.delete_group.assert_called_once_with( + self.group_with_domain.id + ) self.assertIsNone(result) - @mock.patch.object(utils, 'find_resource') - def test_delete_multi_groups_with_exception(self, find_mock): - find_mock.side_effect = [self.groups[0], - exceptions.CommandError] + def test_delete_multi_groups_with_exception(self): + self.identity_sdk_client.find_group.side_effect = [ + self.group, + self.group_with_domain, + exceptions.CommandError, + ] arglist = [ - self.groups[0].id, + self.group.id, + self.group_with_domain.id, 'unexist_group', ] verifylist = [ @@ -337,47 +407,57 @@ def test_delete_multi_groups_with_exception(self, find_mock): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 groups failed to delete.', - str(e)) - - find_mock.assert_any_call(self.groups_mock, self.groups[0].id) - find_mock.assert_any_call(self.groups_mock, 'unexist_group') - - self.assertEqual(2, find_mock.call_count) - self.groups_mock.delete.assert_called_once_with(self.groups[0].id) + self.assertEqual('1 of 3 groups failed to delete.', str(e)) + + self.identity_sdk_client.find_group.assert_has_calls( + [ + mock.call(name_or_id=self.group.id, ignore_missing=False), + mock.call( + name_or_id=self.group_with_domain.id, ignore_missing=False + ), + mock.call(name_or_id='unexist_group', ignore_missing=False), + ] + ) + self.assertEqual(3, self.identity_sdk_client.find_group.call_count) + self.identity_sdk_client.delete_group.assert_has_calls( + [ + mock.call(self.group.id), + mock.call(self.group_with_domain.id), + ] + ) -class TestGroupList(TestGroup): - domain = identity_fakes.FakeDomain.create_one_domain() - group = identity_fakes.FakeGroup.create_one_group() - user = identity_fakes.FakeUser.create_one_user() +class TestGroupList(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource(_domain.Domain) columns = ( 'ID', 'Name', ) - datalist = ( - ( - group.id, - group.name, - ), - ) def setUp(self): - super(TestGroupList, self).setUp() + super().setUp() - self.groups_mock.get.return_value = self.group - self.groups_mock.list.return_value = [self.group] - - self.domains_mock.get.return_value = self.domain + self.group = sdk_fakes.generate_fake_resource( + _group.Group, description=None, domain_id=None + ) + self.group_with_domain = sdk_fakes.generate_fake_resource( + _group.Group, domain_id=self.domain.id + ) + self.user = sdk_fakes.generate_fake_resource(_user.User) - self.users_mock.get.return_value = self.user + self.identity_sdk_client.find_user.return_value = self.user + self.identity_sdk_client.find_domain.return_value = self.domain # Get the command object to test self.cmd = group.ListGroup(self.app, None) def test_group_list_no_options(self): + self.identity_sdk_client.groups.return_value = [ + self.group, + self.group_with_domain, + ] arglist = [] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -387,22 +467,26 @@ def 
test_group_list_no_options(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'domain': None, - 'user': None, - } - - self.groups_mock.list.assert_called_with( - **kwargs - ) + self.identity_sdk_client.groups.assert_called_with() self.assertEqual(self.columns, columns) - self.assertEqual(self.datalist, tuple(data)) + datalist = ( + ( + self.group.id, + self.group.name, + ), + ( + self.group_with_domain.id, + self.group_with_domain.name, + ), + ) + self.assertEqual(datalist, tuple(data)) def test_group_list_domain(self): + self.identity_sdk_client.groups.return_value = [self.group_with_domain] arglist = [ - '--domain', self.domain.id, + '--domain', + self.domain.id, ] verifylist = [ ('domain', self.domain.id), @@ -416,23 +500,51 @@ def test_group_list_domain(self): # Set expected values kwargs = { - 'domain': self.domain.id, - 'user': None, + 'domain_id': self.domain.id, } - self.groups_mock.list.assert_called_with( - **kwargs - ) + self.identity_sdk_client.groups.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) - self.assertEqual(self.datalist, tuple(data)) + datalist = ((self.group_with_domain.id, self.group_with_domain.name),) + self.assertEqual(datalist, tuple(data)) def test_group_list_user(self): + self.identity_sdk_client.user_groups.return_value = [self.group] + arglist = [ + '--user', + self.user.name, + ] + verifylist = [ + ('user', self.user.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + # In base command class Lister in cliff, abstract method take_action() + # returns a tuple containing the column names and an iterable + # containing the data to be listed. + columns, data = self.cmd.take_action(parsed_args) + + self.identity_sdk_client.user_groups.assert_called_with(self.user.id) + + self.assertEqual(self.columns, columns) + + datalist = ((self.group.id, self.group.name),) + self.assertEqual(datalist, tuple(data)) + + def test_group_list_user_domain(self): + self.identity_sdk_client.user_groups.return_value = [ + self.group_with_domain + ] arglist = [ - '--user', self.user.name, + '--user', + self.user.name, + '--domain', + self.domain.name, ] verifylist = [ ('user', self.user.name), + ('domain', self.domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -443,18 +555,23 @@ def test_group_list_user(self): # Set expected values kwargs = { - 'domain': None, - 'user': self.user.id, + 'domain_id': self.domain.id, } - self.groups_mock.list.assert_called_with( - **kwargs + self.identity_sdk_client.user_groups.assert_called_with( + self.user.id, **kwargs ) self.assertEqual(self.columns, columns) - self.assertEqual(self.datalist, tuple(data)) + + datalist = ((self.group_with_domain.id, self.group_with_domain.name),) + self.assertEqual(datalist, tuple(data)) def test_group_list_long(self): + self.identity_sdk_client.groups.return_value = [ + self.group, + self.group_with_domain, + ] arglist = [ '--long', ] @@ -468,46 +585,46 @@ def test_group_list_long(self): # containing the data to be listed. 
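# Annotation: the hunks in this file replace the keystoneclient manager mocks
# (self.groups_mock, self.users_mock, self.domains_mock) with the openstacksdk
# identity proxy mock provided by the identity_fakes.TestIdentityv3 base class
# (self.identity_sdk_client), and build test data with openstack.test.fakes
# instead of the hand-rolled FakeGroup/FakeUser helpers. Below is a minimal
# standalone sketch of that helper, assuming only the imports already added at
# the top of this file; it is illustrative and not part of the test itself.
from openstack.identity.v3 import group as _group
from openstack.identity.v3 import user as _user
from openstack.test import fakes as sdk_fakes

# Every attribute is filled with random data unless overridden, which is why
# the tests pin fields such as domain_id or description explicitly.
fake_group = sdk_fakes.generate_fake_resource(_group.Group, domain_id=None)
fake_users = tuple(sdk_fakes.generate_fake_resources(_user.User, count=2))
print(fake_group.name, [u.id for u in fake_users])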
columns, data = self.cmd.take_action(parsed_args) - # Set expected values - kwargs = { - 'domain': None, - 'user': None, - } - - self.groups_mock.list.assert_called_with( - **kwargs - ) + self.identity_sdk_client.groups.assert_called_with() - columns = self.columns + ( + long_columns = self.columns + ( 'Domain ID', 'Description', ) - datalist = (( - self.group.id, - self.group.name, - self.group.domain_id, - self.group.description, - ), ) - self.assertEqual(columns, columns) + datalist = ( + ( + self.group.id, + self.group.name, + self.group.domain_id, + self.group.description, + ), + ( + self.group_with_domain.id, + self.group_with_domain.name, + self.group_with_domain.domain_id, + self.group_with_domain.description, + ), + ) + self.assertEqual(long_columns, columns) self.assertEqual(datalist, tuple(data)) -class TestGroupRemoveUser(TestGroup): - - _group = identity_fakes.FakeGroup.create_one_group() - users = identity_fakes.FakeUser.create_users(count=2) - +class TestGroupRemoveUser(identity_fakes.TestIdentityv3): def setUp(self): - super(TestGroupRemoveUser, self).setUp() + super().setUp() + + self._group = sdk_fakes.generate_fake_resource(_group.Group) + self.users = tuple( + sdk_fakes.generate_fake_resources(_user.User, count=2) + ) - self.groups_mock.get.return_value = self._group - self.users_mock.get = ( - identity_fakes.FakeUser.get_users(self.users)) - self.users_mock.remove_from_group.return_value = None + self.identity_sdk_client.find_group.return_value = self._group + self.identity_sdk_client.remove_user_from_group.return_value = None self.cmd = group.RemoveUserFromGroup(self.app, None) def test_group_remove_user(self): + self.identity_sdk_client.find_user.return_value = self.users[0] arglist = [ self._group.id, self.users[0].id, @@ -519,11 +636,16 @@ def test_group_remove_user(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.users_mock.remove_from_group.assert_called_once_with( - self.users[0].id, self._group.id) + self.identity_sdk_client.remove_user_from_group.assert_called_once_with( + self.users[0].id, self._group.id + ) self.assertIsNone(result) def test_group_remove_multi_users(self): + self.identity_sdk_client.find_user.side_effect = [ + self.users[0], + self.users[1], + ] arglist = [ self._group.name, self.users[0].name, @@ -536,15 +658,19 @@ def test_group_remove_multi_users(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - calls = [call(self.users[0].id, self._group.id), - call(self.users[1].id, self._group.id)] - self.users_mock.remove_from_group.assert_has_calls(calls) + calls = [ + call(self.users[0].id, self._group.id), + call(self.users[1].id, self._group.id), + ] + self.identity_sdk_client.remove_user_from_group.assert_has_calls(calls) self.assertIsNone(result) @mock.patch.object(group.LOG, 'error') def test_group_remove_user_with_error(self, mock_error): - self.users_mock.remove_from_group.side_effect = [ - exceptions.CommandError(), None] + self.identity_sdk_client.remove_user_from_group.side_effect = [ + sdk_exc.ResourceNotFound(), + None, + ] arglist = [ self._group.id, self.users[0].id, @@ -559,31 +685,31 @@ def test_group_remove_user_with_error(self, mock_error): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - msg = "1 of 2 users not removed from group %s." % self._group.id + msg = f"1 of 2 users not removed from group {self._group.id}." 
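# Note: failure paths in these group tests are now driven by openstacksdk
# exceptions (sdk_exc.ResourceNotFound, sdk_exc.SDKException) instead of
# keystoneauth1 exceptions, and the add/check/remove helpers are asserted as
# SDK proxy calls taking (user_id, group_id) positionally. The expected
# LOG.error message above embeds the stringified exception, matching the
# per-user error the command logs before raising the aggregate
# "1 of 2 users not removed" CommandError.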
self.assertEqual(msg, str(e)) - msg = ("%(user)s not removed from group %(group)s: ") % { - 'user': self.users[0].id, - 'group': self._group.id, - } + msg = f"{self.users[0].id} not removed from group {self._group.id}: {str(sdk_exc.ResourceNotFound())}" mock_error.assert_called_once_with(msg) -class TestGroupSet(TestGroup): - - domain = identity_fakes.FakeDomain.create_one_domain() - group = identity_fakes.FakeGroup.create_one_group( - attrs={'domain_id': domain.id}) +class TestGroupSet(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource(_domain.Domain) def setUp(self): - super(TestGroupSet, self).setUp() + super().setUp() + self.group = sdk_fakes.generate_fake_resource( + _group.Group, domain_id=self.domain.id + ) + self.group_with_domain = sdk_fakes.generate_fake_resource( + _group.Group, name=self.group.name, domain_id=self.domain.id + ) - self.groups_mock.get.return_value = self.group - self.domains_mock.get.return_value = self.domain - self.groups_mock.update.return_value = None + self.identity_sdk_client.find_group.return_value = self.group + self.identity_sdk_client.find_domain.return_value = self.domain self.cmd = group.SetGroup(self.app, None) def test_group_set_nothing(self): + self.identity_sdk_client.update_group.return_value = self.group arglist = [ self.group.id, ] @@ -593,13 +719,18 @@ def test_group_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.groups_mock.update.assert_called_once_with(self.group.id) + self.identity_sdk_client.update_group.assert_called_once_with( + self.group.id + ) self.assertIsNone(result) def test_group_set_name_and_description(self): + self.identity_sdk_client.update_group.return_value = self.group arglist = [ - '--name', 'new_name', - '--description', 'new_description', + '--name', + 'new_name', + '--description', + 'new_description', self.group.id, ] verifylist = [ @@ -614,35 +745,43 @@ def test_group_set_name_and_description(self): 'name': 'new_name', 'description': 'new_description', } - self.groups_mock.update.assert_called_once_with( - self.group.id, **kwargs) + self.identity_sdk_client.update_group.assert_called_once_with( + self.group.id, **kwargs + ) self.assertIsNone(result) def test_group_set_with_domain(self): - get_mock_result = [exceptions.CommandError, self.group] - self.groups_mock.get = ( - mock.Mock(side_effect=get_mock_result)) - + self.identity_sdk_client.find_domain.side_effect = [ + sdk_exc.ForbiddenException + ] + self.identity_sdk_client.find_group.return_value = ( + self.group_with_domain + ) arglist = [ - '--domain', self.domain.id, - self.group.id, + '--domain', + self.domain.id, + self.group_with_domain.name, ] verifylist = [ ('domain', self.domain.id), - ('group', self.group.id), + ('group', self.group_with_domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.groups_mock.get.assert_any_call( - self.group.id, domain_id=self.domain.id) - self.groups_mock.update.assert_called_once_with(self.group.id) + self.identity_sdk_client.find_group.assert_called_once_with( + name_or_id=self.group_with_domain.name, + ignore_missing=False, + domain_id=self.domain.id, + ) + self.identity_sdk_client.update_group.assert_called_once_with( + self.group_with_domain.id + ) self.assertIsNone(result) -class TestGroupShow(TestGroup): - - domain = identity_fakes.FakeDomain.create_one_domain() +class TestGroupShow(identity_fakes.TestIdentityv3): + domain = 
sdk_fakes.generate_fake_resource(_domain.Domain) columns = ( 'description', @@ -652,22 +791,20 @@ class TestGroupShow(TestGroup): ) def setUp(self): - super(TestGroupShow, self).setUp() - self.group = identity_fakes.FakeGroup.create_one_group( - attrs={'domain_id': self.domain.id}) - self.data = ( - self.group.description, - self.group.domain_id, - self.group.id, - self.group.name, + super().setUp() + self.group = sdk_fakes.generate_fake_resource( + _group.Group, description=None, domain_id=None + ) + self.group_with_domain = sdk_fakes.generate_fake_resource( + _group.Group, name=self.group.name, domain_id=self.domain.id ) - self.groups_mock.get.return_value = self.group - self.domains_mock.get.return_value = self.domain + self.identity_sdk_client.find_domain.return_value = self.domain self.cmd = group.ShowGroup(self.app, None) def test_group_show(self): + self.identity_sdk_client.find_group.return_value = self.group arglist = [ self.group.id, ] @@ -677,27 +814,44 @@ def test_group_show(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.groups_mock.get.assert_called_once_with(self.group.id) + self.identity_sdk_client.find_group.assert_called_once_with( + self.group.id, ignore_missing=False + ) self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) + datalist = ( + None, + None, + self.group.id, + self.group.name, + ) + self.assertEqual(datalist, data) def test_group_show_with_domain(self): - get_mock_result = [exceptions.CommandError, self.group] - self.groups_mock.get = ( - mock.Mock(side_effect=get_mock_result)) - + self.identity_sdk_client.find_group.return_value = ( + self.group_with_domain + ) arglist = [ - '--domain', self.domain.id, - self.group.id, + '--domain', + self.domain.id, + self.group_with_domain.name, ] verifylist = [ ('domain', self.domain.id), - ('group', self.group.id), + ('group', self.group_with_domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.groups_mock.get.assert_any_call( - self.group.id, domain_id=self.domain.id) + self.identity_sdk_client.find_group.assert_called_once_with( + self.group_with_domain.name, + domain_id=self.domain.id, + ignore_missing=False, + ) self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) + datalist = ( + self.group_with_domain.description, + self.domain.id, + self.group_with_domain.id, + self.group_with_domain.name, + ) + self.assertEqual(datalist, data) diff --git a/openstackclient/tests/unit/identity/v3/test_identity_provider.py b/openstackclient/tests/unit/identity/v3/test_identity_provider.py index 480bae596c..c65e947efa 100644 --- a/openstackclient/tests/unit/identity/v3/test_identity_provider.py +++ b/openstackclient/tests/unit/identity/v3/test_identity_provider.py @@ -24,17 +24,16 @@ class TestIdentityProvider(identity_fakes.TestFederatedIdentity): - def setUp(self): - super(TestIdentityProvider, self).setUp() + super().setUp() # Identity Provider mocks - federation_lib = self.app.client_manager.identity.federation + federation_lib = self.identity_client.federation self.identity_providers_mock = federation_lib.identity_providers self.identity_providers_mock.reset_mock() # Domain mocks - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains self.domains_mock.reset_mock() self.domain = identity_fakes.FakeDomain.create_one_domain( identity_fakes.DOMAIN @@ -44,7 +43,6 @@ def 
setUp(self): class TestIdentityProviderCreate(TestIdentityProvider): - columns = ( 'description', 'domain_id', @@ -61,7 +59,7 @@ class TestIdentityProviderCreate(TestIdentityProvider): ) def setUp(self): - super(TestIdentityProviderCreate, self).setUp() + super().setUp() copied_idp = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER) resource = fakes.FakeResource(None, copied_idp, loaded=True) @@ -87,8 +85,7 @@ def test_create_identity_provider_no_options(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) @@ -96,7 +93,8 @@ def test_create_identity_provider_no_options(self): def test_create_identity_provider_description(self): arglist = [ - '--description', identity_fakes.idp_description, + '--description', + identity_fakes.idp_description, identity_fakes.idp_id, ] verifylist = [ @@ -115,8 +113,7 @@ def test_create_identity_provider_description(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) @@ -125,11 +122,12 @@ def test_create_identity_provider_description(self): def test_create_identity_provider_remote_id(self): arglist = [ identity_fakes.idp_id, - '--remote-id', identity_fakes.idp_remote_ids[0] + '--remote-id', + identity_fakes.idp_remote_ids[0], ] verifylist = [ ('identity_provider_id', identity_fakes.idp_id), - ('remote_id', identity_fakes.idp_remote_ids[:1]), + ('remote_ids', identity_fakes.idp_remote_ids[:1]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) @@ -143,8 +141,7 @@ def test_create_identity_provider_remote_id(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) @@ -152,13 +149,15 @@ def test_create_identity_provider_remote_id(self): def test_create_identity_provider_remote_ids_multiple(self): arglist = [ - '--remote-id', identity_fakes.idp_remote_ids[0], - '--remote-id', identity_fakes.idp_remote_ids[1], - identity_fakes.idp_id + '--remote-id', + identity_fakes.idp_remote_ids[0], + '--remote-id', + identity_fakes.idp_remote_ids[1], + identity_fakes.idp_id, ] verifylist = [ ('identity_provider_id', identity_fakes.idp_id), - ('remote_id', identity_fakes.idp_remote_ids), + ('remote_ids', identity_fakes.idp_remote_ids), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) @@ -172,8 +171,7 @@ def test_create_identity_provider_remote_ids_multiple(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) @@ -181,7 +179,8 @@ def test_create_identity_provider_remote_ids_multiple(self): def test_create_identity_provider_remote_ids_file(self): arglist = [ - '--remote-id-file', '/tmp/file_name', + '--remote-id-file', + '/tmp/file_name', identity_fakes.idp_id, ] verifylist = [ @@ -192,8 +191,11 @@ def test_create_identity_provider_remote_ids_file(self): mocker = mock.Mock() mocker.return_value = "\n".join(identity_fakes.idp_remote_ids) - with mock.patch("openstackclient.identity.v3.identity_provider." - "utils.read_blob_file_contents", mocker): + with mock.patch( + "openstackclient.identity.v3.identity_provider." 
+ "utils.read_blob_file_contents", + mocker, + ): columns, data = self.cmd.take_action(parsed_args) # Set expected values @@ -205,15 +207,13 @@ def test_create_identity_provider_remote_ids_file(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.datalist, data) def test_create_identity_provider_disabled(self): - # Prepare FakeResource object IDENTITY_PROVIDER = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER) IDENTITY_PROVIDER['enabled'] = False @@ -241,8 +241,7 @@ def test_create_identity_provider_disabled(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) @@ -251,13 +250,14 @@ def test_create_identity_provider_disabled(self): identity_fakes.domain_id, False, identity_fakes.idp_id, - identity_fakes.formatted_idp_remote_ids + identity_fakes.formatted_idp_remote_ids, ) self.assertCountEqual(datalist, data) def test_create_identity_provider_domain_name(self): arglist = [ - '--domain', identity_fakes.domain_name, + '--domain', + identity_fakes.domain_name, identity_fakes.idp_id, ] verifylist = [ @@ -276,8 +276,7 @@ def test_create_identity_provider_domain_name(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) @@ -285,7 +284,8 @@ def test_create_identity_provider_domain_name(self): def test_create_identity_provider_domain_id(self): arglist = [ - '--domain', identity_fakes.domain_id, + '--domain', + identity_fakes.domain_id, identity_fakes.idp_id, ] verifylist = [ @@ -304,8 +304,7 @@ def test_create_identity_provider_domain_id(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) @@ -313,7 +312,8 @@ def test_create_identity_provider_domain_id(self): def test_create_identity_provider_authttl_positive(self): arglist = [ - '--authorization-ttl', '60', + '--authorization-ttl', + '60', identity_fakes.idp_id, ] verifylist = [ @@ -333,8 +333,7 @@ def test_create_identity_provider_authttl_positive(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) @@ -342,7 +341,8 @@ def test_create_identity_provider_authttl_positive(self): def test_create_identity_provider_authttl_zero(self): arglist = [ - '--authorization-ttl', '0', + '--authorization-ttl', + '0', identity_fakes.idp_id, ] verifylist = [ @@ -362,8 +362,7 @@ def test_create_identity_provider_authttl_zero(self): } self.identity_providers_mock.create.assert_called_with( - id=identity_fakes.idp_id, - **kwargs + id=identity_fakes.idp_id, **kwargs ) self.assertEqual(self.columns, columns) @@ -371,7 +370,8 @@ def test_create_identity_provider_authttl_zero(self): def test_create_identity_provider_authttl_negative(self): arglist = [ - '--authorization-ttl', '-60', + '--authorization-ttl', + '-60', identity_fakes.idp_id, ] verifylist = [ @@ -379,23 +379,29 @@ def test_create_identity_provider_authttl_negative(self): ('authorization_ttl', -60), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + 
self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_create_identity_provider_authttl_not_int(self): arglist = [ - '--authorization-ttl', 'spam', + '--authorization-ttl', + 'spam', identity_fakes.idp_id, ] verifylist = [] - self.assertRaises(test_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) class TestIdentityProviderDelete(TestIdentityProvider): - def setUp(self): - super(TestIdentityProviderDelete, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.identity_providers_mock.get.return_value = fakes.FakeResource( @@ -425,9 +431,8 @@ def test_delete_identity_provider(self): class TestIdentityProviderList(TestIdentityProvider): - def setUp(self): - super(TestIdentityProviderList, self).setUp() + super().setUp() self.identity_providers_mock.get.return_value = fakes.FakeResource( None, @@ -459,20 +464,19 @@ def test_identity_provider_list_no_options(self): collist = ('ID', 'Enabled', 'Domain ID', 'Description') self.assertEqual(collist, columns) - datalist = (( - identity_fakes.idp_id, - True, - identity_fakes.domain_id, - identity_fakes.idp_description, - ), ) + datalist = ( + ( + identity_fakes.idp_id, + True, + identity_fakes.domain_id, + identity_fakes.idp_description, + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_identity_provider_list_ID_option(self): - arglist = ['--id', - identity_fakes.idp_id] - verifylist = [ - ('id', identity_fakes.idp_id) - ] + arglist = ['--id', identity_fakes.idp_id] + verifylist = [('id', identity_fakes.idp_id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() @@ -480,26 +484,24 @@ def test_identity_provider_list_ID_option(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - kwargs = { - 'id': identity_fakes.idp_id - } + kwargs = {'id': identity_fakes.idp_id} self.identity_providers_mock.list.assert_called_with(**kwargs) collist = ('ID', 'Enabled', 'Domain ID', 'Description') self.assertEqual(collist, columns) - datalist = (( - identity_fakes.idp_id, - True, - identity_fakes.domain_id, - identity_fakes.idp_description, - ), ) + datalist = ( + ( + identity_fakes.idp_id, + True, + identity_fakes.domain_id, + identity_fakes.idp_description, + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_identity_provider_list_enabled_option(self): arglist = ['--enabled'] - verifylist = [ - ('enabled', True) - ] + verifylist = [('enabled', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() @@ -507,24 +509,23 @@ def test_identity_provider_list_enabled_option(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - kwargs = { - 'enabled': True - } + kwargs = {'enabled': True} self.identity_providers_mock.list.assert_called_with(**kwargs) collist = ('ID', 'Enabled', 'Domain ID', 'Description') self.assertEqual(collist, columns) - datalist = (( - identity_fakes.idp_id, - True, - identity_fakes.domain_id, - identity_fakes.idp_description, - ), ) + datalist = ( + ( + identity_fakes.idp_id, + True, + identity_fakes.domain_id, + identity_fakes.idp_description, + ), + ) self.assertCountEqual(datalist, tuple(data)) class TestIdentityProviderSet(TestIdentityProvider): - columns = ( 'description', 'enabled', @@ -539,35 +540,28 @@ class TestIdentityProviderSet(TestIdentityProvider): ) def setUp(self): - super(TestIdentityProviderSet, self).setUp() + super().setUp() self.cmd = identity_provider.SetIdentityProvider(self.app, None) def test_identity_provider_set_description(self): - """Set Identity Provider's description. """ + """Set Identity Provider's description.""" def prepare(self): """Prepare fake return objects before the test is executed""" updated_idp = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER) updated_idp['enabled'] = False - resources = fakes.FakeResource( - None, - updated_idp, - loaded=True - ) + resources = fakes.FakeResource(None, updated_idp, loaded=True) self.identity_providers_mock.update.return_value = resources prepare(self) new_description = 'new desc' - arglist = [ - '--description', new_description, - identity_fakes.idp_id - ] + arglist = ['--description', new_description, identity_fakes.idp_id] verifylist = [ ('identity_provider', identity_fakes.idp_id), ('description', new_description), ('enable', False), ('disable', False), - ('remote_id', None) + ('remote_ids', None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) @@ -586,25 +580,24 @@ def prepare(self): """Prepare fake return objects before the test is executed""" updated_idp = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER) updated_idp['enabled'] = False - resources = fakes.FakeResource( - None, - updated_idp, - loaded=True - ) + resources = fakes.FakeResource(None, updated_idp, loaded=True) self.identity_providers_mock.update.return_value = resources prepare(self) arglist = [ - '--disable', identity_fakes.idp_id, - '--remote-id', identity_fakes.idp_remote_ids[0], - '--remote-id', identity_fakes.idp_remote_ids[1] + '--disable', + identity_fakes.idp_id, + '--remote-id', + identity_fakes.idp_remote_ids[0], + '--remote-id', + identity_fakes.idp_remote_ids[1], ] verifylist = [ ('identity_provider', identity_fakes.idp_id), ('description', None), ('enable', False), ('disable', True), - ('remote_id', identity_fakes.idp_remote_ids) + ('remote_ids', identity_fakes.idp_remote_ids), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -612,7 +605,7 @@ def prepare(self): self.identity_providers_mock.update.assert_called_with( identity_fakes.idp_id, enabled=False, - remote_ids=identity_fakes.idp_remote_ids + remote_ids=identity_fakes.idp_remote_ids, ) def test_identity_provider_enable(self): @@ -626,29 +619,34 @@ def prepare(self): resources = fakes.FakeResource( None, copy.deepcopy(identity_fakes.IDENTITY_PROVIDER), - loaded=True + loaded=True, ) self.identity_providers_mock.update.return_value = resources prepare(self) arglist = [ - '--enable', identity_fakes.idp_id, - '--remote-id', identity_fakes.idp_remote_ids[0], - '--remote-id', identity_fakes.idp_remote_ids[1] + '--enable', + identity_fakes.idp_id, + '--remote-id', + 
identity_fakes.idp_remote_ids[0], + '--remote-id', + identity_fakes.idp_remote_ids[1], ] verifylist = [ ('identity_provider', identity_fakes.idp_id), ('description', None), ('enable', True), ('disable', False), - ('remote_id', identity_fakes.idp_remote_ids) + ('remote_ids', identity_fakes.idp_remote_ids), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) self.identity_providers_mock.update.assert_called_with( - identity_fakes.idp_id, enabled=True, - remote_ids=identity_fakes.idp_remote_ids) + identity_fakes.idp_id, + enabled=True, + remote_ids=identity_fakes.idp_remote_ids, + ) def test_identity_provider_replace_remote_ids(self): """Enable Identity Provider. @@ -662,31 +660,31 @@ def prepare(self): updated_idp = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER) updated_idp['remote_ids'] = [self.new_remote_id] - resources = fakes.FakeResource( - None, - updated_idp, - loaded=True - ) + resources = fakes.FakeResource(None, updated_idp, loaded=True) self.identity_providers_mock.update.return_value = resources prepare(self) arglist = [ - '--enable', identity_fakes.idp_id, - '--remote-id', self.new_remote_id + '--enable', + identity_fakes.idp_id, + '--remote-id', + self.new_remote_id, ] verifylist = [ ('identity_provider', identity_fakes.idp_id), ('description', None), ('enable', True), ('disable', False), - ('remote_id', [self.new_remote_id]) + ('remote_ids', [self.new_remote_id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) self.identity_providers_mock.update.assert_called_with( - identity_fakes.idp_id, enabled=True, - remote_ids=[self.new_remote_id]) + identity_fakes.idp_id, + enabled=True, + remote_ids=[self.new_remote_id], + ) def test_identity_provider_replace_remote_ids_file(self): """Enable Identity Provider. @@ -700,17 +698,15 @@ def prepare(self): updated_idp = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER) updated_idp['remote_ids'] = [self.new_remote_id] - resources = fakes.FakeResource( - None, - updated_idp, - loaded=True - ) + resources = fakes.FakeResource(None, updated_idp, loaded=True) self.identity_providers_mock.update.return_value = resources prepare(self) arglist = [ - '--enable', identity_fakes.idp_id, - '--remote-id-file', self.new_remote_id, + '--enable', + identity_fakes.idp_id, + '--remote-id-file', + self.new_remote_id, ] verifylist = [ ('identity_provider', identity_fakes.idp_id), @@ -723,12 +719,17 @@ def prepare(self): mocker = mock.Mock() mocker.return_value = self.new_remote_id - with mock.patch("openstackclient.identity.v3.identity_provider." - "utils.read_blob_file_contents", mocker): + with mock.patch( + "openstackclient.identity.v3.identity_provider." 
+ "utils.read_blob_file_contents", + mocker, + ): self.cmd.take_action(parsed_args) self.identity_providers_mock.update.assert_called_with( - identity_fakes.idp_id, enabled=True, - remote_ids=[self.new_remote_id]) + identity_fakes.idp_id, + enabled=True, + remote_ids=[self.new_remote_id], + ) def test_identity_provider_no_options(self): def prepare(self): @@ -736,7 +737,7 @@ def prepare(self): resources = fakes.FakeResource( None, copy.deepcopy(identity_fakes.IDENTITY_PROVIDER), - loaded=True + loaded=True, ) self.identity_providers_mock.get.return_value = resources @@ -755,7 +756,7 @@ def prepare(self): ('identity_provider', identity_fakes.idp_id), ('enable', False), ('disable', False), - ('remote_id', None) + ('remote_ids', None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -766,23 +767,16 @@ def prepare(self): """Prepare fake return objects before the test is executed""" updated_idp = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER) updated_idp['authorization_ttl'] = 60 - resources = fakes.FakeResource( - None, - updated_idp, - loaded=True - ) + resources = fakes.FakeResource(None, updated_idp, loaded=True) self.identity_providers_mock.update.return_value = resources prepare(self) - arglist = [ - '--authorization-ttl', '60', - identity_fakes.idp_id - ] + arglist = ['--authorization-ttl', '60', identity_fakes.idp_id] verifylist = [ ('identity_provider', identity_fakes.idp_id), ('enable', False), ('disable', False), - ('remote_id', None), + ('remote_ids', None), ('authorization_ttl', 60), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -797,23 +791,16 @@ def prepare(self): """Prepare fake return objects before the test is executed""" updated_idp = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER) updated_idp['authorization_ttl'] = 0 - resources = fakes.FakeResource( - None, - updated_idp, - loaded=True - ) + resources = fakes.FakeResource(None, updated_idp, loaded=True) self.identity_providers_mock.update.return_value = resources prepare(self) - arglist = [ - '--authorization-ttl', '0', - identity_fakes.idp_id - ] + arglist = ['--authorization-ttl', '0', identity_fakes.idp_id] verifylist = [ ('identity_provider', identity_fakes.idp_id), ('enable', False), ('disable', False), - ('remote_id', None), + ('remote_ids', None), ('authorization_ttl', 0), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -824,35 +811,34 @@ def prepare(self): ) def test_identity_provider_set_authttl_negative(self): - arglist = [ - '--authorization-ttl', '-1', - identity_fakes.idp_id - ] + arglist = ['--authorization-ttl', '-1', identity_fakes.idp_id] verifylist = [ ('identity_provider', identity_fakes.idp_id), ('enable', False), ('disable', False), - ('remote_id', None), + ('remote_ids', None), ('authorization_ttl', -1), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_identity_provider_set_authttl_not_int(self): - arglist = [ - '--authorization-ttl', 'spam', - identity_fakes.idp_id - ] + arglist = ['--authorization-ttl', 'spam', identity_fakes.idp_id] verifylist = [] - self.assertRaises(test_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) class TestIdentityProviderShow(TestIdentityProvider): - def setUp(self): - 
super(TestIdentityProviderShow, self).setUp() + super().setUp() ret = fakes.FakeResource( None, @@ -860,8 +846,10 @@ def setUp(self): loaded=True, ) - self.identity_providers_mock.get.side_effect = [Exception("Not found"), - ret] + self.identity_providers_mock.get.side_effect = [ + Exception("Not found"), + ret, + ] self.identity_providers_mock.get.return_value = ret # Get the command object to test @@ -879,8 +867,7 @@ def test_identity_provider_show(self): columns, data = self.cmd.take_action(parsed_args) self.identity_providers_mock.get.assert_called_with( - identity_fakes.idp_id, - id='test_idp' + identity_fakes.idp_id, id='test_idp' ) collist = ('description', 'domain_id', 'enabled', 'id', 'remote_ids') @@ -890,6 +877,6 @@ def test_identity_provider_show(self): identity_fakes.domain_id, True, identity_fakes.idp_id, - identity_fakes.formatted_idp_remote_ids + identity_fakes.formatted_idp_remote_ids, ) self.assertCountEqual(datalist, data) diff --git a/openstackclient/tests/unit/identity/v3/test_implied_role.py b/openstackclient/tests/unit/identity/v3/test_implied_role.py index 749681293b..f6bc6063f6 100644 --- a/openstackclient/tests/unit/identity/v3/test_implied_role.py +++ b/openstackclient/tests/unit/identity/v3/test_implied_role.py @@ -21,11 +21,10 @@ class TestRole(identity_fakes.TestIdentityv3): - def setUp(self): - super(TestRole, self).setUp() + super().setUp() - identity_client = self.app.client_manager.identity + identity_client = self.identity_client # Get a shortcut to the UserManager Mock self.users_mock = identity_client.users @@ -56,9 +55,8 @@ def _is_inheritance_testcase(self): class TestImpliedRoleCreate(TestRole): - def setUp(self): - super(TestImpliedRoleCreate, self).setUp() + super().setUp() self.roles_mock.list.return_value = [ fakes.FakeResource( @@ -75,8 +73,10 @@ def setUp(self): fake_resource = fakes.FakeResource( None, - {'prior_role': copy.deepcopy(identity_fakes.ROLES[0]), - 'implied': copy.deepcopy(identity_fakes.ROLES[1]), }, + { + 'prior_role': copy.deepcopy(identity_fakes.ROLES[0]), + 'implied': copy.deepcopy(identity_fakes.ROLES[1]), + }, loaded=True, ) self.inference_rules_mock.create.return_value = fake_resource @@ -84,10 +84,10 @@ def setUp(self): self.cmd = implied_role.CreateImpliedRole(self.app, None) def test_implied_role_create(self): - arglist = [ identity_fakes.ROLES[0]['id'], - '--implied-role', identity_fakes.ROLES[1]['id'], + '--implied-role', + identity_fakes.ROLES[1]['id'], ] verifylist = [ ('role', identity_fakes.ROLES[0]['id']), @@ -102,23 +102,21 @@ def test_implied_role_create(self): # InferenceRuleManager.create(prior, implied) self.inference_rules_mock.create.assert_called_with( - identity_fakes.ROLES[0]['id'], - identity_fakes.ROLES[1]['id'] + identity_fakes.ROLES[0]['id'], identity_fakes.ROLES[1]['id'] ) collist = ('implied', 'prior_role') self.assertEqual(collist, columns) datalist = ( identity_fakes.ROLES[1]['id'], - identity_fakes.ROLES[0]['id'] + identity_fakes.ROLES[0]['id'], ) self.assertEqual(datalist, data) class TestImpliedRoleDelete(TestRole): - def setUp(self): - super(TestImpliedRoleDelete, self).setUp() + super().setUp() self.roles_mock.list.return_value = [ fakes.FakeResource( @@ -135,8 +133,10 @@ def setUp(self): fake_resource = fakes.FakeResource( None, - {'prior-role': copy.deepcopy(identity_fakes.ROLES[0]), - 'implied': copy.deepcopy(identity_fakes.ROLES[1]), }, + { + 'prior-role': copy.deepcopy(identity_fakes.ROLES[0]), + 'implied': copy.deepcopy(identity_fakes.ROLES[1]), + }, loaded=True, ) 
self.inference_rules_mock.delete.return_value = fake_resource @@ -146,7 +146,8 @@ def setUp(self): def test_implied_role_delete(self): arglist = [ identity_fakes.ROLES[0]['id'], - '--implied-role', identity_fakes.ROLES[1]['id'], + '--implied-role', + identity_fakes.ROLES[1]['id'], ] verifylist = [ ('role', identity_fakes.ROLES[0]['id']), @@ -156,18 +157,17 @@ def test_implied_role_delete(self): self.cmd.take_action(parsed_args) self.inference_rules_mock.delete.assert_called_with( - identity_fakes.ROLES[0]['id'], - identity_fakes.ROLES[1]['id'] + identity_fakes.ROLES[0]['id'], identity_fakes.ROLES[1]['id'] ) class TestImpliedRoleList(TestRole): - def setUp(self): - super(TestImpliedRoleList, self).setUp() + super().setUp() self.inference_rules_mock.list_inference_roles.return_value = ( - identity_fakes.FakeImpliedRoleResponse.create_list()) + identity_fakes.FakeImpliedRoleResponse.create_list() + ) self.cmd = implied_role.ListImpliedRole(self.app, None) @@ -178,12 +178,20 @@ def test_implied_role_list(self): columns, data = self.cmd.take_action(parsed_args) self.inference_rules_mock.list_inference_roles.assert_called_with() - collist = ['Prior Role ID', 'Prior Role Name', - 'Implied Role ID', 'Implied Role Name'] + collist = [ + 'Prior Role ID', + 'Prior Role Name', + 'Implied Role ID', + 'Implied Role Name', + ] self.assertEqual(collist, columns) datalist = [ - (identity_fakes.ROLES[0]['id'], identity_fakes.ROLES[0]['name'], - identity_fakes.ROLES[1]['id'], identity_fakes.ROLES[1]['name']) + ( + identity_fakes.ROLES[0]['id'], + identity_fakes.ROLES[0]['name'], + identity_fakes.ROLES[1]['id'], + identity_fakes.ROLES[1]['name'], + ) ] x = [d for d in data] self.assertEqual(datalist, x) diff --git a/openstackclient/tests/unit/identity/v3/test_limit.py b/openstackclient/tests/unit/identity/v3/test_limit.py index e5cd87b8bd..a1d180b4ba 100644 --- a/openstackclient/tests/unit/identity/v3/test_limit.py +++ b/openstackclient/tests/unit/identity/v3/test_limit.py @@ -21,11 +21,10 @@ class TestLimit(identity_fakes.TestIdentityv3): - def setUp(self): - super(TestLimit, self).setUp() + super().setUp() - identity_manager = self.app.client_manager.identity + identity_manager = self.identity_client self.limit_mock = identity_manager.limits @@ -40,28 +39,21 @@ def setUp(self): class TestLimitCreate(TestLimit): - def setUp(self): - super(TestLimitCreate, self).setUp() + super().setUp() self.service = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.SERVICE), - loaded=True + None, copy.deepcopy(identity_fakes.SERVICE), loaded=True ) self.services_mock.get.return_value = self.service self.project = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.PROJECT), - loaded=True + None, copy.deepcopy(identity_fakes.PROJECT), loaded=True ) self.projects_mock.get.return_value = self.project self.region = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.REGION), - loaded=True + None, copy.deepcopy(identity_fakes.REGION), loaded=True ) self.regions_mock.get.return_value = self.region @@ -69,23 +61,24 @@ def setUp(self): def test_limit_create_without_options(self): self.limit_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.LIMIT), - loaded=True + None, copy.deepcopy(identity_fakes.LIMIT), loaded=True ) resource_limit = 15 arglist = [ - '--project', identity_fakes.project_id, - '--service', identity_fakes.service_id, - '--resource-limit', str(resource_limit), - identity_fakes.limit_resource_name + '--project', + identity_fakes.project_id, + '--service', + 
identity_fakes.service_id, + '--resource-limit', + str(resource_limit), + identity_fakes.limit_resource_name, ] verifylist = [ ('project', identity_fakes.project_id), ('service', identity_fakes.service_id), ('resource_name', identity_fakes.limit_resource_name), - ('resource_limit', resource_limit) + ('resource_limit', resource_limit), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -97,11 +90,18 @@ def test_limit_create_without_options(self): self.service, identity_fakes.limit_resource_name, resource_limit, - **kwargs + **kwargs, ) - collist = ('description', 'id', 'project_id', 'region_id', - 'resource_limit', 'resource_name', 'service_id') + collist = ( + 'description', + 'id', + 'project_id', + 'region_id', + 'resource_limit', + 'resource_name', + 'service_id', + ) self.assertEqual(collist, columns) datalist = ( None, @@ -110,25 +110,28 @@ def test_limit_create_without_options(self): None, resource_limit, identity_fakes.limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) def test_limit_create_with_options(self): self.limit_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.LIMIT_OPTIONS), - loaded=True + None, copy.deepcopy(identity_fakes.LIMIT_OPTIONS), loaded=True ) resource_limit = 15 arglist = [ - '--project', identity_fakes.project_id, - '--service', identity_fakes.service_id, - '--resource-limit', str(resource_limit), - '--region', identity_fakes.region_id, - '--description', identity_fakes.limit_description, - identity_fakes.limit_resource_name + '--project', + identity_fakes.project_id, + '--service', + identity_fakes.service_id, + '--resource-limit', + str(resource_limit), + '--region', + identity_fakes.region_id, + '--description', + identity_fakes.limit_description, + identity_fakes.limit_resource_name, ] verifylist = [ ('project', identity_fakes.project_id), @@ -136,7 +139,7 @@ def test_limit_create_with_options(self): ('resource_name', identity_fakes.limit_resource_name), ('resource_limit', resource_limit), ('region', identity_fakes.region_id), - ('description', identity_fakes.limit_description) + ('description', identity_fakes.limit_description), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -144,18 +147,25 @@ def test_limit_create_with_options(self): kwargs = { 'description': identity_fakes.limit_description, - 'region': self.region + 'region': self.region, } self.limit_mock.create.assert_called_with( self.project, self.service, identity_fakes.limit_resource_name, resource_limit, - **kwargs + **kwargs, ) - collist = ('description', 'id', 'project_id', 'region_id', - 'resource_limit', 'resource_name', 'service_id') + collist = ( + 'description', + 'id', + 'project_id', + 'region_id', + 'resource_limit', + 'resource_name', + 'service_id', + ) self.assertEqual(collist, columns) datalist = ( identity_fakes.limit_description, @@ -164,31 +174,26 @@ def test_limit_create_with_options(self): identity_fakes.region_id, resource_limit, identity_fakes.limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) class TestLimitDelete(TestLimit): - def setUp(self): - super(TestLimitDelete, self).setUp() + super().setUp() self.cmd = limit.DeleteLimit(self.app, None) def test_limit_delete(self): self.limit_mock.delete.return_value = None arglist = [identity_fakes.limit_id] - verifylist = [ - ('limit_id', [identity_fakes.limit_id]) - ] + verifylist = [('limit_id', [identity_fakes.limit_id])] 
parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.limit_mock.delete.assert_called_with( - identity_fakes.limit_id - ) + self.limit_mock.delete.assert_called_with(identity_fakes.limit_id) self.assertIsNone(result) def test_limit_delete_with_exception(self): @@ -196,29 +201,22 @@ def test_limit_delete_with_exception(self): self.limit_mock.delete.side_effect = return_value arglist = ['fake-limit-id'] - verifylist = [ - ('limit_id', ['fake-limit-id']) - ] + verifylist = [('limit_id', ['fake-limit-id'])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual( - '1 of 1 limits failed to delete.', str(e) - ) + self.assertEqual('1 of 1 limits failed to delete.', str(e)) class TestLimitShow(TestLimit): - def setUp(self): - super(TestLimitShow, self).setUp() + super().setUp() self.limit_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.LIMIT), - loaded=True + None, copy.deepcopy(identity_fakes.LIMIT), loaded=True ) self.cmd = limit.ShowLimit(self.app, None) @@ -233,8 +231,13 @@ def test_limit_show(self): self.limit_mock.get.assert_called_with(identity_fakes.limit_id) collist = ( - 'description', 'id', 'project_id', 'region_id', 'resource_limit', - 'resource_name', 'service_id' + 'description', + 'id', + 'project_id', + 'region_id', + 'resource_limit', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -244,15 +247,14 @@ def test_limit_show(self): None, identity_fakes.limit_resource_limit, identity_fakes.limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) class TestLimitSet(TestLimit): - def setUp(self): - super(TestLimitSet, self).setUp() + super().setUp() self.cmd = limit.SetLimit(self.app, None) def test_limit_set_description(self): @@ -263,12 +265,13 @@ def test_limit_set_description(self): ) arglist = [ - '--description', identity_fakes.limit_description, - identity_fakes.limit_id + '--description', + identity_fakes.limit_description, + identity_fakes.limit_id, ] verifylist = [ ('description', identity_fakes.limit_description), - ('limit_id', identity_fakes.limit_id) + ('limit_id', identity_fakes.limit_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -277,12 +280,17 @@ def test_limit_set_description(self): self.limit_mock.update.assert_called_with( identity_fakes.limit_id, description=identity_fakes.limit_description, - resource_limit=None + resource_limit=None, ) collist = ( - 'description', 'id', 'project_id', 'region_id', 'resource_limit', - 'resource_name', 'service_id' + 'description', + 'id', + 'project_id', + 'region_id', + 'resource_limit', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -292,7 +300,7 @@ def test_limit_set_description(self): None, identity_fakes.limit_resource_limit, identity_fakes.limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) @@ -305,12 +313,13 @@ def test_limit_set_resource_limit(self): ) arglist = [ - '--resource-limit', str(resource_limit), - identity_fakes.limit_id + '--resource-limit', + str(resource_limit), + identity_fakes.limit_id, ] verifylist = [ ('resource_limit', resource_limit), - ('limit_id', identity_fakes.limit_id) + ('limit_id', identity_fakes.limit_id), ] parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) @@ -319,12 +328,17 @@ def test_limit_set_resource_limit(self): self.limit_mock.update.assert_called_with( identity_fakes.limit_id, description=None, - resource_limit=resource_limit + resource_limit=resource_limit, ) collist = ( - 'description', 'id', 'project_id', 'region_id', 'resource_limit', - 'resource_name', 'service_id' + 'description', + 'id', + 'project_id', + 'region_id', + 'resource_limit', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -334,21 +348,18 @@ def test_limit_set_resource_limit(self): None, resource_limit, identity_fakes.limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) class TestLimitList(TestLimit): - def setUp(self): - super(TestLimitList, self).setUp() + super().setUp() self.limit_mock.list.return_value = [ fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.LIMIT), - loaded=True + None, copy.deepcopy(identity_fakes.LIMIT), loaded=True ) ] @@ -362,22 +373,28 @@ def test_limit_list(self): columns, data = self.cmd.take_action(parsed_args) self.limit_mock.list.assert_called_with( - service=None, resource_name=None, region=None, - project=None + service=None, resource_name=None, region=None, project=None ) collist = ( - 'ID', 'Project ID', 'Service ID', 'Resource Name', - 'Resource Limit', 'Description', 'Region ID' + 'ID', + 'Project ID', + 'Service ID', + 'Resource Name', + 'Resource Limit', + 'Description', + 'Region ID', ) self.assertEqual(collist, columns) - datalist = (( - identity_fakes.limit_id, - identity_fakes.project_id, - identity_fakes.service_id, - identity_fakes.limit_resource_name, - identity_fakes.limit_resource_limit, - None, - None - ), ) + datalist = ( + ( + identity_fakes.limit_id, + identity_fakes.project_id, + identity_fakes.service_id, + identity_fakes.limit_resource_name, + identity_fakes.limit_resource_limit, + None, + None, + ), + ) self.assertEqual(datalist, tuple(data)) diff --git a/openstackclient/tests/unit/identity/v3/test_mappings.py b/openstackclient/tests/unit/identity/v3/test_mappings.py index 184bd2a265..5a3fad97a8 100644 --- a/openstackclient/tests/unit/identity/v3/test_mappings.py +++ b/openstackclient/tests/unit/identity/v3/test_mappings.py @@ -23,92 +23,83 @@ class TestMapping(identity_fakes.TestFederatedIdentity): - def setUp(self): - super(TestMapping, self).setUp() + super().setUp() - federation_lib = self.app.client_manager.identity.federation + federation_lib = self.identity_client.federation self.mapping_mock = federation_lib.mappings self.mapping_mock.reset_mock() class TestMappingCreate(TestMapping): - def setUp(self): - super(TestMappingCreate, self).setUp() + super().setUp() self.mapping_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.MAPPING_RESPONSE), - loaded=True + None, copy.deepcopy(identity_fakes.MAPPING_RESPONSE), loaded=True ) self.cmd = mapping.CreateMapping(self.app, None) def test_create_mapping(self): arglist = [ - '--rules', identity_fakes.mapping_rules_file_path, - identity_fakes.mapping_id + '--rules', + identity_fakes.mapping_rules_file_path, + identity_fakes.mapping_id, ] verifylist = [ ('mapping', identity_fakes.mapping_id), - ('rules', identity_fakes.mapping_rules_file_path) + ('rules', identity_fakes.mapping_rules_file_path), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) mocker = mock.Mock() mocker.return_value = identity_fakes.MAPPING_RULES - with 
mock.patch("openstackclient.identity.v3.mapping." - "CreateMapping._read_rules", mocker): + with mock.patch( + "openstackclient.identity.v3.mapping.CreateMapping._read_rules", + mocker, + ): columns, data = self.cmd.take_action(parsed_args) self.mapping_mock.create.assert_called_with( mapping_id=identity_fakes.mapping_id, - rules=identity_fakes.MAPPING_RULES) + rules=identity_fakes.MAPPING_RULES, + schema_version=None, + ) collist = ('id', 'rules') self.assertEqual(collist, columns) - datalist = (identity_fakes.mapping_id, - identity_fakes.MAPPING_RULES) + datalist = (identity_fakes.mapping_id, identity_fakes.MAPPING_RULES) self.assertEqual(datalist, data) class TestMappingDelete(TestMapping): - def setUp(self): - super(TestMappingDelete, self).setUp() + super().setUp() self.mapping_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.MAPPING_RESPONSE), - loaded=True) + None, copy.deepcopy(identity_fakes.MAPPING_RESPONSE), loaded=True + ) self.mapping_mock.delete.return_value = None self.cmd = mapping.DeleteMapping(self.app, None) def test_delete_mapping(self): - arglist = [ - identity_fakes.mapping_id - ] - verifylist = [ - ('mapping', [identity_fakes.mapping_id]) - ] + arglist = [identity_fakes.mapping_id] + verifylist = [('mapping', [identity_fakes.mapping_id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.mapping_mock.delete.assert_called_with( - identity_fakes.mapping_id) + self.mapping_mock.delete.assert_called_with(identity_fakes.mapping_id) self.assertIsNone(result) class TestMappingList(TestMapping): - def setUp(self): - super(TestMappingList, self).setUp() + super().setUp() self.mapping_mock.get.return_value = fakes.FakeResource( - None, - {'id': identity_fakes.mapping_id}, - loaded=True) + None, {'id': identity_fakes.mapping_id}, loaded=True + ) # Pretend list command returns list of two mappings. 
# NOTE(marek-denis): We are returning FakeResources with mapping id # only as ShowMapping class is implemented in a way where rules will @@ -116,12 +107,12 @@ def setUp(self): self.mapping_mock.list.return_value = [ fakes.FakeResource( None, - {'id': identity_fakes.mapping_id}, + {'id': identity_fakes.mapping_id, 'schema_version': '1.0'}, loaded=True, ), fakes.FakeResource( None, - {'id': 'extra_mapping'}, + {'id': 'extra_mapping', 'schema_version': '2.0'}, loaded=True, ), ] @@ -138,28 +129,26 @@ def test_mapping_list(self): self.mapping_mock.list.assert_called_with() - collist = ('ID',) + collist = ('ID', 'schema_version') self.assertEqual(collist, columns) - datalist = [(identity_fakes.mapping_id,), ('extra_mapping',)] + datalist = [ + (identity_fakes.mapping_id, '1.0'), + ('extra_mapping', '2.0'), + ] self.assertEqual(datalist, data) class TestMappingSet(TestMapping): - def setUp(self): - super(TestMappingSet, self).setUp() + super().setUp() self.mapping_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.MAPPING_RESPONSE), - loaded=True + None, copy.deepcopy(identity_fakes.MAPPING_RESPONSE), loaded=True ) self.mapping_mock.update.return_value = fakes.FakeResource( - None, - identity_fakes.MAPPING_RESPONSE_2, - loaded=True + None, identity_fakes.MAPPING_RESPONSE_2, loaded=True ) # Get the command object to test @@ -167,74 +156,70 @@ def setUp(self): def test_set_new_rules(self): arglist = [ - '--rules', identity_fakes.mapping_rules_file_path, - identity_fakes.mapping_id + '--rules', + identity_fakes.mapping_rules_file_path, + identity_fakes.mapping_id, ] verifylist = [ ('mapping', identity_fakes.mapping_id), - ('rules', identity_fakes.mapping_rules_file_path) + ('rules', identity_fakes.mapping_rules_file_path), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) mocker = mock.Mock() mocker.return_value = identity_fakes.MAPPING_RULES_2 - with mock.patch("openstackclient.identity.v3.mapping." 
- "SetMapping._read_rules", mocker): + with mock.patch( + "openstackclient.identity.v3.mapping.SetMapping._read_rules", + mocker, + ): result = self.cmd.take_action(parsed_args) self.mapping_mock.update.assert_called_with( mapping=identity_fakes.mapping_id, - rules=identity_fakes.MAPPING_RULES_2) + rules=identity_fakes.MAPPING_RULES_2, + schema_version=None, + ) self.assertIsNone(result) def test_set_rules_wrong_file_path(self): arglist = [ - '--rules', identity_fakes.mapping_rules_file_path, - identity_fakes.mapping_id + '--rules', + identity_fakes.mapping_rules_file_path, + identity_fakes.mapping_id, ] verifylist = [ ('mapping', identity_fakes.mapping_id), - ('rules', identity_fakes.mapping_rules_file_path) + ('rules', identity_fakes.mapping_rules_file_path), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) class TestMappingShow(TestMapping): - def setUp(self): - super(TestMappingShow, self).setUp() + super().setUp() self.mapping_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.MAPPING_RESPONSE), - loaded=True + None, copy.deepcopy(identity_fakes.MAPPING_RESPONSE), loaded=True ) self.cmd = mapping.ShowMapping(self.app, None) def test_mapping_show(self): - arglist = [ - identity_fakes.mapping_id - ] - verifylist = [ - ('mapping', identity_fakes.mapping_id) - ] + arglist = [identity_fakes.mapping_id] + verifylist = [('mapping', identity_fakes.mapping_id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.mapping_mock.get.assert_called_with( - identity_fakes.mapping_id) + self.mapping_mock.get.assert_called_with(identity_fakes.mapping_id) collist = ('id', 'rules') self.assertEqual(collist, columns) - datalist = (identity_fakes.mapping_id, - identity_fakes.MAPPING_RULES) + datalist = (identity_fakes.mapping_id, identity_fakes.MAPPING_RULES) self.assertEqual(datalist, data) diff --git a/openstackclient/tests/unit/identity/v3/test_oauth.py b/openstackclient/tests/unit/identity/v3/test_oauth.py index 3aabd9b8ae..9dcf0be89e 100644 --- a/openstackclient/tests/unit/identity/v3/test_oauth.py +++ b/openstackclient/tests/unit/identity/v3/test_oauth.py @@ -18,10 +18,9 @@ class TestOAuth1(identity_fakes.TestOAuth1): - def setUp(self): - super(TestOAuth1, self).setUp() - identity_client = self.app.client_manager.identity + super().setUp() + identity_client = self.identity_client self.access_tokens_mock = identity_client.oauth1.access_tokens self.access_tokens_mock.reset_mock() self.request_tokens_mock = identity_client.oauth1.request_tokens @@ -33,9 +32,8 @@ def setUp(self): class TestAccessTokenCreate(TestOAuth1): - def setUp(self): - super(TestAccessTokenCreate, self).setUp() + super().setUp() self.access_tokens_mock.create.return_value = fakes.FakeResource( None, @@ -47,11 +45,16 @@ def setUp(self): def test_create_access_tokens(self): arglist = [ - '--consumer-key', identity_fakes.consumer_id, - '--consumer-secret', identity_fakes.consumer_secret, - '--request-key', identity_fakes.request_token_id, - '--request-secret', identity_fakes.request_token_secret, - '--verifier', identity_fakes.oauth_verifier_pin, + '--consumer-key', + identity_fakes.consumer_id, + '--consumer-secret', + identity_fakes.consumer_secret, + '--request-key', + identity_fakes.request_token_id, + '--request-secret', + identity_fakes.request_token_secret, + 
'--verifier', + identity_fakes.oauth_verifier_pin, ] verifylist = [ ('consumer_key', identity_fakes.consumer_id), @@ -83,9 +86,8 @@ def test_create_access_tokens(self): class TestRequestTokenAuthorize(TestOAuth1): - def setUp(self): - super(TestRequestTokenAuthorize, self).setUp() + super().setUp() self.roles_mock.get.return_value = fakes.FakeResource( None, @@ -100,12 +102,14 @@ def setUp(self): def test_authorize_request_tokens(self): arglist = [ - '--request-key', identity_fakes.request_token_id, - '--role', identity_fakes.role_name, + '--request-key', + identity_fakes.request_token_id, + '--role', + identity_fakes.role_name, ] verifylist = [ ('request_key', identity_fakes.request_token_id), - ('role', [identity_fakes.role_name]), + ('roles', [identity_fakes.role_name]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) @@ -117,16 +121,13 @@ def test_authorize_request_tokens(self): collist = ('oauth_verifier',) self.assertEqual(collist, columns) - datalist = ( - identity_fakes.oauth_verifier_pin, - ) + datalist = (identity_fakes.oauth_verifier_pin,) self.assertEqual(datalist, data) class TestRequestTokenCreate(TestOAuth1): - def setUp(self): - super(TestRequestTokenCreate, self).setUp() + super().setUp() self.request_tokens_mock.create.return_value = fakes.FakeResource( None, @@ -144,9 +145,12 @@ def setUp(self): def test_create_request_tokens(self): arglist = [ - '--consumer-key', identity_fakes.consumer_id, - '--consumer-secret', identity_fakes.consumer_secret, - '--project', identity_fakes.project_id, + '--consumer-key', + identity_fakes.consumer_id, + '--consumer-secret', + identity_fakes.consumer_secret, + '--project', + identity_fakes.project_id, ] verifylist = [ ('consumer_key', identity_fakes.consumer_id), diff --git a/openstackclient/tests/unit/identity/v3/test_project.py b/openstackclient/tests/unit/identity/v3/test_project.py index dfd0805b29..065a65cb1f 100644 --- a/openstackclient/tests/unit/identity/v3/test_project.py +++ b/openstackclient/tests/unit/identity/v3/test_project.py @@ -11,7 +11,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-# from unittest import mock from unittest.mock import call @@ -25,21 +24,19 @@ class TestProject(identity_fakes.TestIdentityv3): - def setUp(self): - super(TestProject, self).setUp() + super().setUp() # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains self.domains_mock.reset_mock() # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() class TestProjectCreate(TestProject): - domain = identity_fakes.FakeDomain.create_one_domain() columns = ( @@ -50,14 +47,15 @@ class TestProjectCreate(TestProject): 'is_domain', 'name', 'parent_id', - 'tags' + 'tags', ) def setUp(self): - super(TestProjectCreate, self).setUp() + super().setUp() self.project = identity_fakes.FakeProject.create_one_project( - attrs={'domain_id': self.domain.id}) + attrs={'domain_id': self.domain.id} + ) self.domains_mock.get.return_value = self.domain self.projects_mock.create.return_value = self.project self.datalist = ( @@ -68,7 +66,7 @@ def setUp(self): False, self.project.name, self.project.parent_id, - self.project.tags + self.project.tags, ) # Get the command object to test self.cmd = project.CreateProject(self.app, None) @@ -79,10 +77,9 @@ def test_project_create_no_options(self): ] verifylist = [ ('parent', None), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), - ('tags', []) + ('tags', []), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -103,9 +100,7 @@ def test_project_create_no_options(self): } # ProjectManager.create(name=, domain=, description=, # enabled=, **kwargs) - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) collist = ( 'description', @@ -115,7 +110,7 @@ def test_project_create_no_options(self): 'is_domain', 'name', 'parent_id', - 'tags' + 'tags', ) self.assertEqual(collist, columns) datalist = ( @@ -126,22 +121,22 @@ def test_project_create_no_options(self): False, self.project.name, self.project.parent_id, - self.project.tags + self.project.tags, ) self.assertEqual(datalist, data) def test_project_create_description(self): arglist = [ - '--description', 'new desc', + '--description', + 'new desc', self.project.name, ] verifylist = [ ('description', 'new desc'), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('parent', None), - ('tags', []) + ('tags', []), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -162,25 +157,23 @@ def test_project_create_description(self): } # ProjectManager.create(name=, domain=, description=, # enabled=, **kwargs) - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_project_create_domain(self): arglist = [ - '--domain', self.project.domain_id, + '--domain', + self.project.domain_id, self.project.name, ] verifylist = [ ('domain', self.project.domain_id), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('parent', None), - ('tags', []) + ('tags', []), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -201,25 +194,23 @@ def test_project_create_domain(self): } # ProjectManager.create(name=, domain=, description=, # enabled=, 
**kwargs) - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_project_create_domain_no_perms(self): arglist = [ - '--domain', self.project.domain_id, + '--domain', + self.project.domain_id, self.project.name, ] verifylist = [ ('domain', self.project.domain_id), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('parent', None), - ('tags', []) + ('tags', []), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) mocker = mock.Mock() @@ -238,9 +229,7 @@ def test_project_create_domain_no_perms(self): 'tags': [], 'options': {}, } - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -250,11 +239,10 @@ def test_project_create_enable(self): self.project.name, ] verifylist = [ - ('enable', True), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('parent', None), - ('tags', []) + ('tags', []), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -275,9 +263,7 @@ def test_project_create_enable(self): } # ProjectManager.create(name=, domain=, description=, # enabled=, **kwargs) - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -288,8 +274,7 @@ def test_project_create_disable(self): self.project.name, ] verifylist = [ - ('enable', False), - ('disable', True), + ('enabled', False), ('name', self.project.name), ('parent', None), ] @@ -312,21 +297,21 @@ def test_project_create_disable(self): } # ProjectManager.create(name=, domain=, # description=, enabled=, **kwargs) - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_project_create_property(self): arglist = [ - '--property', 'fee=fi', - '--property', 'fo=fum', + '--property', + 'fee=fi', + '--property', + 'fo=fum', self.project.name, ] verifylist = [ - ('property', {'fee': 'fi', 'fo': 'fum'}), + ('properties', {'fee': 'fi', 'fo': 'fum'}), ('name', self.project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -350,25 +335,23 @@ def test_project_create_property(self): } # ProjectManager.create(name=, domain=, description=, # enabled=, **kwargs) - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_project_create_is_domain_false_property(self): arglist = [ - '--property', 'is_domain=false', + '--property', + 'is_domain=false', self.project.name, ] verifylist = [ ('parent', None), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('tags', []), - ('property', {'is_domain': 'false'}), + ('properties', {'is_domain': 'false'}), ('name', self.project.name), ] @@ -390,25 +373,23 @@ def test_project_create_is_domain_false_property(self): 'tags': [], 'options': {}, } - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, 
data) def test_project_create_is_domain_true_property(self): arglist = [ - '--property', 'is_domain=true', + '--property', + 'is_domain=true', self.project.name, ] verifylist = [ ('parent', None), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('tags', []), - ('property', {'is_domain': 'true'}), + ('properties', {'is_domain': 'true'}), ('name', self.project.name), ] @@ -430,25 +411,23 @@ def test_project_create_is_domain_true_property(self): 'tags': [], 'options': {}, } - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_project_create_is_domain_none_property(self): arglist = [ - '--property', 'is_domain=none', + '--property', + 'is_domain=none', self.project.name, ] verifylist = [ ('parent', None), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('tags', []), - ('property', {'is_domain': 'none'}), + ('properties', {'is_domain': 'none'}), ('name', self.project.name), ] @@ -470,9 +449,7 @@ def test_project_create_is_domain_none_property(self): 'tags': [], 'options': {}, } - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -480,22 +457,24 @@ def test_project_create_is_domain_none_property(self): def test_project_create_parent(self): self.parent = identity_fakes.FakeProject.create_one_project() self.project = identity_fakes.FakeProject.create_one_project( - attrs={'domain_id': self.domain.id, 'parent_id': self.parent.id}) + attrs={'domain_id': self.domain.id, 'parent_id': self.parent.id} + ) self.projects_mock.get.return_value = self.parent self.projects_mock.create.return_value = self.project arglist = [ - '--domain', self.project.domain_id, - '--parent', self.parent.name, + '--domain', + self.project.domain_id, + '--parent', + self.parent.name, self.project.name, ] verifylist = [ ('domain', self.project.domain_id), ('parent', self.parent.name), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), - ('tags', []) + ('tags', []), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -511,9 +490,7 @@ def test_project_create_parent(self): 'options': {}, } - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) collist = ( 'description', @@ -523,7 +500,7 @@ def test_project_create_parent(self): 'is_domain', 'name', 'parent_id', - 'tags' + 'tags', ) self.assertEqual(columns, collist) datalist = ( @@ -534,27 +511,30 @@ def test_project_create_parent(self): self.project.is_domain, self.project.name, self.parent.id, - self.project.tags + self.project.tags, ) self.assertEqual(data, datalist) def test_project_create_invalid_parent(self): self.projects_mock.resource_class.__name__ = 'Project' self.projects_mock.get.side_effect = exceptions.NotFound( - 'Invalid parent') + 'Invalid parent' + ) self.projects_mock.find.side_effect = exceptions.NotFound( - 'Invalid parent') + 'Invalid parent' + ) arglist = [ - '--domain', self.project.domain_id, - '--parent', 'invalid', + '--domain', + self.project.domain_id, + '--parent', + 'invalid', self.project.name, ] verifylist = [ ('domain', self.project.domain_id), ('parent', 'invalid'), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', 
self.project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -567,17 +547,18 @@ def test_project_create_invalid_parent(self): def test_project_create_with_tags(self): arglist = [ - '--domain', self.project.domain_id, - '--tag', 'foo', + '--domain', + self.project.domain_id, + '--tag', + 'foo', self.project.name, ] verifylist = [ ('domain', self.project.domain_id), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('parent', None), - ('tags', ['foo']) + ('tags', ['foo']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -596,9 +577,7 @@ def test_project_create_with_tags(self): 'tags': ['foo'], 'options': {}, } - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -611,11 +590,10 @@ def test_project_create_with_immutable_option(self): verifylist = [ ('immutable', True), ('description', None), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('parent', None), - ('tags', []) + ('tags', []), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -636,9 +614,7 @@ def test_project_create_with_immutable_option(self): } # ProjectManager.create(name=, domain=, description=, # enabled=, **kwargs) - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -649,13 +625,12 @@ def test_project_create_with_no_immutable_option(self): self.project.name, ] verifylist = [ - ('no_immutable', True), + ('immutable', False), ('description', None), - ('enable', False), - ('disable', False), + ('enabled', True), ('name', self.project.name), ('parent', None), - ('tags', []) + ('tags', []), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -676,20 +651,17 @@ def test_project_create_with_no_immutable_option(self): } # ProjectManager.create(name=, domain=, description=, # enabled=, **kwargs) - self.projects_mock.create.assert_called_with( - **kwargs - ) + self.projects_mock.create.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) class TestProjectDelete(TestProject): - project = identity_fakes.FakeProject.create_one_project() def setUp(self): - super(TestProjectDelete, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.projects_mock.get.return_value = self.project @@ -716,8 +688,7 @@ def test_project_delete_no_options(self): @mock.patch.object(utils, 'find_resource') def test_delete_multi_projects_with_exception(self, find_mock): - find_mock.side_effect = [self.project, - exceptions.CommandError] + find_mock.side_effect = [self.project, exceptions.CommandError] arglist = [ self.project.id, 'unexist_project', @@ -731,8 +702,7 @@ def test_delete_multi_projects_with_exception(self, find_mock): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 projects failed to delete.', - str(e)) + self.assertEqual('1 of 2 projects failed to delete.', str(e)) find_mock.assert_any_call(self.projects_mock, self.project.id) find_mock.assert_any_call(self.projects_mock, 'unexist_project') @@ -742,10 +712,10 @@ def test_delete_multi_projects_with_exception(self, find_mock): class TestProjectList(TestProject): - 
domain = identity_fakes.FakeDomain.create_one_domain() project = identity_fakes.FakeProject.create_one_project( - attrs={'domain_id': domain.id}) + attrs={'domain_id': domain.id} + ) projects = identity_fakes.FakeProject.create_projects() columns = ( @@ -759,14 +729,22 @@ class TestProjectList(TestProject): ), ) datalists = ( - (projects[0].description, True, - projects[0].id, projects[0].name,), - (projects[1].description, True, - projects[1].id, projects[1].name,), + ( + projects[0].description, + True, + projects[0].id, + projects[0].name, + ), + ( + projects[1].description, + True, + projects[1].id, + projects[1].name, + ), ) def setUp(self): - super(TestProjectList, self).setUp() + super().setUp() self.projects_mock.list.return_value = [self.project] @@ -804,18 +782,21 @@ def test_project_list_long(self): collist = ('ID', 'Name', 'Domain ID', 'Description', 'Enabled') self.assertEqual(collist, columns) - datalist = (( - self.project.id, - self.project.name, - self.project.domain_id, - self.project.description, - True, - ), ) + datalist = ( + ( + self.project.id, + self.project.name, + self.project.domain_id, + self.project.description, + True, + ), + ) self.assertEqual(datalist, tuple(data)) def test_project_list_domain(self): arglist = [ - '--domain', self.project.domain_id, + '--domain', + self.project.domain_id, ] verifylist = [ ('domain', self.project.domain_id), @@ -830,14 +811,16 @@ def test_project_list_domain(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) self.projects_mock.list.assert_called_with( - domain=self.project.domain_id) + domain=self.project.domain_id + ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) def test_project_list_domain_no_perms(self): arglist = [ - '--domain', self.project.domain_id, + '--domain', + self.project.domain_id, ] verifylist = [ ('domain', self.project.domain_id), @@ -850,17 +833,20 @@ def test_project_list_domain_no_perms(self): columns, data = self.cmd.take_action(parsed_args) self.projects_mock.list.assert_called_with( - domain=self.project.domain_id) + domain=self.project.domain_id + ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) def test_project_list_parent(self): self.parent = identity_fakes.FakeProject.create_one_project() self.project = identity_fakes.FakeProject.create_one_project( - attrs={'domain_id': self.domain.id, 'parent_id': self.parent.id}) + attrs={'domain_id': self.domain.id, 'parent_id': self.parent.id} + ) arglist = [ - '--parent', self.parent.id, + '--parent', + self.parent.id, ] verifylist = [ ('parent', self.parent.id), @@ -879,7 +865,10 @@ def test_project_list_parent(self): def test_project_list_sort(self): self.projects_mock.list.return_value = self.projects - arglist = ['--sort', 'name:asc', ] + arglist = [ + '--sort', + 'name:asc', + ] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -910,8 +899,7 @@ def test_project_list_my_projects(self): auth_ref = identity_fakes.fake_auth_ref( identity_fakes.TOKEN_WITH_PROJECT_ID, ) - ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [ '--my-projects', @@ -926,25 +914,44 @@ def test_project_list_my_projects(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) self.projects_mock.list.assert_called_with( - user=self.app.client_manager.auth_ref.user_id) + user=self.app.client_manager.auth_ref.user_id + ) collist = ('ID', 'Name') self.assertEqual(collist, columns) - datalist = (( - self.project.id, - self.project.name, - ), ) + datalist = ( + ( + self.project.id, + self.project.name, + ), + ) self.assertEqual(datalist, tuple(data)) + def test_project_list_with_option_enabled(self): + arglist = ['--enabled'] + verifylist = [('is_enabled', True)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) -class TestProjectSet(TestProject): + # In base command class Lister in cliff, abstract method take_action() + # returns a tuple containing the column names and an iterable + # containing the data to be listed. + columns, data = self.cmd.take_action(parsed_args) + + kwargs = {'is_enabled': True} + self.projects_mock.list.assert_called_with(**kwargs) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, tuple(data)) + +class TestProjectSet(TestProject): domain = identity_fakes.FakeDomain.create_one_domain() project = identity_fakes.FakeProject.create_one_project( - attrs={'domain_id': domain.id}) + attrs={'domain_id': domain.id, 'tags': ['tag1', 'tag2', 'tag3']} + ) def setUp(self): - super(TestProjectSet, self).setUp() + super().setUp() self.domains_mock.get.return_value = self.domain @@ -960,8 +967,7 @@ def test_project_set_no_options(self): ] verifylist = [ ('project', self.project.name), - ('enable', False), - ('disable', False), + ('enabled', None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -971,15 +977,16 @@ def test_project_set_no_options(self): def test_project_set_name(self): arglist = [ - '--name', 'qwerty', - '--domain', self.project.domain_id, + '--name', + 'qwerty', + '--domain', + self.project.domain_id, self.project.name, ] verifylist = [ ('name', 'qwerty'), ('domain', self.project.domain_id), - ('enable', False), - ('disable', False), + ('enabled', None), ('project', self.project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -992,23 +999,21 @@ def test_project_set_name(self): } # ProjectManager.update(project, name=, domain=, description=, # enabled=, **kwargs) - self.projects_mock.update.assert_called_with( - self.project.id, - **kwargs - ) + self.projects_mock.update.assert_called_with(self.project.id, **kwargs) self.assertIsNone(result) def test_project_set_description(self): arglist = [ - '--domain', self.project.domain_id, - '--description', 'new desc', + '--domain', + self.project.domain_id, + '--description', + 'new desc', self.project.name, ] verifylist = [ ('domain', self.project.domain_id), ('description', 'new desc'), - ('enable', False), - ('disable', False), + ('enabled', None), ('project', self.project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1019,22 +1024,19 @@ def test_project_set_description(self): kwargs = { 'description': 'new desc', } - self.projects_mock.update.assert_called_with( - self.project.id, - **kwargs - ) + self.projects_mock.update.assert_called_with(self.project.id, **kwargs) self.assertIsNone(result) def test_project_set_enable(self): arglist = [ - '--domain', self.project.domain_id, + '--domain', + self.project.domain_id, '--enable', self.project.name, ] verifylist = [ ('domain', self.project.domain_id), - ('enable', True), - ('disable', False), + ('enabled', True), ('project', self.project.name), ] parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) @@ -1045,22 +1047,19 @@ def test_project_set_enable(self): kwargs = { 'enabled': True, } - self.projects_mock.update.assert_called_with( - self.project.id, - **kwargs - ) + self.projects_mock.update.assert_called_with(self.project.id, **kwargs) self.assertIsNone(result) def test_project_set_disable(self): arglist = [ - '--domain', self.project.domain_id, + '--domain', + self.project.domain_id, '--disable', self.project.name, ] verifylist = [ ('domain', self.project.domain_id), - ('enable', False), - ('disable', True), + ('enabled', False), ('project', self.project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1071,22 +1070,22 @@ def test_project_set_disable(self): kwargs = { 'enabled': False, } - self.projects_mock.update.assert_called_with( - self.project.id, - **kwargs - ) + self.projects_mock.update.assert_called_with(self.project.id, **kwargs) self.assertIsNone(result) def test_project_set_property(self): arglist = [ - '--domain', self.project.domain_id, - '--property', 'fee=fi', - '--property', 'fo=fum', + '--domain', + self.project.domain_id, + '--property', + 'fee=fi', + '--property', + 'fo=fum', self.project.name, ] verifylist = [ ('domain', self.project.domain_id), - ('property', {'fee': 'fi', 'fo': 'fum'}), + ('properties', {'fee': 'fi', 'fo': 'fum'}), ('project', self.project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1098,55 +1097,72 @@ def test_project_set_property(self): 'fee': 'fi', 'fo': 'fum', } - self.projects_mock.update.assert_called_with( - self.project.id, - **kwargs - ) + self.projects_mock.update.assert_called_with(self.project.id, **kwargs) self.assertIsNone(result) def test_project_set_tags(self): arglist = [ - '--name', 'qwerty', - '--domain', self.project.domain_id, - '--tag', 'foo', + '--name', + 'qwerty', + '--domain', + self.project.domain_id, + '--tag', + 'foo', self.project.name, ] verifylist = [ ('name', 'qwerty'), ('domain', self.project.domain_id), - ('enable', False), - ('disable', False), + ('enabled', None), ('project', self.project.name), - ('tags', ['foo']) + ('tags', ['foo']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - # Set expected values + # Set expected values. new tag is added to original tags for update. 
kwargs = { 'name': 'qwerty', - 'tags': ['foo'] + 'tags': sorted({'tag1', 'tag2', 'tag3', 'foo'}), } # ProjectManager.update(project, name=, domain=, description=, # enabled=, **kwargs) - self.projects_mock.update.assert_called_with( - self.project.id, - **kwargs - ) + self.projects_mock.update.assert_called_with(self.project.id, **kwargs) + self.assertIsNone(result) + + def test_project_remove_tags(self): + arglist = [ + '--remove-tag', + 'tag1', + '--remove-tag', + 'tag2', + self.project.name, + ] + verifylist = [ + ('enabled', None), + ('project', self.project.name), + ('remove_tags', ['tag1', 'tag2']), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + kwargs = {'tags': list({'tag3'})} + self.projects_mock.update.assert_called_with(self.project.id, **kwargs) self.assertIsNone(result) def test_project_set_with_immutable_option(self): arglist = [ - '--domain', self.project.domain_id, + '--domain', + self.project.domain_id, '--immutable', self.project.name, ] verifylist = [ ('domain', self.project.domain_id), ('immutable', True), - ('enable', False), - ('disable', False), + ('enabled', None), ('project', self.project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1157,23 +1173,20 @@ def test_project_set_with_immutable_option(self): kwargs = { 'options': {'immutable': True}, } - self.projects_mock.update.assert_called_with( - self.project.id, - **kwargs - ) + self.projects_mock.update.assert_called_with(self.project.id, **kwargs) self.assertIsNone(result) def test_project_set_with_no_immutable_option(self): arglist = [ - '--domain', self.project.domain_id, + '--domain', + self.project.domain_id, '--no-immutable', self.project.name, ] verifylist = [ ('domain', self.project.domain_id), - ('no_immutable', True), - ('enable', False), - ('disable', False), + ('immutable', False), + ('enabled', None), ('project', self.project.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1184,28 +1197,24 @@ def test_project_set_with_no_immutable_option(self): kwargs = { 'options': {'immutable': False}, } - self.projects_mock.update.assert_called_with( - self.project.id, - **kwargs - ) + self.projects_mock.update.assert_called_with(self.project.id, **kwargs) self.assertIsNone(result) class TestProjectShow(TestProject): - domain = identity_fakes.FakeDomain.create_one_domain() def setUp(self): - super(TestProjectShow, self).setUp() + super().setUp() self.project = identity_fakes.FakeProject.create_one_project( - attrs={'domain_id': self.domain.id}) + attrs={'domain_id': self.domain.id} + ) # Get the command object to test self.cmd = project.ShowProject(self.app, None) def test_project_show(self): - self.projects_mock.get.return_value = self.project arglist = [ @@ -1216,15 +1225,15 @@ def test_project_show(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.identity.tokens.get_token_data.return_value = \ - {'token': - {'project': - {'domain': {}, - 'name': parsed_args.project, - 'id': parsed_args.project - } - } - } + self.identity_client.tokens.get_token_data.return_value = { + 'token': { + 'project': { + 'domain': {}, + 'name': parsed_args.project, + 'id': parsed_args.project, + } + } + } # In base command class ShowOne in cliff, abstract method take_action() # returns a two-part tuple with a tuple of column names and a tuple of @@ -1241,7 +1250,7 @@ def test_project_show(self): 'is_domain', 'name', 'parent_id', - 'tags' + 'tags', ) 
self.assertEqual(collist, columns) datalist = ( @@ -1252,7 +1261,7 @@ def test_project_show(self): False, self.project.name, self.project.parent_id, - self.project.tags + self.project.tags, ) self.assertEqual(datalist, data) @@ -1260,7 +1269,7 @@ def test_project_show_parents(self): self.project = identity_fakes.FakeProject.create_one_project( attrs={ 'parent_id': self.project.parent_id, - 'parents': [{'project': {'id': self.project.parent_id}}] + 'parents': [{'project': {'id': self.project.parent_id}}], } ) self.projects_mock.get.return_value = self.project @@ -1275,23 +1284,28 @@ def test_project_show_parents(self): ('children', False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.identity.tokens.get_token_data.return_value = \ - {'token': - {'project': - {'domain': {}, - 'name': parsed_args.project, - 'id': parsed_args.project - } - } - } + self.identity_client.tokens.get_token_data.return_value = { + 'token': { + 'project': { + 'domain': {}, + 'name': parsed_args.project, + 'id': parsed_args.project, + } + } + } columns, data = self.cmd.take_action(parsed_args) - self.projects_mock.get.assert_has_calls([call(self.project.id), - call(self.project.id, - parents_as_ids=True, - subtree_as_ids=False, - )]) + self.projects_mock.get.assert_has_calls( + [ + call(self.project.id), + call( + self.project.id, + parents_as_ids=True, + subtree_as_ids=False, + ), + ] + ) collist = ( 'description', @@ -1302,7 +1316,7 @@ def test_project_show_parents(self): 'name', 'parent_id', 'parents', - 'tags' + 'tags', ) self.assertEqual(columns, collist) datalist = ( @@ -1314,7 +1328,7 @@ def test_project_show_parents(self): self.project.name, self.project.parent_id, [{'project': {'id': self.project.parent_id}}], - self.project.tags + self.project.tags, ) self.assertEqual(data, datalist) @@ -1322,7 +1336,7 @@ def test_project_show_subtree(self): self.project = identity_fakes.FakeProject.create_one_project( attrs={ 'parent_id': self.project.parent_id, - 'subtree': [{'project': {'id': 'children-id'}}] + 'subtree': [{'project': {'id': 'children-id'}}], } ) self.projects_mock.get.return_value = self.project @@ -1337,22 +1351,27 @@ def test_project_show_subtree(self): ('children', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.identity.tokens.get_token_data.return_value = \ - {'token': - {'project': - {'domain': {}, - 'name': parsed_args.project, - 'id': parsed_args.project - } - } - } + self.identity_client.tokens.get_token_data.return_value = { + 'token': { + 'project': { + 'domain': {}, + 'name': parsed_args.project, + 'id': parsed_args.project, + } + } + } columns, data = self.cmd.take_action(parsed_args) - self.projects_mock.get.assert_has_calls([call(self.project.id), - call(self.project.id, - parents_as_ids=False, - subtree_as_ids=True, - )]) + self.projects_mock.get.assert_has_calls( + [ + call(self.project.id), + call( + self.project.id, + parents_as_ids=False, + subtree_as_ids=True, + ), + ] + ) collist = ( 'description', @@ -1363,7 +1382,7 @@ def test_project_show_subtree(self): 'name', 'parent_id', 'subtree', - 'tags' + 'tags', ) self.assertEqual(columns, collist) datalist = ( @@ -1375,7 +1394,7 @@ def test_project_show_subtree(self): self.project.name, self.project.parent_id, [{'project': {'id': 'children-id'}}], - self.project.tags + self.project.tags, ) self.assertEqual(data, datalist) @@ -1384,7 +1403,7 @@ def test_project_show_parents_and_children(self): attrs={ 'parent_id': self.project.parent_id, 
'parents': [{'project': {'id': self.project.parent_id}}], - 'subtree': [{'project': {'id': 'children-id'}}] + 'subtree': [{'project': {'id': 'children-id'}}], } ) self.projects_mock.get.return_value = self.project @@ -1400,22 +1419,27 @@ def test_project_show_parents_and_children(self): ('children', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.app.client_manager.identity.tokens.get_token_data.return_value = \ - {'token': - {'project': - {'domain': {}, - 'name': parsed_args.project, - 'id': parsed_args.project - } - } - } + self.identity_client.tokens.get_token_data.return_value = { + 'token': { + 'project': { + 'domain': {}, + 'name': parsed_args.project, + 'id': parsed_args.project, + } + } + } columns, data = self.cmd.take_action(parsed_args) - self.projects_mock.get.assert_has_calls([call(self.project.id), - call(self.project.id, - parents_as_ids=True, - subtree_as_ids=True, - )]) + self.projects_mock.get.assert_has_calls( + [ + call(self.project.id), + call( + self.project.id, + parents_as_ids=True, + subtree_as_ids=True, + ), + ] + ) collist = ( 'description', @@ -1427,7 +1451,7 @@ def test_project_show_parents_and_children(self): 'parent_id', 'parents', 'subtree', - 'tags' + 'tags', ) self.assertEqual(columns, collist) datalist = ( @@ -1440,27 +1464,29 @@ def test_project_show_parents_and_children(self): self.project.parent_id, [{'project': {'id': self.project.parent_id}}], [{'project': {'id': 'children-id'}}], - self.project.tags + self.project.tags, ) self.assertEqual(data, datalist) def test_project_show_with_domain(self): project = identity_fakes.FakeProject.create_one_project( - {"name": self.project.name}) - - self.app.client_manager.identity.tokens.get_token_data.return_value = \ - {'token': - {'project': - {'domain': {"id": self.project.domain_id}, - 'name': self.project.name, - 'id': self.project.id - } - } - } - - identity_client = self.app.client_manager.identity + {"name": self.project.name} + ) + + self.identity_client.tokens.get_token_data.return_value = { + 'token': { + 'project': { + 'domain': {"id": self.project.domain_id}, + 'name': self.project.name, + 'id': self.project.id, + } + } + } + + identity_client = self.identity_client arglist = [ - "--domain", self.domain.id, + "--domain", + self.domain.id, project.name, ] verifylist = [ @@ -1469,13 +1495,14 @@ def test_project_show_with_domain(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - project_str = common._get_token_resource(identity_client, 'project', - parsed_args.project, - parsed_args.domain) + project_str = common._get_token_resource( + identity_client, 'project', parsed_args.project, parsed_args.domain + ) self.assertEqual(self.project.id, project_str) arglist = [ - "--domain", project.domain_id, + "--domain", + project.domain_id, project.name, ] verifylist = [ @@ -1484,7 +1511,7 @@ def test_project_show_with_domain(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - project_str = common._get_token_resource(identity_client, 'project', - parsed_args.project, - parsed_args.domain) + project_str = common._get_token_resource( + identity_client, 'project', parsed_args.project, parsed_args.domain + ) self.assertEqual(project.name, project_str) diff --git a/openstackclient/tests/unit/identity/v3/test_protocol.py b/openstackclient/tests/unit/identity/v3/test_protocol.py index 30b4aa4a11..c85699685a 100644 --- a/openstackclient/tests/unit/identity/v3/test_protocol.py +++ b/openstackclient/tests/unit/identity/v3/test_protocol.py @@ 
-20,19 +20,17 @@ class TestProtocol(identity_fakes.TestFederatedIdentity): - def setUp(self): - super(TestProtocol, self).setUp() + super().setUp() - federation_lib = self.app.client_manager.identity.federation + federation_lib = self.identity_client.federation self.protocols_mock = federation_lib.protocols self.protocols_mock.reset_mock() class TestProtocolCreate(TestProtocol): - def setUp(self): - super(TestProtocolCreate, self).setUp() + super().setUp() proto = copy.deepcopy(identity_fakes.PROTOCOL_OUTPUT) resource = fakes.FakeResource(None, proto, loaded=True) @@ -42,35 +40,39 @@ def setUp(self): def test_create_protocol(self): argslist = [ identity_fakes.protocol_id, - '--identity-provider', identity_fakes.idp_id, - '--mapping', identity_fakes.mapping_id + '--identity-provider', + identity_fakes.idp_id, + '--mapping', + identity_fakes.mapping_id, ] verifylist = [ ('federation_protocol', identity_fakes.protocol_id), ('identity_provider', identity_fakes.idp_id), - ('mapping', identity_fakes.mapping_id) + ('mapping', identity_fakes.mapping_id), ] parsed_args = self.check_parser(self.cmd, argslist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.protocols_mock.create.assert_called_with( protocol_id=identity_fakes.protocol_id, identity_provider=identity_fakes.idp_id, - mapping=identity_fakes.mapping_id) + mapping=identity_fakes.mapping_id, + ) collist = ('id', 'identity_provider', 'mapping') self.assertEqual(collist, columns) - datalist = (identity_fakes.protocol_id, - identity_fakes.idp_id, - identity_fakes.mapping_id) + datalist = ( + identity_fakes.protocol_id, + identity_fakes.idp_id, + identity_fakes.mapping_id, + ) self.assertEqual(datalist, data) class TestProtocolDelete(TestProtocol): - def setUp(self): - super(TestProtocolDelete, self).setUp() + super().setUp() # This is the return value for utils.find_resource() self.protocols_mock.get.return_value = fakes.FakeResource( @@ -84,8 +86,9 @@ def setUp(self): def test_delete_identity_provider(self): arglist = [ - '--identity-provider', identity_fakes.idp_id, - identity_fakes.protocol_id + '--identity-provider', + identity_fakes.idp_id, + identity_fakes.protocol_id, ] verifylist = [ ('federation_protocol', [identity_fakes.protocol_id]), @@ -96,20 +99,24 @@ def test_delete_identity_provider(self): result = self.cmd.take_action(parsed_args) self.protocols_mock.delete.assert_called_with( - identity_fakes.idp_id, identity_fakes.protocol_id) + identity_fakes.idp_id, identity_fakes.protocol_id + ) self.assertIsNone(result) class TestProtocolList(TestProtocol): - def setUp(self): - super(TestProtocolList, self).setUp() + super().setUp() self.protocols_mock.get.return_value = fakes.FakeResource( - None, identity_fakes.PROTOCOL_ID_MAPPING, loaded=True) + None, identity_fakes.PROTOCOL_ID_MAPPING, loaded=True + ) - self.protocols_mock.list.return_value = [fakes.FakeResource( - None, identity_fakes.PROTOCOL_ID_MAPPING, loaded=True)] + self.protocols_mock.list.return_value = [ + fakes.FakeResource( + None, identity_fakes.PROTOCOL_ID_MAPPING, loaded=True + ) + ] self.cmd = federation_protocol.ListProtocols(self.app, None) @@ -124,65 +131,83 @@ def test_list_protocols(self): class TestProtocolSet(TestProtocol): - def setUp(self): - super(TestProtocolSet, self).setUp() + super().setUp() self.protocols_mock.get.return_value = fakes.FakeResource( - None, identity_fakes.PROTOCOL_OUTPUT, loaded=True) + None, identity_fakes.PROTOCOL_OUTPUT, loaded=True + ) self.protocols_mock.update.return_value = fakes.FakeResource( - None, 
identity_fakes.PROTOCOL_OUTPUT_UPDATED, loaded=True) + None, identity_fakes.PROTOCOL_OUTPUT_UPDATED, loaded=True + ) self.cmd = federation_protocol.SetProtocol(self.app, None) def test_set_new_mapping(self): arglist = [ identity_fakes.protocol_id, - '--identity-provider', identity_fakes.idp_id, - '--mapping', identity_fakes.mapping_id + '--identity-provider', + identity_fakes.idp_id, + '--mapping', + identity_fakes.mapping_id, + ] + verifylist = [ + ('identity_provider', identity_fakes.idp_id), + ('federation_protocol', identity_fakes.protocol_id), + ('mapping', identity_fakes.mapping_id), ] - verifylist = [('identity_provider', identity_fakes.idp_id), - ('federation_protocol', identity_fakes.protocol_id), - ('mapping', identity_fakes.mapping_id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.protocols_mock.update.assert_called_with( - identity_fakes.idp_id, identity_fakes.protocol_id, - identity_fakes.mapping_id) + identity_fakes.idp_id, + identity_fakes.protocol_id, + identity_fakes.mapping_id, + ) collist = ('id', 'identity_provider', 'mapping') self.assertEqual(collist, columns) - datalist = (identity_fakes.protocol_id, identity_fakes.idp_id, - identity_fakes.mapping_id_updated) + datalist = ( + identity_fakes.protocol_id, + identity_fakes.idp_id, + identity_fakes.mapping_id_updated, + ) self.assertEqual(datalist, data) class TestProtocolShow(TestProtocol): - def setUp(self): - super(TestProtocolShow, self).setUp() + super().setUp() self.protocols_mock.get.return_value = fakes.FakeResource( - None, identity_fakes.PROTOCOL_OUTPUT, loaded=False) + None, identity_fakes.PROTOCOL_OUTPUT, loaded=False + ) self.cmd = federation_protocol.ShowProtocol(self.app, None) def test_show_protocol(self): - arglist = [identity_fakes.protocol_id, '--identity-provider', - identity_fakes.idp_id] - verifylist = [('federation_protocol', identity_fakes.protocol_id), - ('identity_provider', identity_fakes.idp_id)] + arglist = [ + identity_fakes.protocol_id, + '--identity-provider', + identity_fakes.idp_id, + ] + verifylist = [ + ('federation_protocol', identity_fakes.protocol_id), + ('identity_provider', identity_fakes.idp_id), + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.protocols_mock.get.assert_called_with(identity_fakes.idp_id, - identity_fakes.protocol_id) + self.protocols_mock.get.assert_called_with( + identity_fakes.idp_id, identity_fakes.protocol_id + ) collist = ('id', 'identity_provider', 'mapping') self.assertEqual(collist, columns) - datalist = (identity_fakes.protocol_id, - identity_fakes.idp_id, - identity_fakes.mapping_id) + datalist = ( + identity_fakes.protocol_id, + identity_fakes.idp_id, + identity_fakes.mapping_id, + ) self.assertEqual(datalist, data) diff --git a/openstackclient/tests/unit/identity/v3/test_region.py b/openstackclient/tests/unit/identity/v3/test_region.py index e83a4e9f02..eecb079133 100644 --- a/openstackclient/tests/unit/identity/v3/test_region.py +++ b/openstackclient/tests/unit/identity/v3/test_region.py @@ -11,56 +11,44 @@ # under the License. 
# -import copy + +from openstack.identity.v3 import region as _region +from openstack.test import fakes as sdk_fakes from openstackclient.identity.v3 import region -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestRegion(identity_fakes.TestIdentityv3): - - def setUp(self): - super(TestRegion, self).setUp() - - # Get a shortcut to the RegionManager Mock - self.regions_mock = self.app.client_manager.identity.regions - self.regions_mock.reset_mock() - - -class TestRegionCreate(TestRegion): - +class TestRegionCreate(identity_fakes.TestIdentityv3): + region = sdk_fakes.generate_fake_resource(_region.Region) columns = ( + 'region', 'description', 'parent_region', - 'region', ) datalist = ( - identity_fakes.region_description, - identity_fakes.region_parent_region_id, - identity_fakes.region_id, + region.id, + region.description, + region.parent_region_id, ) def setUp(self): - super(TestRegionCreate, self).setUp() + super().setUp() - self.regions_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.REGION), - loaded=True, - ) + self.identity_sdk_client.create_region.return_value = self.region # Get the command object to test self.cmd = region.CreateRegion(self.app, None) def test_region_create_description(self): arglist = [ - identity_fakes.region_id, - '--description', identity_fakes.region_description, + self.region.id, + '--description', + self.region.description, ] verifylist = [ - ('region', identity_fakes.region_id), - ('description', identity_fakes.region_description) + ('region', self.region.id), + ('description', self.region.description), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -71,23 +59,21 @@ def test_region_create_description(self): # Set expected values kwargs = { - 'description': identity_fakes.region_description, - 'id': identity_fakes.region_id, - 'parent_region': None, + 'description': self.region.description, + 'id': self.region.id, + 'parent_region_id': None, } - self.regions_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_region.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_region_create_no_options(self): arglist = [ - identity_fakes.region_id, + self.region.id, ] verifylist = [ - ('region', identity_fakes.region_id), + ('region', self.region.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -99,24 +85,23 @@ def test_region_create_no_options(self): # Set expected values kwargs = { 'description': None, - 'id': identity_fakes.region_id, - 'parent_region': None, + 'id': self.region.id, + 'parent_region_id': None, } - self.regions_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_region.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_region_create_parent_region_id(self): arglist = [ - identity_fakes.region_id, - '--parent-region', identity_fakes.region_parent_region_id, + self.region.id, + '--parent-region', + self.region.parent_region_id, ] verifylist = [ - ('region', identity_fakes.region_id), - ('parent_region', identity_fakes.region_parent_region_id), + ('region', self.region.id), + ('parent_region', self.region.parent_region_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -128,45 +113,43 @@ def test_region_create_parent_region_id(self): # Set expected values kwargs = { 'description': None, 
- 'id': identity_fakes.region_id, - 'parent_region': identity_fakes.region_parent_region_id, + 'id': self.region.id, + 'parent_region_id': self.region.parent_region_id, } - self.regions_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_region.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) -class TestRegionDelete(TestRegion): - +class TestRegionDelete(identity_fakes.TestIdentityv3): def setUp(self): - super(TestRegionDelete, self).setUp() + super().setUp() - self.regions_mock.delete.return_value = None + self.region = sdk_fakes.generate_fake_resource(_region.Region) + self.identity_sdk_client.delete_region.return_value = None # Get the command object to test self.cmd = region.DeleteRegion(self.app, None) def test_region_delete_no_options(self): arglist = [ - identity_fakes.region_id, + self.region.id, ] verifylist = [ - ('region', [identity_fakes.region_id]), + ('region', [self.region.id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.regions_mock.delete.assert_called_with( - identity_fakes.region_id, + self.identity_sdk_client.delete_region.assert_called_with( + self.region.id, ) self.assertIsNone(result) -class TestRegionList(TestRegion): - +class TestRegionList(identity_fakes.TestIdentityv3): + region = sdk_fakes.generate_fake_resource(_region.Region) columns = ( 'Region', 'Parent Region', @@ -174,22 +157,16 @@ class TestRegionList(TestRegion): ) datalist = ( ( - identity_fakes.region_id, - identity_fakes.region_parent_region_id, - identity_fakes.region_description, + region.id, + region.parent_region_id, + region.description, ), ) def setUp(self): - super(TestRegionList, self).setUp() - - self.regions_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.REGION), - loaded=True, - ), - ] + super().setUp() + + self.identity_sdk_client.regions.return_value = [self.region] # Get the command object to test self.cmd = region.ListRegion(self.app, None) @@ -203,17 +180,18 @@ def test_region_list_no_options(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.regions_mock.list.assert_called_with() + self.identity_sdk_client.regions.assert_called_with() self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) def test_region_list_parent_region_id(self): arglist = [ - '--parent-region', identity_fakes.region_parent_region_id, + '--parent-region', + self.region.parent_region_id, ] verifylist = [ - ('parent_region', identity_fakes.region_parent_region_id), + ('parent_region', self.region.parent_region_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -221,53 +199,49 @@ def test_region_list_parent_region_id(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.regions_mock.list.assert_called_with( - parent_region_id=identity_fakes.region_parent_region_id) + self.identity_sdk_client.regions.assert_called_with( + parent_region_id=self.region.parent_region_id + ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) -class TestRegionSet(TestRegion): - +class TestRegionSet(identity_fakes.TestIdentityv3): def setUp(self): - super(TestRegionSet, self).setUp() + super().setUp() - self.regions_mock.update.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.REGION), - loaded=True, - ) + self.region = sdk_fakes.generate_fake_resource(_region.Region) # Get the command object to test self.cmd = region.SetRegion(self.app, None) def test_region_set_no_options(self): arglist = [ - identity_fakes.region_id, + self.region.id, ] verifylist = [ - ('region', identity_fakes.region_id), + ('region', self.region.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) kwargs = {} - self.regions_mock.update.assert_called_with( - identity_fakes.region_id, - **kwargs + self.identity_sdk_client.update_region.assert_called_with( + self.region.id, **kwargs ) self.assertIsNone(result) def test_region_set_description(self): arglist = [ - '--description', 'qwerty', - identity_fakes.region_id, + '--description', + 'qwerty', + self.region.id, ] verifylist = [ ('description', 'qwerty'), - ('region', identity_fakes.region_id), + ('region', self.region.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -277,20 +251,20 @@ def test_region_set_description(self): kwargs = { 'description': 'qwerty', } - self.regions_mock.update.assert_called_with( - identity_fakes.region_id, - **kwargs + self.identity_sdk_client.update_region.assert_called_with( + self.region.id, **kwargs ) self.assertIsNone(result) def test_region_set_parent_region_id(self): arglist = [ - '--parent-region', 'new_parent', - identity_fakes.region_id, + '--parent-region', + 'new_parent', + self.region.id, ] verifylist = [ ('parent_region', 'new_parent'), - ('region', identity_fakes.region_id), + ('region', self.region.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -298,35 +272,30 @@ def test_region_set_parent_region_id(self): # Set expected values kwargs = { - 'parent_region': 'new_parent', + 'parent_region_id': 'new_parent', } - self.regions_mock.update.assert_called_with( - identity_fakes.region_id, - **kwargs + self.identity_sdk_client.update_region.assert_called_with( + self.region.id, **kwargs ) self.assertIsNone(result) -class TestRegionShow(TestRegion): - +class TestRegionShow(identity_fakes.TestIdentityv3): def setUp(self): - super(TestRegionShow, self).setUp() + super().setUp() - self.regions_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.REGION), - loaded=True, - ) + self.region = sdk_fakes.generate_fake_resource(_region.Region) + self.identity_sdk_client.get_region.return_value = self.region # Get the command object to test self.cmd = region.ShowRegion(self.app, None) def test_region_show(self): arglist = [ - identity_fakes.region_id, + self.region.id, ] verifylist = [ - ('region', identity_fakes.region_id), + ('region', self.region.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -334,15 +303,15 @@ def test_region_show(self): # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.regions_mock.get.assert_called_with( - identity_fakes.region_id, + self.identity_sdk_client.get_region.assert_called_with( + self.region.id, ) - collist = ('description', 'parent_region', 'region') + collist = ('region', 'description', 'parent_region') self.assertEqual(collist, columns) datalist = ( - identity_fakes.region_description, - identity_fakes.region_parent_region_id, - identity_fakes.region_id, + self.region.id, + self.region.description, + self.region.parent_region_id, ) self.assertEqual(datalist, data) diff --git a/openstackclient/tests/unit/identity/v3/test_registered_limit.py b/openstackclient/tests/unit/identity/v3/test_registered_limit.py index 262ca4f98f..a120714ec6 100644 --- a/openstackclient/tests/unit/identity/v3/test_registered_limit.py +++ b/openstackclient/tests/unit/identity/v3/test_registered_limit.py @@ -21,36 +21,29 @@ class TestRegisteredLimit(identity_fakes.TestIdentityv3): - def setUp(self): - super(TestRegisteredLimit, self).setUp() + super().setUp() - identity_manager = self.app.client_manager.identity - self.registered_limit_mock = identity_manager.registered_limits + self.registered_limit_mock = self.identity_client.registered_limits - self.services_mock = identity_manager.services + self.services_mock = self.identity_client.services self.services_mock.reset_mock() - self.regions_mock = identity_manager.regions + self.regions_mock = self.identity_client.regions self.regions_mock.reset_mock() class TestRegisteredLimitCreate(TestRegisteredLimit): - def setUp(self): - super(TestRegisteredLimitCreate, self).setUp() + super().setUp() self.service = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.SERVICE), - loaded=True + None, copy.deepcopy(identity_fakes.SERVICE), loaded=True ) self.services_mock.get.return_value = self.service self.region = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.REGION), - loaded=True + None, copy.deepcopy(identity_fakes.REGION), loaded=True ) self.regions_mock.get.return_value = self.region @@ -58,23 +51,23 @@ def setUp(self): def test_registered_limit_create_without_options(self): self.registered_limit_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.REGISTERED_LIMIT), - loaded=True + None, copy.deepcopy(identity_fakes.REGISTERED_LIMIT), loaded=True ) resource_name = identity_fakes.registered_limit_resource_name default_limit = identity_fakes.registered_limit_default_limit arglist = [ - '--service', identity_fakes.service_id, - '--default-limit', '10', + '--service', + identity_fakes.service_id, + '--default-limit', + '10', resource_name, ] verifylist = [ ('service', identity_fakes.service_id), ('default_limit', default_limit), - ('resource_name', resource_name) + ('resource_name', resource_name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -85,8 +78,14 @@ def test_registered_limit_create_without_options(self): self.service, resource_name, default_limit, **kwargs ) - collist = ('default_limit', 'description', 'id', 'region_id', - 'resource_name', 'service_id') + collist = ( + 'default_limit', + 'description', + 'id', + 'region_id', + 'resource_name', + 'service_id', + ) self.assertEqual(collist, columns) datalist = ( @@ -95,7 +94,7 @@ def test_registered_limit_create_without_options(self): identity_fakes.registered_limit_id, None, identity_fakes.registered_limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) @@ -103,18 +102,22 @@ def 
test_registered_limit_create_with_options(self): self.registered_limit_mock.create.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.REGISTERED_LIMIT_OPTIONS), - loaded=True + loaded=True, ) resource_name = identity_fakes.registered_limit_resource_name default_limit = identity_fakes.registered_limit_default_limit description = identity_fakes.registered_limit_description arglist = [ - '--region', identity_fakes.region_id, - '--description', description, - '--service', identity_fakes.service_id, - '--default-limit', '10', - resource_name + '--region', + identity_fakes.region_id, + '--description', + description, + '--service', + identity_fakes.service_id, + '--default-limit', + '10', + resource_name, ] verifylist = [ @@ -122,7 +125,7 @@ def test_registered_limit_create_with_options(self): ('description', description), ('service', identity_fakes.service_id), ('default_limit', default_limit), - ('resource_name', resource_name) + ('resource_name', resource_name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -133,8 +136,14 @@ def test_registered_limit_create_with_options(self): self.service, resource_name, default_limit, **kwargs ) - collist = ('default_limit', 'description', 'id', 'region_id', - 'resource_name', 'service_id') + collist = ( + 'default_limit', + 'description', + 'id', + 'region_id', + 'resource_name', + 'service_id', + ) self.assertEqual(collist, columns) datalist = ( @@ -143,15 +152,14 @@ def test_registered_limit_create_with_options(self): identity_fakes.registered_limit_id, identity_fakes.region_id, identity_fakes.registered_limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) class TestRegisteredLimitDelete(TestRegisteredLimit): - def setUp(self): - super(TestRegisteredLimitDelete, self).setUp() + super().setUp() self.cmd = registered_limit.DeleteRegisteredLimit(self.app, None) @@ -160,7 +168,7 @@ def test_registered_limit_delete(self): arglist = [identity_fakes.registered_limit_id] verifylist = [ - ('registered_limit_id', [identity_fakes.registered_limit_id]) + ('registered_limits', [identity_fakes.registered_limit_id]) ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -176,9 +184,7 @@ def test_registered_limit_delete_with_exception(self): self.registered_limit_mock.delete.side_effect = return_value arglist = ['fake-registered-limit-id'] - verifylist = [ - ('registered_limit_id', ['fake-registered-limit-id']) - ] + verifylist = [('registered_limits', ['fake-registered-limit-id'])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) try: @@ -191,14 +197,11 @@ def test_registered_limit_delete_with_exception(self): class TestRegisteredLimitShow(TestRegisteredLimit): - def setUp(self): - super(TestRegisteredLimitShow, self).setUp() + super().setUp() self.registered_limit_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.REGISTERED_LIMIT), - loaded=True + None, copy.deepcopy(identity_fakes.REGISTERED_LIMIT), loaded=True ) self.cmd = registered_limit.ShowRegisteredLimit(self.app, None) @@ -217,8 +220,12 @@ def test_registered_limit_show(self): ) collist = ( - 'default_limit', 'description', 'id', 'region_id', 'resource_name', - 'service_id' + 'default_limit', + 'description', + 'id', + 'region_id', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -227,15 +234,14 @@ def test_registered_limit_show(self): identity_fakes.registered_limit_id, None, 
identity_fakes.registered_limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) class TestRegisteredLimitSet(TestRegisteredLimit): - def setUp(self): - super(TestRegisteredLimitSet, self).setUp() + super().setUp() self.cmd = registered_limit.SetRegisteredLimit(self.app, None) def test_registered_limit_set_description(self): @@ -248,12 +254,13 @@ def test_registered_limit_set_description(self): ) arglist = [ - '--description', identity_fakes.registered_limit_description, - identity_fakes.registered_limit_id + '--description', + identity_fakes.registered_limit_description, + identity_fakes.registered_limit_id, ] verifylist = [ ('description', identity_fakes.registered_limit_description), - ('registered_limit_id', identity_fakes.registered_limit_id) + ('registered_limit_id', identity_fakes.registered_limit_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -265,12 +272,16 @@ def test_registered_limit_set_description(self): resource_name=None, default_limit=None, description=identity_fakes.registered_limit_description, - region=None + region=None, ) collist = ( - 'default_limit', 'description', 'id', 'region_id', 'resource_name', - 'service_id' + 'default_limit', + 'description', + 'id', + 'region_id', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -279,7 +290,7 @@ def test_registered_limit_set_description(self): identity_fakes.registered_limit_id, None, identity_fakes.registered_limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) @@ -292,12 +303,13 @@ def test_registered_limit_set_default_limit(self): ) arglist = [ - '--default-limit', str(default_limit), - identity_fakes.registered_limit_id + '--default-limit', + str(default_limit), + identity_fakes.registered_limit_id, ] verifylist = [ ('default_limit', default_limit), - ('registered_limit_id', identity_fakes.registered_limit_id) + ('registered_limit_id', identity_fakes.registered_limit_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -309,12 +321,16 @@ def test_registered_limit_set_default_limit(self): resource_name=None, default_limit=default_limit, description=None, - region=None + region=None, ) collist = ( - 'default_limit', 'description', 'id', 'region_id', 'resource_name', - 'service_id' + 'default_limit', + 'description', + 'id', + 'region_id', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -323,7 +339,7 @@ def test_registered_limit_set_default_limit(self): identity_fakes.registered_limit_id, None, identity_fakes.registered_limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) @@ -336,12 +352,13 @@ def test_registered_limit_set_resource_name(self): ) arglist = [ - '--resource-name', resource_name, - identity_fakes.registered_limit_id + '--resource-name', + resource_name, + identity_fakes.registered_limit_id, ] verifylist = [ ('resource_name', resource_name), - ('registered_limit_id', identity_fakes.registered_limit_id) + ('registered_limit_id', identity_fakes.registered_limit_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -353,12 +370,16 @@ def test_registered_limit_set_resource_name(self): resource_name=resource_name, default_limit=None, description=None, - region=None + region=None, ) collist = ( - 'default_limit', 'description', 'id', 'region_id', 'resource_name', - 'service_id' + 'default_limit', + 
'description', + 'id', + 'region_id', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -367,7 +388,7 @@ def test_registered_limit_set_resource_name(self): identity_fakes.registered_limit_id, None, resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) @@ -380,13 +401,10 @@ def test_registered_limit_set_service(self): ) self.services_mock.get.return_value = service - arglist = [ - '--service', service.id, - identity_fakes.registered_limit_id - ] + arglist = ['--service', service.id, identity_fakes.registered_limit_id] verifylist = [ ('service', service.id), - ('registered_limit_id', identity_fakes.registered_limit_id) + ('registered_limit_id', identity_fakes.registered_limit_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -398,12 +416,16 @@ def test_registered_limit_set_service(self): resource_name=None, default_limit=None, description=None, - region=None + region=None, ) collist = ( - 'default_limit', 'description', 'id', 'region_id', 'resource_name', - 'service_id' + 'default_limit', + 'description', + 'id', + 'region_id', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -412,7 +434,7 @@ def test_registered_limit_set_service(self): identity_fakes.registered_limit_id, None, identity_fakes.registered_limit_resource_name, - service.id + service.id, ) self.assertEqual(datalist, data) @@ -420,24 +442,17 @@ def test_registered_limit_set_region(self): registered_limit = copy.deepcopy(identity_fakes.REGISTERED_LIMIT) region = identity_fakes.REGION region['id'] = 'RegionTwo' - region = fakes.FakeResource( - None, - copy.deepcopy(region), - loaded=True - ) + region = fakes.FakeResource(None, copy.deepcopy(region), loaded=True) registered_limit['region_id'] = region.id self.registered_limit_mock.update.return_value = fakes.FakeResource( None, registered_limit, loaded=True ) self.regions_mock.get.return_value = region - arglist = [ - '--region', region.id, - identity_fakes.registered_limit_id - ] + arglist = ['--region', region.id, identity_fakes.registered_limit_id] verifylist = [ ('region', region.id), - ('registered_limit_id', identity_fakes.registered_limit_id) + ('registered_limit_id', identity_fakes.registered_limit_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -449,12 +464,16 @@ def test_registered_limit_set_region(self): resource_name=None, default_limit=None, description=None, - region=region + region=region, ) collist = ( - 'default_limit', 'description', 'id', 'region_id', 'resource_name', - 'service_id' + 'default_limit', + 'description', + 'id', + 'region_id', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -463,20 +482,17 @@ def test_registered_limit_set_region(self): identity_fakes.registered_limit_id, region.id, identity_fakes.registered_limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) class TestRegisteredLimitList(TestRegisteredLimit): - def setUp(self): - super(TestRegisteredLimitList, self).setUp() + super().setUp() self.registered_limit_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.REGISTERED_LIMIT), - loaded=True + None, copy.deepcopy(identity_fakes.REGISTERED_LIMIT), loaded=True ) self.cmd = registered_limit.ShowRegisteredLimit(self.app, None) @@ -495,8 +511,12 @@ def test_limit_show(self): ) collist = ( - 'default_limit', 'description', 'id', 'region_id', 
'resource_name', - 'service_id' + 'default_limit', + 'description', + 'id', + 'region_id', + 'resource_name', + 'service_id', ) self.assertEqual(collist, columns) datalist = ( @@ -505,6 +525,6 @@ def test_limit_show(self): identity_fakes.registered_limit_id, None, identity_fakes.registered_limit_resource_name, - identity_fakes.service_id + identity_fakes.service_id, ) self.assertEqual(datalist, data) diff --git a/openstackclient/tests/unit/identity/v3/test_role.py b/openstackclient/tests/unit/identity/v3/test_role.py index 774b2c2b5f..90b2d7121c 100644 --- a/openstackclient/tests/unit/identity/v3/test_role.py +++ b/openstackclient/tests/unit/identity/v3/test_role.py @@ -13,111 +13,96 @@ # under the License. # -import copy from unittest import mock +from openstack import exceptions as sdk_exc +from openstack.identity.v3 import domain as _domain +from openstack.identity.v3 import group as _group +from openstack.identity.v3 import project as _project +from openstack.identity.v3 import role as _role +from openstack.identity.v3 import system as _system +from openstack.identity.v3 import user as _user +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions -from osc_lib import utils -from openstackclient.identity import common from openstackclient.identity.v3 import role -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestRole(identity_fakes.TestIdentityv3): - - def setUp(self): - super(TestRole, self).setUp() - - # Get a shortcut to the UserManager Mock - self.users_mock = self.app.client_manager.identity.users - self.users_mock.reset_mock() - - # Get a shortcut to the UserManager Mock - self.groups_mock = self.app.client_manager.identity.groups - self.groups_mock.reset_mock() - - # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains - self.domains_mock.reset_mock() - - # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects - self.projects_mock.reset_mock() - - # Get a shortcut to the RoleManager Mock - self.roles_mock = self.app.client_manager.identity.roles - self.roles_mock.reset_mock() - +class TestRoleInherited(identity_fakes.TestIdentityv3): def _is_inheritance_testcase(self): - return False - + return True -class TestRoleInherited(TestRole): +class TestRoleAdd(identity_fakes.TestIdentityv3): def _is_inheritance_testcase(self): - return True - + return False -class TestRoleAdd(TestRole): + user = sdk_fakes.generate_fake_resource(_user.User) + group = sdk_fakes.generate_fake_resource(_group.Group) + domain = sdk_fakes.generate_fake_resource(_domain.Domain) + project = sdk_fakes.generate_fake_resource(_project.Project) def setUp(self): - super(TestRoleAdd, self).setUp() + super().setUp() - self.users_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.USER), - loaded=True, - ) + self.identity_sdk_client.find_user.return_value = self.user + self.identity_sdk_client.find_group.return_value = self.group + self.identity_sdk_client.find_domain.return_value = self.domain + self.identity_sdk_client.find_project.return_value = self.project - self.groups_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.GROUP), - loaded=True, + self.role = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description=None, ) - - self.domains_mock.get.return_value = fakes.FakeResource( - None, - 
copy.deepcopy(identity_fakes.DOMAIN), - loaded=True, + self.identity_sdk_client.find_role.return_value = self.role + self.role_with_domain = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=self.domain.id, + description=None, ) - self.projects_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.PROJECT), - loaded=True, + self.identity_sdk_client.assign_domain_role_to_user.return_value = ( + self.role ) - - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - loaded=True, + self.identity_sdk_client.assign_domain_role_to_group.return_value = ( + self.role ) - self.roles_mock.grant.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - loaded=True, + self.identity_sdk_client.assign_project_role_to_user.return_value = ( + self.role + ) + self.identity_sdk_client.assign_project_role_to_group.return_value = ( + self.role + ) + self.identity_sdk_client.assign_system_role_to_user.return_value = ( + self.role + ) + self.identity_sdk_client.assign_system_role_to_group.return_value = ( + self.role ) # Get the command object to test self.cmd = role.AddRole(self.app, None) - def test_role_add_user_system(self): + @mock.patch.object(role.LOG, 'warning') + def test_role_add_user_system(self, mock_warning): arglist = [ - '--user', identity_fakes.user_name, - '--system', 'all', - identity_fakes.role_name, + '--user', + self.user.name, + '--system', + 'all', + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_name), + ('user', self.user.name), ('group', None), ('system', 'all'), ('domain', None), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -126,31 +111,36 @@ def test_role_add_user_system(self): # Set expected values kwargs = { - 'user': identity_fakes.user_id, 'system': 'all', - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'user': self.user.id, + 'role': self.role.id, } - # RoleManager.grant(role, user=, group=, domain=, project=) - self.roles_mock.grant.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.assign_system_role_to_user.assert_called_with( **kwargs ) self.assertIsNone(result) + if self._is_inheritance_testcase(): + mock_warning.assert_called_with( + "'--inherited' was given, which is not supported when adding a system role; this will be an error in a future release" + ) + def test_role_add_user_domain(self): arglist = [ - '--user', identity_fakes.user_name, - '--domain', identity_fakes.domain_name, - identity_fakes.role_name, + '--user', + self.user.name, + '--domain', + self.domain.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_name), + ('user', self.user.name), ('group', None), - ('domain', identity_fakes.domain_name), + ('domain', self.domain.name), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -159,31 +149,32 @@ def test_role_add_user_domain(self): # Set expected values kwargs = { - 'user': identity_fakes.user_id, - 'domain': identity_fakes.domain_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'domain': 
self.domain.id, + 'user': self.user.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.grant(role, user=, group=, domain=, project=) - self.roles_mock.grant.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.assign_domain_role_to_user.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_add_user_project(self): arglist = [ - '--user', identity_fakes.user_name, - '--project', identity_fakes.project_name, - identity_fakes.role_name, + '--user', + self.user.name, + '--project', + self.project.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_name), + ('user', self.user.name), ('group', None), ('domain', None), - ('project', identity_fakes.project_name), - ('role', identity_fakes.role_name), + ('project', self.project.name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -192,32 +183,34 @@ def test_role_add_user_project(self): # Set expected values kwargs = { - 'user': identity_fakes.user_id, - 'project': identity_fakes.project_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'project': self.project.id, + 'user': self.user.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.grant(role, user=, group=, domain=, project=) - self.roles_mock.grant.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.assign_project_role_to_user.assert_called_with( **kwargs ) self.assertIsNone(result) - def test_role_add_group_system(self): + @mock.patch.object(role.LOG, 'warning') + def test_role_add_group_system(self, mock_warning): arglist = [ - '--group', identity_fakes.group_name, - '--system', 'all', - identity_fakes.role_name, + '--group', + self.group.name, + '--system', + 'all', + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_name), + ('group', self.group.name), ('system', 'all'), ('domain', None), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -226,31 +219,36 @@ def test_role_add_group_system(self): # Set expected values kwargs = { - 'group': identity_fakes.group_id, 'system': 'all', - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'group': self.group.id, + 'role': self.role.id, } - # RoleManager.grant(role, user=, group=, domain=, project=) - self.roles_mock.grant.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.assign_system_role_to_group.assert_called_with( **kwargs ) self.assertIsNone(result) + if self._is_inheritance_testcase(): + mock_warning.assert_called_with( + "'--inherited' was given, which is not supported when adding a system role; this will be an error in a future release" + ) + def test_role_add_group_domain(self): arglist = [ - '--group', identity_fakes.group_name, - '--domain', identity_fakes.domain_name, - identity_fakes.role_name, + '--group', + self.group.name, + '--domain', + self.domain.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_name), - ('domain', identity_fakes.domain_name), + ('group', self.group.name), + ('domain', 
self.domain.name), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -259,31 +257,32 @@ def test_role_add_group_domain(self): # Set expected values kwargs = { - 'group': identity_fakes.group_id, - 'domain': identity_fakes.domain_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'domain': self.domain.id, + 'group': self.group.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.grant(role, user=, group=, domain=, project=) - self.roles_mock.grant.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.assign_domain_role_to_group.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_add_group_project(self): arglist = [ - '--group', identity_fakes.group_name, - '--project', identity_fakes.project_name, - identity_fakes.role_name, + '--group', + self.group.name, + '--project', + self.project.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_name), + ('group', self.group.name), ('domain', None), - ('project', identity_fakes.project_name), - ('role', identity_fakes.role_name), + ('project', self.project.name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -292,37 +291,36 @@ def test_role_add_group_project(self): # Set expected values kwargs = { - 'group': identity_fakes.group_id, - 'project': identity_fakes.project_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'project': self.project.id, + 'group': self.group.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.grant(role, user=, group=, domain=, project=) - self.roles_mock.grant.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.assign_project_role_to_group.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_add_domain_role_on_user_project(self): - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, - ) + self.identity_sdk_client.find_role.return_value = self.role_with_domain + arglist = [ - '--user', identity_fakes.user_name, - '--project', identity_fakes.project_name, - '--role-domain', identity_fakes.domain_name, - identity_fakes.ROLE_2['name'], + '--user', + self.user.name, + '--project', + self.project.name, + '--role-domain', + self.domain.name, + self.role_with_domain.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_name), + ('user', self.user.name), ('group', None), ('domain', None), - ('project', identity_fakes.project_name), - ('role', identity_fakes.ROLE_2['name']), + ('project', self.project.name), + ('role', self.role_with_domain.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -331,64 +329,79 @@ def test_role_add_domain_role_on_user_project(self): # Set expected values kwargs = { - 'user': identity_fakes.user_id, - 'project': identity_fakes.project_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'project': self.project.id, + 'user': self.user.id, + 'role': self.role_with_domain.id, + 'inherited': self._is_inheritance_testcase(), } - # 
RoleManager.grant(role, user=, group=, domain=, project=) - self.roles_mock.grant.assert_called_with( - identity_fakes.ROLE_2['id'], + self.identity_sdk_client.assign_project_role_to_user.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_add_with_error(self): arglist = [ - identity_fakes.role_name, + self.role.name, ] verifylist = [ ('user', None), ('group', None), ('domain', None), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) class TestRoleAddInherited(TestRoleAdd, TestRoleInherited): pass -class TestRoleCreate(TestRole): +class TestRoleCreate(identity_fakes.TestIdentityv3): + collist = ('id', 'name', 'domain_id', 'description') + domain = sdk_fakes.generate_fake_resource(_domain.Domain) def setUp(self): - super(TestRoleCreate, self).setUp() + super().setUp() - self.domains_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.DOMAIN), - loaded=True, + self.role = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description=None, ) - - self.roles_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - loaded=True, + self.role_with_domain = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=self.domain.id, + description=None, + ) + self.role_with_description = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description='role description', + ) + self.role_with_immutable_option = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description=None, + options={'immutable': True}, ) + self.identity_sdk_client.find_domain.return_value = self.domain # Get the command object to test self.cmd = role.CreateRole(self.app, None) def test_role_create_no_options(self): + self.identity_sdk_client.create_role.return_value = self.role + arglist = [ - identity_fakes.role_name, + self.role.name, ] verifylist = [ - ('name', identity_fakes.role_name), + ('name', self.role.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -399,41 +412,33 @@ def test_role_create_no_options(self): # Set expected values kwargs = { - 'domain': None, - 'name': identity_fakes.role_name, - 'description': None, - 'options': {}, + 'name': self.role.name, } - # RoleManager.create(name=, domain=) - self.roles_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_role.assert_called_with(**kwargs) - collist = ('domain', 'id', 'name') - self.assertEqual(collist, columns) + self.assertEqual(self.collist, columns) datalist = ( + self.role.id, + self.role.name, + None, None, - identity_fakes.role_id, - identity_fakes.role_name, ) self.assertEqual(datalist, data) def test_role_create_with_domain(self): - - self.roles_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, + self.identity_sdk_client.create_role.return_value = ( + self.role_with_domain ) arglist = [ - '--domain', identity_fakes.domain_name, - identity_fakes.ROLE_2['name'], + '--domain', + self.domain.name, + self.role_with_domain.name, ] verifylist = [ - ('domain', identity_fakes.domain_name), - ('name', identity_fakes.ROLE_2['name']), + ('domain', self.domain.name), + ('name', 
self.role_with_domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -444,40 +449,34 @@ def test_role_create_with_domain(self): # Set expected values kwargs = { - 'domain': identity_fakes.domain_id, - 'name': identity_fakes.ROLE_2['name'], - 'description': None, - 'options': {}, + 'domain_id': self.domain.id, + 'name': self.role_with_domain.name, } - # RoleManager.create(name=, domain=) - self.roles_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_role.assert_called_with(**kwargs) - collist = ('domain', 'id', 'name') - self.assertEqual(collist, columns) + self.assertEqual(self.collist, columns) datalist = ( - identity_fakes.domain_id, - identity_fakes.ROLE_2['id'], - identity_fakes.ROLE_2['name'], + self.role_with_domain.id, + self.role_with_domain.name, + self.domain.id, + None, ) self.assertEqual(datalist, data) def test_role_create_with_description(self): - - self.roles_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, + self.identity_sdk_client.create_role.return_value = ( + self.role_with_description ) + arglist = [ - '--description', identity_fakes.role_description, - identity_fakes.ROLE_2['name'], + '--description', + self.role_with_description.description, + self.role_with_description.name, ] verifylist = [ - ('description', identity_fakes.role_description), - ('name', identity_fakes.ROLE_2['name']), + ('description', self.role_with_description.description), + ('name', self.role_with_description.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -488,40 +487,31 @@ def test_role_create_with_description(self): # Set expected values kwargs = { - 'description': identity_fakes.role_description, - 'name': identity_fakes.ROLE_2['name'], - 'domain': None, - 'options': {}, + 'name': self.role_with_description.name, + 'description': self.role_with_description.description, } - # RoleManager.create(name=, domain=) - self.roles_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_role.assert_called_with(**kwargs) - collist = ('domain', 'id', 'name') - self.assertEqual(collist, columns) + self.assertEqual(self.collist, columns) datalist = ( - 'd1', - identity_fakes.ROLE_2['id'], - identity_fakes.ROLE_2['name'], + self.role_with_description.id, + self.role_with_description.name, + None, + self.role_with_description.description, ) self.assertEqual(datalist, data) def test_role_create_with_immutable_option(self): + self.identity_sdk_client.create_role.return_value = self.role - self.roles_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, - ) arglist = [ '--immutable', - identity_fakes.ROLE_2['name'], + self.role.name, ] verifylist = [ ('immutable', True), - ('name', identity_fakes.ROLE_2['name']), + ('name', self.role.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -532,41 +522,31 @@ def test_role_create_with_immutable_option(self): # Set expected values kwargs = { - 'options': {'immutable': True}, - 'description': None, - 'name': identity_fakes.ROLE_2['name'], - 'domain': None, + 'name': self.role.name, } - # RoleManager.create(name=, domain=) - self.roles_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_role.assert_called_with(**kwargs) - collist = ('domain', 'id', 'name') - self.assertEqual(collist, columns) + self.assertEqual(self.collist, columns) datalist = ( - 'd1', - identity_fakes.ROLE_2['id'], - 
identity_fakes.ROLE_2['name'], + self.role.id, + self.role.name, + None, + None, ) self.assertEqual(datalist, data) def test_role_create_with_no_immutable_option(self): + self.identity_sdk_client.create_role.return_value = self.role - self.roles_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, - ) arglist = [ '--no-immutable', - identity_fakes.ROLE_2['name'], + self.role.name, ] verifylist = [ - ('no_immutable', True), - ('name', identity_fakes.ROLE_2['name']), + ('immutable', False), + ('name', self.role.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -577,90 +557,95 @@ def test_role_create_with_no_immutable_option(self): # Set expected values kwargs = { - 'options': {'immutable': False}, - 'description': None, - 'name': identity_fakes.ROLE_2['name'], - 'domain': None, + 'name': self.role.name, } - # RoleManager.create(name=, domain=) - self.roles_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_role.assert_called_with(**kwargs) - collist = ('domain', 'id', 'name') - self.assertEqual(collist, columns) + self.assertEqual(self.collist, columns) datalist = ( - 'd1', - identity_fakes.ROLE_2['id'], - identity_fakes.ROLE_2['name'], + self.role.id, + self.role.name, + None, + None, ) self.assertEqual(datalist, data) -class TestRoleDelete(TestRole): - +class TestRoleDelete(identity_fakes.TestIdentityv3): def setUp(self): - super(TestRoleDelete, self).setUp() - - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - loaded=True, - ) - self.roles_mock.delete.return_value = None + super().setUp() # Get the command object to test self.cmd = role.DeleteRole(self.app, None) def test_role_delete_no_options(self): + self.role = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description=None, + ) + self.identity_sdk_client.find_role.return_value = self.role + self.identity_sdk_client.delete_role.return_value = None + arglist = [ - identity_fakes.role_name, + self.role.name, ] verifylist = [ - ('roles', [identity_fakes.role_name]), + ('roles', [self.role.name]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.roles_mock.delete.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.delete_role.assert_called_with( + role=self.role.id, + ignore_missing=False, ) self.assertIsNone(result) def test_role_delete_with_domain(self): - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, + self.domain = sdk_fakes.generate_fake_resource(_domain.Domain) + self.role_with_domain = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=self.domain.id, + description=None, ) - self.roles_mock.delete.return_value = None + self.identity_sdk_client.find_role.return_value = self.role_with_domain + self.identity_sdk_client.delete_role.return_value = None arglist = [ - '--domain', identity_fakes.domain_name, - identity_fakes.ROLE_2['name'], + '--domain', + self.domain.name, + self.role_with_domain.name, ] verifylist = [ - ('roles', [identity_fakes.ROLE_2['name']]), - ('domain', identity_fakes.domain_name), + ('roles', [self.role_with_domain.name]), + ('domain', self.domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.roles_mock.delete.assert_called_with( - identity_fakes.ROLE_2['id'], + 
self.identity_sdk_client.delete_role.assert_called_with( + role=self.role_with_domain.id, + ignore_missing=False, ) self.assertIsNone(result) - @mock.patch.object(utils, 'find_resource') - def test_delete_multi_roles_with_exception(self, find_mock): - find_mock.side_effect = [self.roles_mock.get.return_value, - exceptions.CommandError] + def test_delete_multi_roles_with_exception(self): + self.role = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description=None, + ) + self.identity_sdk_client.find_role.side_effect = [ + self.role, + sdk_exc.ResourceNotFound, + ] arglist = [ - identity_fakes.role_name, + self.role.id, 'unexist_role', ] verifylist = [ @@ -672,48 +657,64 @@ def test_delete_multi_roles_with_exception(self, find_mock): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 roles failed to delete.', - str(e)) + self.assertEqual('1 of 2 roles failed to delete.', str(e)) - find_mock.assert_any_call(self.roles_mock, - identity_fakes.role_name, - domain_id=None) - find_mock.assert_any_call(self.roles_mock, - 'unexist_role', - domain_id=None) - - self.assertEqual(2, find_mock.call_count) - self.roles_mock.delete.assert_called_once_with(identity_fakes.role_id) + self.identity_sdk_client.find_role.assert_has_calls( + [ + mock.call( + name_or_id=self.role.id, + ignore_missing=False, + domain_id=None, + ), + mock.call( + name_or_id='unexist_role', + ignore_missing=False, + domain_id=None, + ), + ] + ) + self.assertEqual(2, self.identity_sdk_client.find_role.call_count) + self.identity_sdk_client.delete_role.assert_called_once_with( + role=self.role.id, ignore_missing=False + ) -class TestRoleList(TestRole): +class TestRoleList(identity_fakes.TestIdentityv3): columns = ( 'ID', 'Name', ) - datalist = ( - ( - identity_fakes.role_id, - identity_fakes.role_name, - ), - ) + domain = sdk_fakes.generate_fake_resource(_domain.Domain) def setUp(self): - super(TestRoleList, self).setUp() + super().setUp() - self.roles_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - loaded=True, - ), + self.role = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description=None, + ) + self.role_with_domain = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=self.domain.id, + description=None, + ) + self.identity_sdk_client.roles.return_value = [ + self.role, + self.role_with_domain, ] + self.identity_sdk_client.find_domain.return_value = self.domain - self.domains_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.DOMAIN), - loaded=True, + self.datalist = ( + ( + self.role.id, + self.role.name, + ), + ( + self.role_with_domain.id, + self.role_with_domain.name, + ), ) # Get the command object to test @@ -729,24 +730,19 @@ def test_role_list_no_options(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.roles_mock.list.assert_called_with() + self.identity_sdk_client.roles.assert_called_with() self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) def test_role_list_domain_role(self): - self.roles_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, - ), - ] + self.identity_sdk_client.roles.return_value = [self.role_with_domain] arglist = [ - '--domain', identity_fakes.domain_name, + '--domain', + self.domain.name, ] verifylist = [ - ('domain', identity_fakes.domain_name), + ('domain', self.domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -756,78 +752,72 @@ def test_role_list_domain_role(self): columns, data = self.cmd.take_action(parsed_args) # Set expected values - kwargs = { - 'domain_id': identity_fakes.domain_id - } - # RoleManager.list(user=, group=, domain=, project=, **kwargs) - self.roles_mock.list.assert_called_with( - **kwargs - ) + kwargs = {'domain_id': self.domain.id} + self.identity_sdk_client.roles.assert_called_with(**kwargs) collist = ('ID', 'Name', 'Domain') self.assertEqual(collist, columns) - datalist = (( - identity_fakes.ROLE_2['id'], - identity_fakes.ROLE_2['name'], - identity_fakes.domain_name, - ), ) - self.assertEqual(datalist, tuple(data)) - - -class TestRoleRemove(TestRole): - - def setUp(self): - super(TestRoleRemove, self).setUp() - - self.users_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.USER), - loaded=True, + datalist = ( + ( + self.role_with_domain.id, + self.role_with_domain.name, + self.domain.name, + ), ) + self.assertEqual(datalist, tuple(data)) - self.groups_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.GROUP), - loaded=True, - ) - self.domains_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.DOMAIN), - loaded=True, - ) +class TestRoleRemove(identity_fakes.TestIdentityv3): + def _is_inheritance_testcase(self): + return False - self.projects_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.PROJECT), - loaded=True, - ) + user = sdk_fakes.generate_fake_resource(_user.User) + group = sdk_fakes.generate_fake_resource(_group.Group) + domain = sdk_fakes.generate_fake_resource(_domain.Domain) + project = sdk_fakes.generate_fake_resource(_project.Project) + system = sdk_fakes.generate_fake_resource(_system.System) - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - loaded=True, - ) - self.roles_mock.revoke.return_value = None + def setUp(self): + super().setUp() + + self.role = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description=None, + ) + self.identity_sdk_client.find_role.return_value = self.role + self.identity_sdk_client.find_user.return_value = self.user + self.identity_sdk_client.find_group.return_value = self.group + self.identity_sdk_client.find_domain.return_value = self.domain + self.identity_sdk_client.find_project.return_value = self.project + + self.identity_sdk_client.unassign_domain_role_from_user.return_value = None + self.identity_sdk_client.unassign_domain_role_from_group.return_value = None + self.identity_sdk_client.unassign_project_role_from_user.return_value = None + self.identity_sdk_client.unassign_project_role_from_group.return_value = None + self.identity_sdk_client.unassign_system_role_from_user.return_value = None + 
self.identity_sdk_client.unassign_system_role_from_group.return_value = None # Get the command object to test self.cmd = role.RemoveRole(self.app, None) def test_role_remove_user_system(self): arglist = [ - '--user', identity_fakes.user_name, - '--system', 'all', - identity_fakes.role_name + '--user', + self.user.name, + '--system', + 'all', + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_name), + ('user', self.user.name), ('group', None), ('system', 'all'), ('domain', None), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -836,39 +826,39 @@ def test_role_remove_user_system(self): # Set expected values kwargs = { - 'user': identity_fakes.user_id, + 'user': self.user.id, 'system': 'all', - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'role': self.role.id, } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_system_role_from_user.assert_called_with( **kwargs ) self.assertIsNone(result) - @mock.patch.object(common, 'find_user') - def test_role_remove_non_existent_user_system(self, find_mock): - # Simulate the user not being in keystone, the client should gracefully + def test_role_remove_non_existent_user_system(self): + # Simulate the user not being in keystone; the client should gracefully # handle this exception and send the request to remove the role since # keystone supports removing role assignments with non-existent actors # (e.g., users or groups). - find_mock.side_effect = exceptions.CommandError - + self.identity_sdk_client.find_user.side_effect = [ + sdk_exc.ResourceNotFound, + ] arglist = [ - '--user', identity_fakes.user_id, - '--system', 'all', - identity_fakes.role_name + '--user', + self.user.id, + '--system', + 'all', + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_id), + ('user', self.user.id), ('group', None), ('system', 'all'), ('domain', None), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -877,31 +867,31 @@ def test_role_remove_non_existent_user_system(self, find_mock): # Set expected values kwargs = { - 'user': identity_fakes.user_id, + 'user': self.user.id, 'system': 'all', - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'role': self.role.id, } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_system_role_from_user.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_remove_user_domain(self): arglist = [ - '--user', identity_fakes.user_name, - '--domain', identity_fakes.domain_name, - identity_fakes.role_name, + '--user', + self.user.name, + '--domain', + self.domain.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_name), + ('user', self.user.name), ('group', None), - ('domain', identity_fakes.domain_name), + ('domain', self.domain.name), ('project', None), - ('role', identity_fakes.role_name), + ('role', 
self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -910,38 +900,39 @@ def test_role_remove_user_domain(self): # Set expected values kwargs = { - 'user': identity_fakes.user_id, - 'domain': identity_fakes.domain_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'user': self.user.id, + 'domain': self.domain.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_domain_role_from_user.assert_called_with( **kwargs ) self.assertIsNone(result) - @mock.patch.object(common, 'find_user') - def test_role_remove_non_existent_user_domain(self, find_mock): + def test_role_remove_non_existent_user_domain(self): # Simulate the user not being in keystone, the client the gracefully # handle this exception and send the request to remove the role since # keystone will validate. - find_mock.side_effect = exceptions.CommandError - + self.identity_sdk_client.find_user.side_effect = [ + sdk_exc.ResourceNotFound, + ] arglist = [ - '--user', identity_fakes.user_id, - '--domain', identity_fakes.domain_name, - identity_fakes.role_name + '--user', + self.user.id, + '--domain', + self.domain.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_id), + ('user', self.user.id), ('group', None), ('system', None), - ('domain', identity_fakes.domain_name), + ('domain', self.domain.name), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -950,31 +941,32 @@ def test_role_remove_non_existent_user_domain(self, find_mock): # Set expected values kwargs = { - 'user': identity_fakes.user_id, - 'domain': identity_fakes.domain_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'user': self.user.id, + 'domain': self.domain.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_domain_role_from_user.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_remove_user_project(self): arglist = [ - '--user', identity_fakes.user_name, - '--project', identity_fakes.project_name, - identity_fakes.role_name, + '--user', + self.user.name, + '--project', + self.project.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_name), + ('user', self.user.name), ('group', None), ('domain', None), - ('project', identity_fakes.project_name), - ('role', identity_fakes.role_name), + ('project', self.project.name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -983,38 +975,40 @@ def test_role_remove_user_project(self): # Set expected values kwargs = { - 'user': identity_fakes.user_id, - 'project': identity_fakes.project_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'user': self.user.id, + 'project': self.project.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # 
RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_project_role_from_user.assert_called_with( **kwargs ) self.assertIsNone(result) - @mock.patch.object(common, 'find_user') - def test_role_remove_non_existent_user_project(self, find_mock): + def test_role_remove_non_existent_user_project(self): # Simulate the user not being in keystone, the client the gracefully # handle this exception and send the request to remove the role since # keystone will validate. - find_mock.side_effect = exceptions.CommandError + self.identity_sdk_client.find_user.side_effect = [ + sdk_exc.ResourceNotFound, + ] arglist = [ - '--user', identity_fakes.user_id, - '--project', identity_fakes.project_name, - identity_fakes.role_name + '--user', + self.user.id, + '--project', + self.project.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ - ('user', identity_fakes.user_id), + ('user', self.user.id), ('group', None), ('system', None), ('domain', None), - ('project', identity_fakes.project_name), - ('role', identity_fakes.role_name), + ('project', self.project.name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1023,33 +1017,34 @@ def test_role_remove_non_existent_user_project(self, find_mock): # Set expected values kwargs = { - 'user': identity_fakes.user_id, - 'project': identity_fakes.project_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'user': self.user.id, + 'project': self.project.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_project_role_from_user.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_remove_group_system(self): arglist = [ - '--group', identity_fakes.group_name, - '--system', 'all', - identity_fakes.role_name, + '--group', + self.group.name, + '--system', + 'all', + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_name), + ('group', self.group.name), ('system', 'all'), ('domain', None), ('project', None), - ('role', identity_fakes.role_name), - ('role', identity_fakes.role_name), + ('role', self.role.name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1058,38 +1053,39 @@ def test_role_remove_group_system(self): # Set expected values kwargs = { - 'group': identity_fakes.group_id, + 'group': self.group.id, 'system': 'all', - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'role': self.role.id, } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_system_role_from_group.assert_called_with( **kwargs ) self.assertIsNone(result) - @mock.patch.object(common, 'find_group') - def test_role_remove_non_existent_group_system(self, find_mock): + def test_role_remove_non_existent_group_system(self): # Simulate the user not being in keystone, the client the gracefully # handle this exception and send the request to remove the role since # keystone will validate. 
- find_mock.side_effect = exceptions.CommandError + self.identity_sdk_client.find_group.side_effect = [ + sdk_exc.ResourceNotFound, + ] arglist = [ - '--group', identity_fakes.group_id, - '--system', 'all', - identity_fakes.role_name + '--group', + self.group.id, + '--system', + 'all', + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_id), + ('group', self.group.id), ('system', 'all'), ('domain', None), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1098,32 +1094,32 @@ def test_role_remove_non_existent_group_system(self, find_mock): # Set expected values kwargs = { - 'group': identity_fakes.group_id, + 'group': self.group.id, 'system': 'all', - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'role': self.role.id, } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_system_role_from_group.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_remove_group_domain(self): arglist = [ - '--group', identity_fakes.group_name, - '--domain', identity_fakes.domain_name, - identity_fakes.role_name, + '--group', + self.group.name, + '--domain', + self.domain.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_name), - ('domain', identity_fakes.domain_name), + ('group', self.group.name), + ('domain', self.domain.name), ('project', None), - ('role', identity_fakes.role_name), - ('role', identity_fakes.role_name), + ('role', self.role.name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1132,38 +1128,40 @@ def test_role_remove_group_domain(self): # Set expected values kwargs = { - 'group': identity_fakes.group_id, - 'domain': identity_fakes.domain_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'group': self.group.id, + 'domain': self.domain.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_domain_role_from_group.assert_called_with( **kwargs ) self.assertIsNone(result) - @mock.patch.object(common, 'find_group') - def test_role_remove_non_existent_group_domain(self, find_mock): + def test_role_remove_non_existent_group_domain(self): # Simulate the user not being in keystone, the client the gracefully # handle this exception and send the request to remove the role since # keystone will validate. 
- find_mock.side_effect = exceptions.CommandError + self.identity_sdk_client.find_group.side_effect = [ + sdk_exc.ResourceNotFound, + ] arglist = [ - '--group', identity_fakes.group_id, - '--domain', identity_fakes.domain_name, - identity_fakes.role_name + '--group', + self.group.id, + '--domain', + self.domain.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_id), + ('group', self.group.id), ('system', None), - ('domain', identity_fakes.domain_name), + ('domain', self.domain.name), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1172,31 +1170,32 @@ def test_role_remove_non_existent_group_domain(self, find_mock): # Set expected values kwargs = { - 'group': identity_fakes.group_id, - 'domain': identity_fakes.domain_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'group': self.group.id, + 'domain': self.domain.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_domain_role_from_group.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_remove_group_project(self): arglist = [ - '--group', identity_fakes.group_name, - '--project', identity_fakes.project_name, - identity_fakes.role_name, + '--group', + self.group.name, + '--project', + self.project.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_name), + ('group', self.group.name), ('domain', None), - ('project', identity_fakes.project_name), - ('role', identity_fakes.role_name), + ('project', self.project.name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1205,38 +1204,39 @@ def test_role_remove_group_project(self): # Set expected values kwargs = { - 'group': identity_fakes.group_id, - 'project': identity_fakes.project_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'group': self.group.id, + 'project': self.project.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_project_role_from_group.assert_called_with( **kwargs ) self.assertIsNone(result) - @mock.patch.object(common, 'find_group') - def test_role_remove_non_existent_group_project(self, find_mock): + def test_role_remove_non_existent_group_project(self): # Simulate the user not being in keystone, the client the gracefully # handle this exception and send the request to remove the role since # keystone will validate. 
- find_mock.side_effect = exceptions.CommandError - + self.identity_sdk_client.find_group.side_effect = [ + sdk_exc.ResourceNotFound, + ] arglist = [ - '--group', identity_fakes.group_id, - '--project', identity_fakes.project_name, - identity_fakes.role_name + '--group', + self.group.id, + '--project', + self.project.name, + self.role.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_id), + ('group', self.group.id), ('system', None), ('domain', None), - ('project', identity_fakes.project_name), - ('role', identity_fakes.role_name), + ('project', self.project.name), + ('role', self.role.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1245,36 +1245,38 @@ def test_role_remove_non_existent_group_project(self, find_mock): # Set expected values kwargs = { - 'group': identity_fakes.group_id, - 'project': identity_fakes.project_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'group': self.group.id, + 'project': self.project.id, + 'role': self.role.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.role_id, + self.identity_sdk_client.unassign_project_role_from_group.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_remove_domain_role_on_group_domain(self): - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, + self.role_with_domain = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=self.domain.id, + description=None, ) + self.identity_sdk_client.find_role.return_value = self.role_with_domain arglist = [ - '--group', identity_fakes.group_name, - '--domain', identity_fakes.domain_name, - identity_fakes.ROLE_2['name'], + '--group', + self.group.name, + '--domain', + self.domain.name, + self.role_with_domain.name, ] if self._is_inheritance_testcase(): arglist.append('--inherited') verifylist = [ ('user', None), - ('group', identity_fakes.group_name), - ('domain', identity_fakes.domain_name), + ('group', self.group.name), + ('domain', self.domain.name), ('project', None), - ('role', identity_fakes.ROLE_2['name']), + ('role', self.role_with_domain.name), ('inherited', self._is_inheritance_testcase()), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1283,57 +1285,65 @@ def test_role_remove_domain_role_on_group_domain(self): # Set expected values kwargs = { - 'group': identity_fakes.group_id, - 'domain': identity_fakes.domain_id, - 'os_inherit_extension_inherited': self._is_inheritance_testcase(), + 'group': self.group.id, + 'domain': self.domain.id, + 'role': self.role_with_domain.id, + 'inherited': self._is_inheritance_testcase(), } - # RoleManager.revoke(role, user=, group=, domain=, project=) - self.roles_mock.revoke.assert_called_with( - identity_fakes.ROLE_2['id'], + self.identity_sdk_client.unassign_domain_role_from_group.assert_called_with( **kwargs ) self.assertIsNone(result) def test_role_remove_with_error(self): arglist = [ - identity_fakes.role_name, + self.role.name, ] verifylist = [ ('user', None), ('group', None), ('domain', None), ('project', None), - ('role', identity_fakes.role_name), + ('role', self.role.name), ('inherited', False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - 
self.cmd.take_action, parsed_args) - + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) -class TestRoleSet(TestRole): +class TestRoleSet(identity_fakes.TestIdentityv3): def setUp(self): - super(TestRoleSet, self).setUp() + super().setUp() - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - loaded=True, + self.domain = sdk_fakes.generate_fake_resource(_domain.Domain) + self.role_with_domain = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=self.domain.id, + description=None, ) - self.roles_mock.update.return_value = None # Get the command object to test self.cmd = role.SetRole(self.app, None) def test_role_set_no_options(self): + self.role = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description=None, + ) + self.identity_sdk_client.find_role.return_value = self.role + self.identity_sdk_client.update_role.return_value = self.role + arglist = [ - '--name', 'over', - identity_fakes.role_name, + '--name', + 'over', + self.role.name, ] verifylist = [ ('name', 'over'), - ('role', identity_fakes.role_name), + ('role', self.role.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1342,31 +1352,31 @@ def test_role_set_no_options(self): # Set expected values kwargs = { 'name': 'over', - 'description': None, - 'options': {}, + 'role': self.role.id, } - # RoleManager.update(role, name=) - self.roles_mock.update.assert_called_with( - identity_fakes.role_id, - **kwargs - ) + self.identity_sdk_client.update_role.assert_called_with(**kwargs) self.assertIsNone(result) def test_role_set_domain_role(self): - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, + self.domain2 = sdk_fakes.generate_fake_resource(_domain.Domain) + self.identity_sdk_client.find_domain.return_value = self.domain2 + + self.identity_sdk_client.find_role.return_value = self.role_with_domain + self.identity_sdk_client.update_role.return_value = ( + self.role_with_domain ) + arglist = [ - '--name', 'over', - '--domain', identity_fakes.domain_name, - identity_fakes.ROLE_2['name'], + '--name', + 'over', + '--domain', + self.domain2.name, + self.role_with_domain.name, ] verifylist = [ ('name', 'over'), - ('domain', identity_fakes.domain_name), - ('role', identity_fakes.ROLE_2['name']), + ('domain', self.domain2.name), + ('role', self.role_with_domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1375,31 +1385,26 @@ def test_role_set_domain_role(self): # Set expected values kwargs = { 'name': 'over', - 'description': None, - 'options': {}, + 'role': self.role_with_domain.id, + 'domain_id': self.domain2.id, } - # RoleManager.update(role, name=) - self.roles_mock.update.assert_called_with( - identity_fakes.ROLE_2['id'], - **kwargs - ) + self.identity_sdk_client.update_role.assert_called_with(**kwargs) self.assertIsNone(result) def test_role_set_description(self): - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, - ) + self.identity_sdk_client.find_role.return_value = self.role_with_domain + arglist = [ - '--name', 'over', - '--description', identity_fakes.role_description, - identity_fakes.ROLE_2['name'], + '--name', + 'over', + '--description', + 'role description', + self.role_with_domain.name, ] verifylist = [ ('name', 'over'), - ('description', identity_fakes.role_description), - ('role', 
identity_fakes.ROLE_2['name']), + ('description', 'role description'), + ('role', self.role_with_domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1408,31 +1413,25 @@ def test_role_set_description(self): # Set expected values kwargs = { 'name': 'over', - 'description': identity_fakes.role_description, - 'options': {}, + 'description': 'role description', + 'role': self.role_with_domain.id, } - # RoleManager.update(role, name=) - self.roles_mock.update.assert_called_with( - identity_fakes.ROLE_2['id'], - **kwargs - ) + self.identity_sdk_client.update_role.assert_called_with(**kwargs) self.assertIsNone(result) def test_role_set_with_immutable(self): - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, - ) + self.identity_sdk_client.find_role.return_value = self.role_with_domain + arglist = [ - '--name', 'over', + '--name', + 'over', '--immutable', - identity_fakes.ROLE_2['name'], + self.role_with_domain.name, ] verifylist = [ ('name', 'over'), ('immutable', True), - ('role', identity_fakes.ROLE_2['name']), + ('role', self.role_with_domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1441,31 +1440,25 @@ def test_role_set_with_immutable(self): # Set expected values kwargs = { 'name': 'over', - 'description': None, + 'role': self.role_with_domain.id, 'options': {'immutable': True}, } - # RoleManager.update(role, name=) - self.roles_mock.update.assert_called_with( - identity_fakes.ROLE_2['id'], - **kwargs - ) + self.identity_sdk_client.update_role.assert_called_with(**kwargs) self.assertIsNone(result) def test_role_set_with_no_immutable(self): - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE_2), - loaded=True, - ) + self.identity_sdk_client.find_role.return_value = self.role_with_domain + arglist = [ - '--name', 'over', + '--name', + 'over', '--no-immutable', - identity_fakes.ROLE_2['name'], + self.role_with_domain.name, ] verifylist = [ ('name', 'over'), - ('no_immutable', True), - ('role', identity_fakes.ROLE_2['name']), + ('immutable', False), + ('role', self.role_with_domain.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1474,37 +1467,37 @@ def test_role_set_with_no_immutable(self): # Set expected values kwargs = { 'name': 'over', - 'description': None, + 'role': self.role_with_domain.id, 'options': {'immutable': False}, } - # RoleManager.update(role, name=) - self.roles_mock.update.assert_called_with( - identity_fakes.ROLE_2['id'], - **kwargs - ) + self.identity_sdk_client.update_role.assert_called_with(**kwargs) self.assertIsNone(result) -class TestRoleShow(TestRole): +class TestRoleShow(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource(_domain.Domain) def setUp(self): - super(TestRoleShow, self).setUp() + super().setUp() - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - loaded=True, - ) + self.identity_sdk_client.find_domain.return_value = self.domain # Get the command object to test self.cmd = role.ShowRole(self.app, None) def test_role_show(self): + self.role = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=None, + description=None, + ) + self.identity_sdk_client.find_role.return_value = self.role + arglist = [ - identity_fakes.role_name, + self.role.name, ] verifylist = [ - ('role', identity_fakes.role_name), + ('role', self.role.name), ] parsed_args = 
self.check_parser(self.cmd, arglist, verifylist)
@@ -1513,33 +1506,38 @@ def test_role_show(self):
         # data to be shown.
         columns, data = self.cmd.take_action(parsed_args)
 
-        # RoleManager.get(role)
-        self.roles_mock.get.assert_called_with(
-            identity_fakes.role_name,
+        self.identity_sdk_client.find_role.assert_called_with(
+            name_or_id=self.role.name,
+            domain_id=None,
+            ignore_missing=False,
         )
 
-        collist = ('domain', 'id', 'name')
+        collist = ('id', 'name', 'domain_id', 'description')
         self.assertEqual(collist, columns)
         datalist = (
+            self.role.id,
+            self.role.name,
+            None,
             None,
-            identity_fakes.role_id,
-            identity_fakes.role_name,
         )
         self.assertEqual(datalist, data)
 
     def test_role_show_domain_role(self):
-        self.roles_mock.get.return_value = fakes.FakeResource(
-            None,
-            copy.deepcopy(identity_fakes.ROLE_2),
-            loaded=True,
+        self.role_with_domain = sdk_fakes.generate_fake_resource(
+            resource_type=_role.Role,
+            domain_id=self.domain.id,
+            description=None,
         )
+        self.identity_sdk_client.find_role.return_value = self.role_with_domain
+
         arglist = [
-            '--domain', identity_fakes.domain_name,
-            identity_fakes.ROLE_2['name'],
+            '--domain',
+            self.domain.name,
+            self.role_with_domain.id,
         ]
         verifylist = [
-            ('domain', identity_fakes.domain_name),
-            ('role', identity_fakes.ROLE_2['name']),
+            ('domain', self.domain.name),
+            ('role', self.role_with_domain.id),
         ]
         parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -1548,23 +1546,18 @@ def test_role_show_domain_role(self):
         # data to be shown.
         columns, data = self.cmd.take_action(parsed_args)
 
-        # RoleManager.get(role). This is called from utils.find_resource().
-        # In fact, the current implementation calls the get(role) first with
-        # just the name, then with the name+domain_id. So technically we should
-        # mock this out with a call list, with the first call returning None
-        # and the second returning the object. However, if we did that we are
-        # then just testing the current sequencing within the utils method, and
-        # would become brittle to changes within that method. Hence we just
-        # check for the first call which is always lookup by name.
-        self.roles_mock.get.assert_called_with(
-            identity_fakes.ROLE_2['name'],
+        self.identity_sdk_client.find_role.assert_called_with(
+            name_or_id=self.role_with_domain.id,
+            domain_id=self.domain.id,
+            ignore_missing=False,
         )
 
-        collist = ('domain', 'id', 'name')
+        collist = ('id', 'name', 'domain_id', 'description')
         self.assertEqual(collist, columns)
         datalist = (
-            identity_fakes.domain_id,
-            identity_fakes.ROLE_2['id'],
-            identity_fakes.ROLE_2['name'],
+            self.role_with_domain.id,
+            self.role_with_domain.name,
+            self.domain.id,
+            None,
         )
         self.assertEqual(datalist, data)
diff --git a/openstackclient/tests/unit/identity/v3/test_role_assignment.py b/openstackclient/tests/unit/identity/v3/test_role_assignment.py
index 7d38d360b0..6fc66469c2 100644
--- a/openstackclient/tests/unit/identity/v3/test_role_assignment.py
+++ b/openstackclient/tests/unit/identity/v3/test_role_assignment.py
@@ -11,22 +11,21 @@
 #    under the License.
 #
 
-import copy
 from unittest import mock
 
+from openstack import exceptions as sdk_exc
+from openstack.identity.v3 import domain as _domain
+from openstack.identity.v3 import group as _group
+from openstack.identity.v3 import project as _project
+from openstack.identity.v3 import role as _role
+from openstack.identity.v3 import role_assignment as _role_assignment
+from openstack.identity.v3 import user as _user
+from openstack.test import fakes as sdk_fakes
+
 from openstackclient.identity.v3 import role_assignment
-from openstackclient.tests.unit import fakes
 from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
 
 
-class TestRoleAssignment(identity_fakes.TestIdentityv3):
-
-    def setUp(self):
-        super(TestRoleAssignment, self).setUp()
-
-
-class TestRoleAssignmentList(TestRoleAssignment):
-
+class TestRoleAssignmentList(identity_fakes.TestIdentityv3):
     columns = (
         'Role',
         'User',
@@ -38,50 +37,61 @@ class TestRoleAssignmentList(TestRoleAssignment):
     )
 
     def setUp(self):
-        super(TestRoleAssignment, self).setUp()
-
-        # Get a shortcut to the UserManager Mock
-        self.users_mock = self.app.client_manager.identity.users
-        self.users_mock.reset_mock()
-
-        # Get a shortcut to the GroupManager Mock
-        self.groups_mock = self.app.client_manager.identity.groups
-        self.groups_mock.reset_mock()
-
-        # Get a shortcut to the DomainManager Mock
-        self.domains_mock = self.app.client_manager.identity.domains
-        self.domains_mock.reset_mock()
-
-        # Get a shortcut to the ProjectManager Mock
-        self.projects_mock = self.app.client_manager.identity.projects
-        self.projects_mock.reset_mock()
-
-        # Get a shortcut to the RoleManager Mock
-        self.roles_mock = self.app.client_manager.identity.roles
-        self.roles_mock.reset_mock()
+        super().setUp()
+
+        self.user = sdk_fakes.generate_fake_resource(_user.User)
+        self.group = sdk_fakes.generate_fake_resource(_group.Group)
+        self.domain = sdk_fakes.generate_fake_resource(_domain.Domain)
+        self.project = sdk_fakes.generate_fake_resource(_project.Project)
+        self.role = sdk_fakes.generate_fake_resource(_role.Role)
+        self.assignment_with_project_id_and_user_id = (
+            sdk_fakes.generate_fake_resource(
+                resource_type=_role_assignment.RoleAssignment,
+                role={'id': self.role.id},
+                scope={'project': {'id': self.project.id}},
+                user={'id': self.user.id},
+            )
+        )
+        self.assignment_with_project_id_and_group_id = (
+            sdk_fakes.generate_fake_resource(
+                resource_type=_role_assignment.RoleAssignment,
+                role={'id': self.role.id},
+                scope={'project': {'id': self.project.id}},
+                group={'id': self.group.id},
+            )
+        )
+        self.assignment_with_domain_id_and_user_id = (
+            sdk_fakes.generate_fake_resource(
+                resource_type=_role_assignment.RoleAssignment,
+                role={'id': self.role.id},
+                scope={'domain': {'id': self.domain.id}},
+                user={'id': self.user.id},
+            )
+        )
+        self.assignment_with_domain_id_and_group_id = (
+            sdk_fakes.generate_fake_resource(
+                resource_type=_role_assignment.RoleAssignment,
+                role={'id': self.role.id},
+                scope={'domain': {'id': self.domain.id}},
+                group={'id': self.group.id},
+            )
+        )
 
-        self.role_assignments_mock = self.app.client_manager.identity.\
-            role_assignments
-        self.role_assignments_mock.reset_mock()
+        self.identity_sdk_client.find_user.return_value = self.user
+        self.identity_sdk_client.find_group.return_value = self.group
+        self.identity_sdk_client.find_project.return_value = self.project
+        self.identity_sdk_client.find_domain.return_value = self.domain
+        self.identity_sdk_client.find_role.return_value = self.role
 
         # Get the command object to test
         self.cmd =
role_assignment.ListRoleAssignment(self.app, None) def test_role_assignment_list_no_filters(self): - - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID), - loaded=True, - ), - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_GROUP_ID), - loaded=True, - ), + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_project_id_and_user_id, + self.assignment_with_project_id_and_group_id, + self.assignment_with_domain_id_and_user_id, + self.assignment_with_domain_id_and_group_id, ] arglist = [] @@ -93,64 +103,218 @@ def test_role_assignment_list_no_filters(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=None, - system=None, - group=None, - effective=False, - role=None, - user=None, - project=None, - os_inherit_extension_inherited_to=None, - include_names=False) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=None, + group_id=None, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - identity_fakes.user_id, - '', - identity_fakes.project_id, - '', - '', - False - ), (identity_fakes.role_id, - '', - identity_fakes.group_id, - identity_fakes.project_id, - '', - '', - False - ),) + datalist = ( + ( + self.role.id, + self.user.id, + '', + self.project.id, + '', + '', + False, + ), + ( + self.role.id, + '', + self.group.id, + self.project.id, + '', + '', + False, + ), + ( + self.role.id, + self.user.id, + '', + '', + self.domain.id, + '', + False, + ), + ( + self.role.id, + '', + self.group.id, + '', + self.domain.id, + '', + False, + ), + ) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_user(self): + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_domain_id_and_user_id, + self.assignment_with_project_id_and_user_id, + ] - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID), - loaded=True, + arglist = ['--user', self.user.name] + verifylist = [ + ('user', self.user.name), + ('user_domain', None), + ('group', None), + ('group_domain', None), + ('system', None), + ('domain', None), + ('project', None), + ('project_domain', None), + ('role', None), + ('effective', None), + ('inherited', False), + ('names', False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + # In base command class Lister in cliff, abstract method take_action() + # returns a tuple containing the column names and an iterable + # containing the data to be listed. 
+ columns, data = self.cmd.take_action(parsed_args) + + self.identity_sdk_client.find_user.assert_called_with( + name_or_id=self.user.name, ignore_missing=False, domain_id=None + ) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=self.user.id, + group_id=None, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) + + self.assertEqual(self.columns, columns) + datalist = ( + ( + self.role.id, + self.user.id, + '', + '', + self.domain.id, + '', + False, ), - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID), - loaded=True, + ( + self.role.id, + self.user.id, + '', + self.project.id, + '', + '', + False, ), + ) + self.assertEqual(datalist, tuple(data)) + + def test_role_assignment_list_user_with_domain(self): + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_domain_id_and_user_id, + self.assignment_with_project_id_and_user_id, ] - arglist = [ - '--user', identity_fakes.user_name + arglist = ['--user', self.user.name, '--user-domain', self.domain.name] + verifylist = [ + ('user', self.user.name), + ('user_domain', self.domain.name), + ('group', None), + ('group_domain', None), + ('system', None), + ('domain', None), + ('project', None), + ('role', None), + ('effective', None), + ('inherited', False), + ('names', False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + # In base command class Lister in cliff, abstract method take_action() + # returns a tuple containing the column names and an iterable + # containing the data to be listed. + columns, data = self.cmd.take_action(parsed_args) + + self.identity_sdk_client.find_domain.assert_called_with( + name_or_id=self.domain.name, ignore_missing=False + ) + self.identity_sdk_client.find_user.assert_called_with( + name_or_id=self.user.name, + ignore_missing=False, + domain_id=self.domain.id, + ) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=self.user.id, + group_id=None, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) + + self.assertEqual(self.columns, columns) + datalist = ( + ( + self.role.id, + self.user.id, + '', + '', + self.domain.id, + '', + False, + ), + ( + self.role.id, + self.user.id, + '', + self.project.id, + '', + '', + False, + ), + ) + self.assertEqual(datalist, tuple(data)) + + @mock.patch.object(_user.User, 'find') + def test_role_assignment_list_user_not_found(self, find_mock): + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_domain_id_and_user_id, + self.assignment_with_project_id_and_user_id, + ] + self.identity_sdk_client.find_user.side_effect = [ + sdk_exc.ForbiddenException, + sdk_exc.ForbiddenException, ] + + arglist = ['--user', self.user.id] verifylist = [ - ('user', identity_fakes.user_name), + ('user', self.user.id), + ('user_domain', None), ('group', None), + ('group_domain', None), ('system', None), ('domain', None), ('project', None), + ('project_domain', None), ('role', None), - ('effective', False), + ('effective', None), ('inherited', False), ('names', False), ] @@ -161,64 +325,130 @@ def test_role_assignment_list_user(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=None, - system=None, - user=self.users_mock.get(), - group=None, - project=None, - role=None, - effective=False, - os_inherit_extension_inherited_to=None, - include_names=False) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=self.user.id, + group_id=None, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - identity_fakes.user_id, - '', - '', - identity_fakes.domain_id, - '', - False - ), (identity_fakes.role_id, - identity_fakes.user_id, - '', - identity_fakes.project_id, - '', - '', - False - ),) + datalist = ( + ( + self.role.id, + self.user.id, + '', + '', + self.domain.id, + '', + False, + ), + ( + self.role.id, + self.user.id, + '', + self.project.id, + '', + '', + False, + ), + ) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_group(self): + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_domain_id_and_group_id, + self.assignment_with_project_id_and_group_id, + ] - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_GROUP_ID), - loaded=True, + arglist = ['--group', self.group.name] + verifylist = [ + ('user', None), + ('user_domain', None), + ('group', self.group.name), + ('group_domain', None), + ('system', None), + ('domain', None), + ('project', None), + ('project_domain', None), + ('role', None), + ('effective', None), + ('inherited', False), + ('names', False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + # In base command class Lister in cliff, abstract method take_action() + # returns a tuple containing the column names and an iterable + # containing the data to be listed. 
+ columns, data = self.cmd.take_action(parsed_args) + + self.identity_sdk_client.find_group.assert_called_with( + name_or_id=self.group.name, ignore_missing=False, domain_id=None + ) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=None, + group_id=self.group.id, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) + + self.assertEqual(self.columns, columns) + datalist = ( + ( + self.role.id, + '', + self.group.id, + '', + self.domain.id, + '', + False, ), - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_GROUP_ID), - loaded=True, + ( + self.role.id, + '', + self.group.id, + self.project.id, + '', + '', + False, ), + ) + self.assertEqual(datalist, tuple(data)) + + def test_role_assignment_list_group_with_domain(self): + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_domain_id_and_group_id, + self.assignment_with_project_id_and_group_id, ] arglist = [ - '--group', identity_fakes.group_name + '--group', + self.group.name, + '--group-domain', + self.domain.name, ] verifylist = [ ('user', None), - ('group', identity_fakes.group_name), + ('user_domain', None), + ('group', self.group.name), + ('group_domain', self.domain.name), ('system', None), ('domain', None), ('project', None), + ('project_domain', None), ('role', None), - ('effective', False), + ('effective', None), ('inherited', False), ('names', False), ] @@ -229,64 +459,67 @@ def test_role_assignment_list_group(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=None, - system=None, - group=self.groups_mock.get(), - effective=False, - project=None, - role=None, - user=None, - os_inherit_extension_inherited_to=None, - include_names=False) + self.identity_sdk_client.find_domain.assert_called_with( + name_or_id=self.domain.name, ignore_missing=False + ) + self.identity_sdk_client.find_group.assert_called_with( + name_or_id=self.group.name, + ignore_missing=False, + domain_id=self.domain.id, + ) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=None, + group_id=self.group.id, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - '', - identity_fakes.group_id, - '', - identity_fakes.domain_id, - '', - False - ), (identity_fakes.role_id, - '', - identity_fakes.group_id, - identity_fakes.project_id, - '', - '', - False - ),) + datalist = ( + ( + self.role.id, + '', + self.group.id, + '', + self.domain.id, + '', + False, + ), + ( + self.role.id, + '', + self.group.id, + self.project.id, + '', + '', + False, + ), + ) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_domain(self): - - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID), - loaded=True, - ), - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_GROUP_ID), - loaded=True, - ), + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_domain_id_and_user_id, + self.assignment_with_domain_id_and_group_id, ] - arglist = [ - '--domain', identity_fakes.domain_name - ] + arglist = ['--domain', 
self.domain.name] verifylist = [ ('user', None), + ('user_domain', None), ('group', None), + ('group_domain', None), ('system', None), - ('domain', identity_fakes.domain_name), + ('domain', self.domain.name), ('project', None), + ('project_domain', None), ('role', None), - ('effective', False), + ('effective', None), ('inherited', False), ('names', False), ] @@ -297,64 +530,130 @@ def test_role_assignment_list_domain(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=self.domains_mock.get(), - system=None, - group=None, - effective=False, - project=None, - role=None, - user=None, - os_inherit_extension_inherited_to=None, - include_names=False) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=None, + group_id=None, + scope_project_id=None, + scope_domain_id=self.domain.id, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - identity_fakes.user_id, - '', - '', - identity_fakes.domain_id, - '', - False - ), (identity_fakes.role_id, - '', - identity_fakes.group_id, - '', - identity_fakes.domain_id, - '', - False - ),) + datalist = ( + ( + self.role.id, + self.user.id, + '', + '', + self.domain.id, + '', + False, + ), + ( + self.role.id, + '', + self.group.id, + '', + self.domain.id, + '', + False, + ), + ) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_project(self): + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_project_id_and_user_id, + self.assignment_with_project_id_and_group_id, + ] + + arglist = ['--project', self.project.name] + verifylist = [ + ('user', None), + ('user_domain', None), + ('group', None), + ('group_domain', None), + ('system', None), + ('domain', None), + ('project', self.project.name), + ('project_domain', None), + ('role', None), + ('effective', None), + ('inherited', False), + ('names', False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + # In base command class Lister in cliff, abstract method take_action() + # returns a tuple containing the column names and an iterable + # containing the data to be listed. 
+ columns, data = self.cmd.take_action(parsed_args) + + self.identity_sdk_client.find_project.assert_called_with( + name_or_id=self.project.name, ignore_missing=False, domain_id=None + ) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=None, + group_id=None, + scope_project_id=self.project.id, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID), - loaded=True, + self.assertEqual(self.columns, columns) + datalist = ( + ( + self.role.id, + self.user.id, + '', + self.project.id, + '', + '', + False, ), - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_GROUP_ID), - loaded=True, + ( + self.role.id, + '', + self.group.id, + self.project.id, + '', + '', + False, ), + ) + self.assertEqual(datalist, tuple(data)) + + def test_role_assignment_list_project_with_domain(self): + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_project_id_and_user_id, + self.assignment_with_project_id_and_group_id, ] arglist = [ - '--project', identity_fakes.project_name + '--project', + self.project.name, + '--project-domain', + self.domain.name, ] verifylist = [ ('user', None), + ('user_domain', None), ('group', None), + ('group_domain', None), ('system', None), ('domain', None), - ('project', identity_fakes.project_name), + ('project', self.project.name), + ('project_domain', self.domain.name), ('role', None), - ('effective', False), + ('effective', None), ('inherited', False), ('names', False), ] @@ -365,49 +664,57 @@ def test_role_assignment_list_project(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=None, - system=None, - group=None, - effective=False, - project=self.projects_mock.get(), - role=None, - user=None, - os_inherit_extension_inherited_to=None, - include_names=False) + self.identity_sdk_client.find_domain.assert_called_with( + name_or_id=self.domain.name, ignore_missing=False + ) + self.identity_sdk_client.find_project.assert_called_with( + name_or_id=self.project.name, + ignore_missing=False, + domain_id=self.domain.id, + ) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=None, + group_id=None, + scope_project_id=self.project.id, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - identity_fakes.user_id, - '', - identity_fakes.project_id, - '', - '', - False - ), (identity_fakes.role_id, - '', - identity_fakes.group_id, - identity_fakes.project_id, - '', - '', - False - ),) + datalist = ( + ( + self.role.id, + self.user.id, + '', + self.project.id, + '', + '', + False, + ), + ( + self.role.id, + '', + self.group.id, + self.project.id, + '', + '', + False, + ), + ) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_def_creds(self): + self.app.client_manager.auth_ref = mock.Mock() + auth_ref = self.app.client_manager.auth_ref + auth_ref.project_id.return_value = self.project.id + auth_ref.user_id.return_value = self.user.id - auth_ref = self.app.client_manager.auth_ref = mock.Mock() - auth_ref.project_id.return_value = identity_fakes.project_id - auth_ref.user_id.return_value = identity_fakes.user_id - - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID), - loaded=True, - ), + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_project_id_and_user_id, ] arglist = [ @@ -416,12 +723,15 @@ def test_role_assignment_list_def_creds(self): ] verifylist = [ ('user', None), + ('user_domain', None), ('group', None), + ('group_domain', None), ('system', None), ('domain', None), ('project', None), + ('project_domain', None), ('role', None), - ('effective', False), + ('effective', None), ('inherited', False), ('names', False), ('authuser', True), @@ -434,53 +744,48 @@ def test_role_assignment_list_def_creds(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=None, - system=None, - user=self.users_mock.get(), - group=None, - project=self.projects_mock.get(), - role=None, - effective=False, - os_inherit_extension_inherited_to=None, - include_names=False) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=self.user.id, + group_id=None, + scope_project_id=self.project.id, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - identity_fakes.user_id, - '', - identity_fakes.project_id, - '', - '', - False - ),) + datalist = ( + ( + self.role.id, + self.user.id, + '', + self.project.id, + '', + '', + False, + ), + ) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_effective(self): - - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID), - loaded=True, - ), - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID), - loaded=True, - ), + self.identity_sdk_client.role_assignments.return_value = [ + self.assignment_with_project_id_and_user_id, + self.assignment_with_domain_id_and_user_id, ] arglist = ['--effective'] verifylist = [ ('user', None), + ('user_domain', None), ('group', None), + ('group_domain', None), ('system', None), ('domain', None), ('project', None), + ('project_domain', None), ('role', None), ('effective', True), ('inherited', False), @@ -493,64 +798,81 @@ def test_role_assignment_list_effective(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=None, - system=None, - group=None, + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=None, + group_id=None, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, effective=True, - project=None, - role=None, - user=None, - os_inherit_extension_inherited_to=None, - include_names=False) + include_names=None, + inherited_to=None, + ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - identity_fakes.user_id, - '', - identity_fakes.project_id, - '', - '', - False - ), (identity_fakes.role_id, - identity_fakes.user_id, - '', - '', - identity_fakes.domain_id, - '', - False - ),) + datalist = ( + ( + self.role.id, + self.user.id, + '', + self.project.id, + '', + '', + False, + ), + ( + self.role.id, + self.user.id, + '', + '', + self.domain.id, + '', + False, + ), + ) self.assertEqual(tuple(data), datalist) def test_role_assignment_list_inherited(self): - - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - (identity_fakes. - ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID_INHERITED)), - loaded=True, - ), - fakes.FakeResource( - None, - copy.deepcopy( - (identity_fakes. 
- ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID_INHERITED)), - loaded=True, - ), + assignment_with_project_id_and_user_id_inherited = ( + sdk_fakes.generate_fake_resource( + resource_type=_role_assignment.RoleAssignment, + role={'id': self.role.id}, + scope={ + 'project': {'id': self.project.id}, + 'OS-INHERIT:inherited_to': 'projects', + }, + user={'id': self.user.id}, + ) + ) + assignment_with_domain_id_and_group_id_inherited = ( + sdk_fakes.generate_fake_resource( + resource_type=_role_assignment.RoleAssignment, + role={'id': self.role.id}, + scope={ + 'domain': {'id': self.domain.id}, + 'OS-INHERIT:inherited_to': 'projects', + }, + group={'id': self.group.id}, + ) + ) + self.identity_sdk_client.role_assignments.return_value = [ + assignment_with_project_id_and_user_id_inherited, + assignment_with_domain_id_and_group_id_inherited, ] arglist = ['--inherited'] verifylist = [ ('user', None), + ('user_domain', None), ('group', None), + ('group_domain', None), ('system', None), ('domain', None), ('project', None), + ('project_domain', None), ('role', None), - ('effective', False), + ('effective', None), ('inherited', True), ('names', False), ] @@ -561,64 +883,95 @@ def test_role_assignment_list_inherited(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=None, - system=None, - group=None, - effective=False, - project=None, - role=None, - user=None, - os_inherit_extension_inherited_to='projects', - include_names=False) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=None, + group_id=None, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to='projects', + ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.role_id, - identity_fakes.user_id, - '', - identity_fakes.project_id, - '', - '', - True - ), (identity_fakes.role_id, - identity_fakes.user_id, - '', - '', - identity_fakes.domain_id, - '', - True - ),) + datalist = ( + ( + self.role.id, + self.user.id, + '', + self.project.id, + '', + '', + True, + ), + ( + self.role.id, + '', + self.group.id, + '', + self.domain.id, + '', + True, + ), + ) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_include_names(self): + assignment_with_project_id_and_user_id_include_names = ( + sdk_fakes.generate_fake_resource( + resource_type=_role_assignment.RoleAssignment, + role={'id': self.role.id, 'name': self.role.name}, + scope={ + 'project': { + 'domain': { + 'id': self.domain.id, + 'name': self.domain.name, + }, + 'id': self.project.id, + 'name': self.project.name, + } + }, + user={ + 'domain': {'id': self.domain.id, 'name': self.domain.name}, + 'id': self.user.id, + 'name': self.user.name, + }, + ) + ) + assignment_with_domain_id_and_group_id_include_names = ( + sdk_fakes.generate_fake_resource( + resource_type=_role_assignment.RoleAssignment, + role={'id': self.role.id, 'name': self.role.name}, + scope={ + 'domain': {'id': self.domain.id, 'name': self.domain.name} + }, + group={ + 'domain': {'id': self.domain.id, 'name': self.domain.name}, + 'id': self.group.id, + 'name': self.group.name, + }, + ) + ) - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes - .ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID_INCLUDE_NAMES), - loaded=True, - ), - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes - 
.ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID_INCLUDE_NAMES), - loaded=True, - ), + self.identity_sdk_client.role_assignments.return_value = [ + assignment_with_project_id_and_user_id_include_names, + assignment_with_domain_id_and_group_id_include_names, ] arglist = ['--names'] verifylist = [ ('user', None), + ('user_domain', None), ('group', None), + ('group_domain', None), ('system', None), ('domain', None), ('project', None), + ('project_domain', None), ('role', None), - ('effective', False), + ('effective', None), ('inherited', False), ('names', True), ] @@ -631,64 +984,89 @@ def test_role_assignment_list_include_names(self): # correct information columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=None, - system=None, - group=None, - effective=False, - project=None, - role=None, - user=None, - os_inherit_extension_inherited_to=None, - include_names=True) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=None, + user_id=None, + group_id=None, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=True, + inherited_to=None, + ) collist = ( - 'Role', 'User', 'Group', 'Project', 'Domain', 'System', 'Inherited' + 'Role', + 'User', + 'Group', + 'Project', + 'Domain', + 'System', + 'Inherited', ) self.assertEqual(columns, collist) - datalist1 = (( - identity_fakes.role_name, - '@'.join([identity_fakes.user_name, identity_fakes.domain_name]), - '', - '@'.join([identity_fakes.project_name, - identity_fakes.domain_name]), - '', - '', - False - ), (identity_fakes.role_name, - '@'.join([identity_fakes.user_name, identity_fakes.domain_name]), - '', - '', - identity_fakes.domain_name, - '', - False - ),) - self.assertEqual(tuple(data), datalist1) + datalist = ( + ( + self.role.name, + '@'.join([self.user.name, self.domain.name]), + '', + '@'.join([self.project.name, self.domain.name]), + '', + '', + False, + ), + ( + self.role.name, + '', + '@'.join([self.group.name, self.domain.name]), + '', + self.domain.name, + '', + False, + ), + ) + self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_domain_role(self): + domain_2 = sdk_fakes.generate_fake_resource(_domain.Domain) + # Create new role with same name but different domain + role_2 = sdk_fakes.generate_fake_resource( + resource_type=_role.Role, + domain_id=domain_2.id, + name=self.role.name, + ) + assignment_with_role_domain_2 = sdk_fakes.generate_fake_resource( + resource_type=_role_assignment.RoleAssignment, + role={'id': role_2.id, 'name': role_2.name}, + scope={'project': {'id': self.project.id}}, + user={'id': self.user.id}, + ) - self.role_assignments_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy( - identity_fakes.ASSIGNMENT_WITH_DOMAIN_ROLE), - loaded=True, - ), + self.identity_sdk_client.find_domain.return_value = domain_2 + self.identity_sdk_client.find_role.return_value = role_2 + self.identity_sdk_client.role_assignments.return_value = [ + assignment_with_role_domain_2, ] arglist = [ - '--role', identity_fakes.ROLE_2['name'], - '--role-domain', identity_fakes.domain_name + '--role', + role_2.name, + '--role-domain', + domain_2.name, ] verifylist = [ ('user', None), + ('user_domain', None), ('group', None), + ('group_domain', None), ('system', None), ('domain', None), ('project', None), - ('role', identity_fakes.ROLE_2['name']), - ('effective', False), + ('project_domain', None), + ('role', role_2.name), + ('effective', None), ('inherited', False), ('names', False), ] 
@@ -699,25 +1077,28 @@ def test_role_assignment_list_domain_role(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.role_assignments_mock.list.assert_called_with( - domain=None, - system=None, - user=None, - group=None, - project=None, - role=self.roles_mock.get(), - effective=False, - os_inherit_extension_inherited_to=None, - include_names=False) + self.identity_sdk_client.role_assignments.assert_called_with( + role_id=role_2.id, + user_id=None, + group_id=None, + scope_project_id=None, + scope_domain_id=None, + scope_system=None, + effective=None, + include_names=None, + inherited_to=None, + ) self.assertEqual(self.columns, columns) - datalist = (( - identity_fakes.ROLE_2['id'], - identity_fakes.user_id, - '', - '', - identity_fakes.domain_id, - '', - False - ),) + datalist = ( + ( + role_2.id, + self.user.id, + '', + self.project.id, + '', + '', + False, + ), + ) self.assertEqual(datalist, tuple(data)) diff --git a/openstackclient/tests/unit/identity/v3/test_service.py b/openstackclient/tests/unit/identity/v3/test_service.py index 4cba445bfc..d6dd0287b1 100644 --- a/openstackclient/tests/unit/identity/v3/test_service.py +++ b/openstackclient/tests/unit/identity/v3/test_service.py @@ -13,59 +13,51 @@ # under the License. # -from keystoneclient import exceptions as identity_exc +from openstack import exceptions as sdk_exceptions +from openstack.identity.v3 import service as _service +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions from openstackclient.identity.v3 import service from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestService(identity_fakes.TestIdentityv3): - - def setUp(self): - super(TestService, self).setUp() - - # Get a shortcut to the ServiceManager Mock - self.services_mock = self.app.client_manager.identity.services - self.services_mock.reset_mock() - - -class TestServiceCreate(TestService): - +class TestServiceCreate(identity_fakes.TestIdentityv3): columns = ( - 'description', - 'enabled', 'id', 'name', 'type', + 'enabled', + 'description', ) def setUp(self): - super(TestServiceCreate, self).setUp() + super().setUp() + + self.service = sdk_fakes.generate_fake_resource(_service.Service) - self.service = identity_fakes.FakeService.create_one_service() self.datalist = ( - self.service.description, - True, self.service.id, self.service.name, self.service.type, + True, + self.service.description, ) - self.services_mock.create.return_value = self.service + self.identity_sdk_client.create_service.return_value = self.service # Get the command object to test self.cmd = service.CreateService(self.app, None) def test_service_create_name(self): arglist = [ - '--name', self.service.name, + '--name', + self.service.name, self.service.type, ] verifylist = [ ('name', self.service.name), ('description', None), - ('enable', False), - ('disable', False), + ('is_enabled', True), ('type', self.service.type), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -75,12 +67,11 @@ def test_service_create_name(self): # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - # ServiceManager.create(name=, type=, enabled=, **kwargs) - self.services_mock.create.assert_called_with( + self.identity_sdk_client.create_service.assert_called_with( name=self.service.name, type=self.service.type, description=None, - enabled=True, + is_enabled=True, ) self.assertEqual(self.columns, columns) @@ -88,14 +79,14 @@ def test_service_create_name(self): def test_service_create_description(self): arglist = [ - '--description', self.service.description, + '--description', + self.service.description, self.service.type, ] verifylist = [ ('name', None), ('description', self.service.description), - ('enable', False), - ('disable', False), + ('is_enabled', True), ('type', self.service.type), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -105,12 +96,11 @@ def test_service_create_description(self): # data to be shown. columns, data = self.cmd.take_action(parsed_args) - # ServiceManager.create(name=, type=, enabled=, **kwargs) - self.services_mock.create.assert_called_with( + self.identity_sdk_client.create_service.assert_called_with( name=None, type=self.service.type, description=self.service.description, - enabled=True, + is_enabled=True, ) self.assertEqual(self.columns, columns) @@ -124,8 +114,7 @@ def test_service_create_enable(self): verifylist = [ ('name', None), ('description', None), - ('enable', True), - ('disable', False), + ('is_enabled', True), ('type', self.service.type), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -135,12 +124,11 @@ def test_service_create_enable(self): # data to be shown. columns, data = self.cmd.take_action(parsed_args) - # ServiceManager.create(name=, type=, enabled=, **kwargs) - self.services_mock.create.assert_called_with( + self.identity_sdk_client.create_service.assert_called_with( name=None, type=self.service.type, description=None, - enabled=True, + is_enabled=True, ) self.assertEqual(self.columns, columns) @@ -154,8 +142,7 @@ def test_service_create_disable(self): verifylist = [ ('name', None), ('description', None), - ('enable', False), - ('disable', True), + ('is_enabled', False), ('type', self.service.type), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -165,28 +152,28 @@ def test_service_create_disable(self): # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - # ServiceManager.create(name=, type=, enabled=, **kwargs) - self.services_mock.create.assert_called_with( + self.identity_sdk_client.create_service.assert_called_with( name=None, type=self.service.type, description=None, - enabled=False, + is_enabled=False, ) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) -class TestServiceDelete(TestService): - - service = identity_fakes.FakeService.create_one_service() +class TestServiceDelete(identity_fakes.TestIdentityv3): + service = sdk_fakes.generate_fake_resource(_service.Service) def setUp(self): - super(TestServiceDelete, self).setUp() + super().setUp() - self.services_mock.get.side_effect = identity_exc.NotFound(None) - self.services_mock.find.return_value = self.service - self.services_mock.delete.return_value = None + self.identity_sdk_client.get_service.side_effect = ( + sdk_exceptions.ResourceNotFound + ) + self.identity_sdk_client.find_service.return_value = self.service + self.identity_sdk_client.delete_service.return_value = None # Get the command object to test self.cmd = service.DeleteService(self.app, None) @@ -202,20 +189,19 @@ def test_service_delete_no_options(self): result = self.cmd.take_action(parsed_args) - self.services_mock.delete.assert_called_with( + self.identity_sdk_client.delete_service.assert_called_with( self.service.id, ) self.assertIsNone(result) -class TestServiceList(TestService): - - service = identity_fakes.FakeService.create_one_service() +class TestServiceList(identity_fakes.TestIdentityv3): + service = sdk_fakes.generate_fake_resource(_service.Service) def setUp(self): - super(TestServiceList, self).setUp() + super().setUp() - self.services_mock.list.return_value = [self.service] + self.identity_sdk_client.services.return_value = [self.service] # Get the command object to test self.cmd = service.ListService(self.app, None) @@ -230,15 +216,17 @@ def test_service_list_no_options(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.services_mock.list.assert_called_with() + self.identity_sdk_client.services.assert_called_with() collist = ('ID', 'Name', 'Type') self.assertEqual(collist, columns) - datalist = (( - self.service.id, - self.service.name, - self.service.type, - ), ) + datalist = ( + ( + self.service.id, + self.service.name, + self.service.type, + ), + ) self.assertEqual(datalist, tuple(data)) def test_service_list_long(self): @@ -255,30 +243,33 @@ def test_service_list_long(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.services_mock.list.assert_called_with() + self.identity_sdk_client.services.assert_called_with() collist = ('ID', 'Name', 'Type', 'Description', 'Enabled') self.assertEqual(collist, columns) - datalist = (( - self.service.id, - self.service.name, - self.service.type, - self.service.description, - True, - ), ) + datalist = ( + ( + self.service.id, + self.service.name, + self.service.type, + self.service.description, + True, + ), + ) self.assertEqual(datalist, tuple(data)) -class TestServiceSet(TestService): - - service = identity_fakes.FakeService.create_one_service() +class TestServiceSet(identity_fakes.TestIdentityv3): + service = sdk_fakes.generate_fake_resource(_service.Service) def setUp(self): - super(TestServiceSet, self).setUp() + super().setUp() - self.services_mock.get.side_effect = identity_exc.NotFound(None) - self.services_mock.find.return_value = self.service - self.services_mock.update.return_value = self.service + self.identity_sdk_client.get_service.side_effect = ( + sdk_exceptions.ResourceNotFound + ) + self.identity_sdk_client.find_service.return_value = self.service + self.identity_sdk_client.update_service.return_value = self.service # Get the command object to test self.cmd = service.SetService(self.app, None) @@ -291,8 +282,7 @@ def test_service_set_no_options(self): ('type', None), ('name', None), ('description', None), - ('enable', False), - ('disable', False), + ('is_enabled', None), ('service', self.service.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -303,15 +293,15 @@ def test_service_set_no_options(self): def test_service_set_type(self): arglist = [ - '--type', self.service.type, + '--type', + self.service.type, self.service.name, ] verifylist = [ ('type', self.service.type), ('name', None), ('description', None), - ('enable', False), - ('disable', False), + ('is_enabled', None), ('service', self.service.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -322,24 +312,22 @@ def test_service_set_type(self): kwargs = { 'type': self.service.type, } - # ServiceManager.update(service, name=, type=, enabled=, **kwargs) - self.services_mock.update.assert_called_with( - self.service.id, - **kwargs + self.identity_sdk_client.update_service.assert_called_with( + self.service.id, **kwargs ) self.assertIsNone(result) def test_service_set_name(self): arglist = [ - '--name', self.service.name, + '--name', + self.service.name, self.service.name, ] verifylist = [ ('type', None), ('name', self.service.name), ('description', None), - ('enable', False), - ('disable', False), + ('is_enabled', None), ('service', self.service.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -350,24 +338,22 @@ def test_service_set_name(self): kwargs = { 'name': self.service.name, } - # ServiceManager.update(service, name=, type=, enabled=, **kwargs) - self.services_mock.update.assert_called_with( - self.service.id, - **kwargs + self.identity_sdk_client.update_service.assert_called_with( + self.service.id, **kwargs ) self.assertIsNone(result) def test_service_set_description(self): arglist = [ - '--description', self.service.description, + '--description', + self.service.description, self.service.name, ] verifylist = [ ('type', None), ('name', None), ('description', self.service.description), - ('enable', False), - ('disable', False), + ('is_enabled', None), ('service', self.service.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -378,10 
+364,8 @@ def test_service_set_description(self): kwargs = { 'description': self.service.description, } - # ServiceManager.update(service, name=, type=, enabled=, **kwargs) - self.services_mock.update.assert_called_with( - self.service.id, - **kwargs + self.identity_sdk_client.update_service.assert_called_with( + self.service.id, **kwargs ) self.assertIsNone(result) @@ -394,8 +378,7 @@ def test_service_set_enable(self): ('type', None), ('name', None), ('description', None), - ('enable', True), - ('disable', False), + ('is_enabled', True), ('service', self.service.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -404,12 +387,10 @@ def test_service_set_enable(self): # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, } - # ServiceManager.update(service, name=, type=, enabled=, **kwargs) - self.services_mock.update.assert_called_with( - self.service.id, - **kwargs + self.identity_sdk_client.update_service.assert_called_with( + self.service.id, **kwargs ) self.assertIsNone(result) @@ -422,8 +403,7 @@ def test_service_set_disable(self): ('type', None), ('name', None), ('description', None), - ('enable', False), - ('disable', True), + ('is_enabled', False), ('service', self.service.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -432,25 +412,24 @@ def test_service_set_disable(self): # Set expected values kwargs = { - 'enabled': False, + 'is_enabled': False, } - # ServiceManager.update(service, name=, type=, enabled=, **kwargs) - self.services_mock.update.assert_called_with( - self.service.id, - **kwargs + self.identity_sdk_client.update_service.assert_called_with( + self.service.id, **kwargs ) self.assertIsNone(result) -class TestServiceShow(TestService): - - service = identity_fakes.FakeService.create_one_service() +class TestServiceShow(identity_fakes.TestIdentityv3): + service = sdk_fakes.generate_fake_resource(_service.Service) def setUp(self): - super(TestServiceShow, self).setUp() + super().setUp() - self.services_mock.get.side_effect = identity_exc.NotFound(None) - self.services_mock.find.return_value = self.service + self.identity_sdk_client.get_service.side_effect = ( + sdk_exceptions.ResourceNotFound + ) + self.identity_sdk_client.find_service.return_value = self.service # Get the command object to test self.cmd = service.ShowService(self.app, None) @@ -469,24 +448,25 @@ def test_service_show(self): # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - # ServiceManager.get(id) - self.services_mock.find.assert_called_with( - name=self.service.name + self.identity_sdk_client.find_service.assert_called_with( + self.service.name, ignore_missing=False ) - collist = ('description', 'enabled', 'id', 'name', 'type') + collist = ('id', 'name', 'type', 'enabled', 'description') self.assertEqual(collist, columns) datalist = ( - self.service.description, - True, self.service.id, self.service.name, self.service.type, + True, + self.service.description, ) self.assertEqual(datalist, data) def test_service_show_nounique(self): - self.services_mock.find.side_effect = identity_exc.NoUniqueMatch(None) + self.identity_sdk_client.find_service.side_effect = ( + sdk_exceptions.DuplicateResource(None) + ) arglist = [ 'nounique_service', ] @@ -500,5 +480,6 @@ def test_service_show_nounique(self): self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual( - "Multiple service matches found for 'nounique_service'," - " use an ID to be more specific.", str(e)) + "DuplicateResource", + str(e), + ) diff --git a/openstackclient/tests/unit/identity/v3/test_service_provider.py b/openstackclient/tests/unit/identity/v3/test_service_provider.py index 57473ef956..185cdc6b03 100644 --- a/openstackclient/tests/unit/identity/v3/test_service_provider.py +++ b/openstackclient/tests/unit/identity/v3/test_service_provider.py @@ -12,92 +12,88 @@ # License for the specific language governing permissions and limitations # under the License. -import copy + +from openstack.identity.v3 import service_provider as _service_provider +from openstack.test import fakes as sdk_fakes from openstackclient.identity.v3 import service_provider -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as service_fakes -class TestServiceProvider(service_fakes.TestFederatedIdentity): - - def setUp(self): - super(TestServiceProvider, self).setUp() - - federation_lib = self.app.client_manager.identity.federation - self.service_providers_mock = federation_lib.service_providers - self.service_providers_mock.reset_mock() - - -class TestServiceProviderCreate(TestServiceProvider): - +class TestServiceProviderCreate(service_fakes.TestFederatedIdentity): columns = ( - 'auth_url', - 'description', - 'enabled', 'id', + 'enabled', + 'description', + 'auth_url', 'sp_url', - ) - datalist = ( - service_fakes.sp_auth_url, - service_fakes.sp_description, - True, - service_fakes.sp_id, - service_fakes.service_provider_url + 'relay_state_prefix', ) def setUp(self): - super(TestServiceProviderCreate, self).setUp() + super().setUp() - copied_sp = copy.deepcopy(service_fakes.SERVICE_PROVIDER) - resource = fakes.FakeResource(None, copied_sp, loaded=True) - self.service_providers_mock.create.return_value = resource + self.service_provider = sdk_fakes.generate_fake_resource( + _service_provider.ServiceProvider + ) + self.identity_sdk_client.create_service_provider.return_value = ( + self.service_provider + ) + self.data = ( + self.service_provider.id, + self.service_provider.is_enabled, + self.service_provider.description, + self.service_provider.auth_url, + self.service_provider.sp_url, + self.service_provider.relay_state_prefix, + ) self.cmd = service_provider.CreateServiceProvider(self.app, None) def test_create_service_provider_required_options_only(self): arglist = [ - '--auth-url', service_fakes.sp_auth_url, - '--service-provider-url', service_fakes.service_provider_url, - 
service_fakes.sp_id, + '--auth-url', + self.service_provider.auth_url, + '--service-provider-url', + self.service_provider.sp_url, + self.service_provider.id, ] verifylist = [ - ('auth_url', service_fakes.sp_auth_url), - ('service_provider_url', service_fakes.service_provider_url), - ('service_provider_id', service_fakes.sp_id), - + ('auth_url', self.service_provider.auth_url), + ('service_provider_url', self.service_provider.sp_url), + ('service_provider_id', self.service_provider.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, - 'description': None, - 'auth_url': service_fakes.sp_auth_url, - 'sp_url': service_fakes.service_provider_url + 'is_enabled': True, + 'auth_url': self.service_provider.auth_url, + 'sp_url': self.service_provider.sp_url, } - self.service_providers_mock.create.assert_called_with( - id=service_fakes.sp_id, - **kwargs + self.identity_sdk_client.create_service_provider.assert_called_with( + id=self.service_provider.id, **kwargs ) self.assertEqual(self.columns, columns) - self.assertEqual(self.datalist, data) + self.assertEqual(self.data, data) def test_create_service_provider_description(self): - arglist = [ - '--description', service_fakes.sp_description, - '--auth-url', service_fakes.sp_auth_url, - '--service-provider-url', service_fakes.service_provider_url, - service_fakes.sp_id, + '--description', + self.service_provider.description, + '--auth-url', + self.service_provider.auth_url, + '--service-provider-url', + self.service_provider.sp_url, + self.service_provider.id, ] verifylist = [ - ('description', service_fakes.sp_description), - ('auth_url', service_fakes.sp_auth_url), - ('service_provider_url', service_fakes.service_provider_url), - ('service_provider_id', service_fakes.sp_id), + ('description', self.service_provider.description), + ('auth_url', self.service_provider.auth_url), + ('service_provider_url', self.service_provider.sp_url), + ('service_provider_id', self.service_provider.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -105,114 +101,85 @@ def test_create_service_provider_description(self): # Set expected values kwargs = { - 'description': service_fakes.sp_description, - 'auth_url': service_fakes.sp_auth_url, - 'sp_url': service_fakes.service_provider_url, - 'enabled': True, + 'description': self.service_provider.description, + 'auth_url': self.service_provider.auth_url, + 'sp_url': self.service_provider.sp_url, + 'is_enabled': self.service_provider.is_enabled, } - self.service_providers_mock.create.assert_called_with( - id=service_fakes.sp_id, - **kwargs + self.identity_sdk_client.create_service_provider.assert_called_with( + id=self.service_provider.id, **kwargs ) self.assertEqual(self.columns, columns) - self.assertEqual(self.datalist, data) + self.assertEqual(self.data, data) def test_create_service_provider_disabled(self): - - # Prepare FakeResource object - service_provider = copy.deepcopy(service_fakes.SERVICE_PROVIDER) - service_provider['enabled'] = False - service_provider['description'] = None - - resource = fakes.FakeResource(None, service_provider, loaded=True) - self.service_providers_mock.create.return_value = resource - arglist = [ - '--auth-url', service_fakes.sp_auth_url, - '--service-provider-url', service_fakes.service_provider_url, + '--auth-url', + self.service_provider.auth_url, + '--service-provider-url', + self.service_provider.sp_url, '--disable', - service_fakes.sp_id, + 
self.service_provider.id, ] verifylist = [ - ('auth_url', service_fakes.sp_auth_url), - ('service_provider_url', service_fakes.service_provider_url), - ('service_provider_id', service_fakes.sp_id), + ('auth_url', self.service_provider.auth_url), + ('service_provider_url', self.service_provider.sp_url), + ('service_provider_id', self.service_provider.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'auth_url': service_fakes.sp_auth_url, - 'sp_url': service_fakes.service_provider_url, - 'enabled': False, - 'description': None, + 'auth_url': self.service_provider.auth_url, + 'sp_url': self.service_provider.sp_url, + 'is_enabled': False, } - self.service_providers_mock.create.assert_called_with( - id=service_fakes.sp_id, - **kwargs + self.identity_sdk_client.create_service_provider.assert_called_with( + id=self.service_provider.id, **kwargs ) self.assertEqual(self.columns, columns) - datalist = ( - service_fakes.sp_auth_url, - None, - False, - service_fakes.sp_id, - service_fakes.service_provider_url - ) - self.assertEqual(datalist, data) - + self.assertEqual(self.data, data) -class TestServiceProviderDelete(TestServiceProvider): +class TestServiceProviderDelete(service_fakes.TestFederatedIdentity): def setUp(self): - super(TestServiceProviderDelete, self).setUp() + super().setUp() - # This is the return value for utils.find_resource() - self.service_providers_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(service_fakes.SERVICE_PROVIDER), - loaded=True, + self.service_provider = sdk_fakes.generate_fake_resource( + _service_provider.ServiceProvider ) - - self.service_providers_mock.delete.return_value = None + self.identity_sdk_client.delete_service_provider.return_value = None self.cmd = service_provider.DeleteServiceProvider(self.app, None) def test_delete_service_provider(self): arglist = [ - service_fakes.sp_id, + self.service_provider.id, ] verifylist = [ - ('service_provider', [service_fakes.sp_id]), + ('service_provider', [self.service_provider.id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.service_providers_mock.delete.assert_called_with( - service_fakes.sp_id, + self.identity_sdk_client.delete_service_provider.assert_called_with( + self.service_provider.id, ) self.assertIsNone(result) -class TestServiceProviderList(TestServiceProvider): - +class TestServiceProviderList(service_fakes.TestFederatedIdentity): def setUp(self): - super(TestServiceProviderList, self).setUp() + super().setUp() - self.service_providers_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(service_fakes.SERVICE_PROVIDER), - loaded=True, + self.service_provider = sdk_fakes.generate_fake_resource( + _service_provider.ServiceProvider ) - self.service_providers_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy(service_fakes.SERVICE_PROVIDER), - loaded=True, - ), + self.identity_sdk_client.service_providers.return_value = [ + self.service_provider ] # Get the command object to test @@ -228,38 +195,56 @@ def test_service_provider_list_no_options(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.service_providers_mock.list.assert_called_with() + self.identity_sdk_client.service_providers.assert_called_with() - collist = ('ID', 'Enabled', 'Description', 'Auth URL') + collist = ( + 'ID', + 'Enabled', + 'Description', + 'Auth URL', + 'Service Provider URL', + 'Relay State Prefix', + ) self.assertEqual(collist, columns) - datalist = (( - service_fakes.sp_id, - True, - service_fakes.sp_description, - service_fakes.sp_auth_url - ), ) + datalist = ( + ( + self.service_provider.id, + True, + self.service_provider.description, + self.service_provider.auth_url, + self.service_provider.sp_url, + self.service_provider.relay_state_prefix, + ), + ) self.assertEqual(tuple(data), datalist) -class TestServiceProviderSet(TestServiceProvider): - +class TestServiceProviderSet(service_fakes.TestFederatedIdentity): columns = ( - 'auth_url', - 'description', - 'enabled', 'id', + 'enabled', + 'description', + 'auth_url', 'sp_url', - ) - datalist = ( - service_fakes.sp_auth_url, - service_fakes.sp_description, - False, - service_fakes.sp_id, - service_fakes.service_provider_url, + 'relay_state_prefix', ) def setUp(self): - super(TestServiceProviderSet, self).setUp() + super().setUp() + self.service_provider = sdk_fakes.generate_fake_resource( + _service_provider.ServiceProvider + ) + self.identity_sdk_client.update_service_provider.return_value = ( + self.service_provider + ) + self.data = ( + self.service_provider.id, + self.service_provider.is_enabled, + self.service_provider.description, + self.service_provider.auth_url, + self.service_provider.sp_url, + self.service_provider.relay_state_prefix, + ) self.cmd = service_provider.SetServiceProvider(self.app, None) def test_service_provider_disable(self): @@ -267,142 +252,107 @@ def test_service_provider_disable(self): Set Service Provider's ``enabled`` attribute to False. """ - - def prepare(self): - """Prepare fake return objects before the test is executed""" - updated_sp = copy.deepcopy(service_fakes.SERVICE_PROVIDER) - updated_sp['enabled'] = False - resources = fakes.FakeResource( - None, - updated_sp, - loaded=True - ) - self.service_providers_mock.update.return_value = resources - - prepare(self) arglist = [ - '--disable', service_fakes.sp_id, + '--disable', + self.service_provider.id, ] verifylist = [ - ('service_provider', service_fakes.sp_id), - ('enable', False), - ('disable', True), + ('service_provider', self.service_provider.id), + ('is_enabled', False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - self.service_providers_mock.update.assert_called_with( - service_fakes.sp_id, - enabled=False, - description=None, - auth_url=None, - sp_url=None + columns, data = self.cmd.take_action(parsed_args) + self.identity_sdk_client.update_service_provider.assert_called_with( + self.service_provider.id, + is_enabled=False, ) + self.assertEqual(columns, self.columns) + self.assertEqual(data, self.data) def test_service_provider_enable(self): """Enable Service Provider. Set Service Provider's ``enabled`` attribute to True. 
""" - - def prepare(self): - """Prepare fake return objects before the test is executed""" - resources = fakes.FakeResource( - None, - copy.deepcopy(service_fakes.SERVICE_PROVIDER), - loaded=True - ) - self.service_providers_mock.update.return_value = resources - - prepare(self) arglist = [ - '--enable', service_fakes.sp_id, + '--enable', + self.service_provider.id, ] verifylist = [ - ('service_provider', service_fakes.sp_id), - ('enable', True), - ('disable', False), + ('service_provider', self.service_provider.id), + ('is_enabled', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.cmd.take_action(parsed_args) - self.service_providers_mock.update.assert_called_with( - service_fakes.sp_id, enabled=True, description=None, - auth_url=None, sp_url=None) + columns, data = self.cmd.take_action(parsed_args) + self.identity_sdk_client.update_service_provider.assert_called_with( + self.service_provider.id, + is_enabled=True, + ) + self.assertEqual(columns, self.columns) + self.assertEqual(data, self.data) def test_service_provider_no_options(self): - def prepare(self): - """Prepare fake return objects before the test is executed""" - resources = fakes.FakeResource( - None, - copy.deepcopy(service_fakes.SERVICE_PROVIDER), - loaded=True - ) - self.service_providers_mock.get.return_value = resources - - resources = fakes.FakeResource( - None, - copy.deepcopy(service_fakes.SERVICE_PROVIDER), - loaded=True, - ) - self.service_providers_mock.update.return_value = resources - - prepare(self) arglist = [ - service_fakes.sp_id, + self.service_provider.id, ] verifylist = [ - ('service_provider', service_fakes.sp_id), + ('service_provider', self.service_provider.id), ('description', None), - ('enable', False), - ('disable', False), + ('is_enabled', None), ('auth_url', None), ('service_provider_url', None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.assertEqual(columns, self.columns) + self.assertEqual(data, self.data) - self.cmd.take_action(parsed_args) - - -class TestServiceProviderShow(TestServiceProvider): +class TestServiceProviderShow(service_fakes.TestFederatedIdentity): def setUp(self): - super(TestServiceProviderShow, self).setUp() + super().setUp() - ret = fakes.FakeResource( - None, - copy.deepcopy(service_fakes.SERVICE_PROVIDER), - loaded=True, + self.service_provider = sdk_fakes.generate_fake_resource( + _service_provider.ServiceProvider + ) + self.identity_sdk_client.find_service_provider.return_value = ( + self.service_provider + ) + self.data = ( + self.service_provider.id, + self.service_provider.is_enabled, + self.service_provider.description, + self.service_provider.auth_url, + self.service_provider.sp_url, + self.service_provider.relay_state_prefix, ) - self.service_providers_mock.get.side_effect = [Exception("Not found"), - ret] - self.service_providers_mock.get.return_value = ret # Get the command object to test self.cmd = service_provider.ShowServiceProvider(self.app, None) def test_service_provider_show(self): arglist = [ - service_fakes.sp_id, + self.service_provider.id, ] verifylist = [ - ('service_provider', service_fakes.sp_id), + ('service_provider', self.service_provider.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.service_providers_mock.get.assert_called_with( - service_fakes.sp_id, - id='BETA' + self.identity_sdk_client.find_service_provider.assert_called_with( + 
self.service_provider.id, + ignore_missing=False, ) - collist = ('auth_url', 'description', 'enabled', 'id', 'sp_url') - self.assertEqual(collist, columns) - datalist = ( - service_fakes.sp_auth_url, - service_fakes.sp_description, - True, - service_fakes.sp_id, - service_fakes.service_provider_url + collist = ( + 'id', + 'enabled', + 'description', + 'auth_url', + 'sp_url', + 'relay_state_prefix', ) - self.assertEqual(data, datalist) + self.assertEqual(collist, columns) + self.assertEqual(data, self.data) diff --git a/openstackclient/tests/unit/identity/v3/test_token.py b/openstackclient/tests/unit/identity/v3/test_token.py index adb491b30f..f8d09b72cc 100644 --- a/openstackclient/tests/unit/identity/v3/test_token.py +++ b/openstackclient/tests/unit/identity/v3/test_token.py @@ -11,28 +11,14 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# - -from unittest import mock from openstackclient.identity.v3 import token from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestToken(identity_fakes.TestIdentityv3): - +class TestTokenIssue(identity_fakes.TestIdentityv3): def setUp(self): - super(TestToken, self).setUp() - - # Get a shortcut to the Auth Ref Mock - self.ar_mock = mock.PropertyMock() - type(self.app.client_manager).auth_ref = self.ar_mock - - -class TestTokenIssue(TestToken): - - def setUp(self): - super(TestTokenIssue, self).setUp() + super().setUp() self.cmd = token.IssueToken(self.app, None) @@ -40,8 +26,7 @@ def test_token_issue_with_project_id(self): auth_ref = identity_fakes.fake_auth_ref( identity_fakes.TOKEN_WITH_PROJECT_ID, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [] verifylist = [] @@ -66,8 +51,7 @@ def test_token_issue_with_domain_id(self): auth_ref = identity_fakes.fake_auth_ref( identity_fakes.TOKEN_WITH_DOMAIN_ID, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [] verifylist = [] @@ -92,8 +76,7 @@ def test_token_issue_with_unscoped(self): auth_ref = identity_fakes.fake_auth_ref( identity_fakes.UNSCOPED_TOKEN, ) - self.ar_mock = mock.PropertyMock(return_value=auth_ref) - type(self.app.client_manager).auth_ref = self.ar_mock + self.app.client_manager.auth_ref = auth_ref arglist = [] verifylist = [] @@ -116,13 +99,12 @@ def test_token_issue_with_unscoped(self): self.assertEqual(datalist, data) -class TestTokenRevoke(TestToken): - +class TestTokenRevoke(identity_fakes.TestIdentityv3): TOKEN = 'fob' def setUp(self): - super(TestTokenRevoke, self).setUp() - self.tokens_mock = self.app.client_manager.identity.tokens + super().setUp() + self.tokens_mock = self.identity_client.tokens self.tokens_mock.reset_mock() self.tokens_mock.revoke_token.return_value = True self.cmd = token.RevokeToken(self.app, None) diff --git a/openstackclient/tests/unit/identity/v3/test_trust.py b/openstackclient/tests/unit/identity/v3/test_trust.py index d530adf5a5..5c14b7ad98 100644 --- a/openstackclient/tests/unit/identity/v3/test_trust.py +++ b/openstackclient/tests/unit/identity/v3/test_trust.py @@ -11,77 +11,54 @@ # under the License. 
# -import copy from unittest import mock from osc_lib import exceptions -from osc_lib import utils + +from openstack import exceptions as sdk_exceptions +from openstack.identity.v3 import project as _project +from openstack.identity.v3 import role as _role +from openstack.identity.v3 import trust as _trust +from openstack.identity.v3 import user as _user +from openstack.test import fakes as sdk_fakes from openstackclient.identity.v3 import trust -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestTrust(identity_fakes.TestIdentityv3): - - def setUp(self): - super(TestTrust, self).setUp() - - self.trusts_mock = self.app.client_manager.identity.trusts - self.trusts_mock.reset_mock() - self.projects_mock = self.app.client_manager.identity.projects - self.projects_mock.reset_mock() - self.users_mock = self.app.client_manager.identity.users - self.users_mock.reset_mock() - self.roles_mock = self.app.client_manager.identity.roles - self.roles_mock.reset_mock() - - -class TestTrustCreate(TestTrust): - +class TestTrustCreate(identity_fakes.TestIdentityv3): def setUp(self): - super(TestTrustCreate, self).setUp() + super().setUp() - self.projects_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.PROJECT), - loaded=True, - ) + self.trust = sdk_fakes.generate_fake_resource(_trust.Trust) + self.identity_sdk_client.create_trust.return_value = self.trust - self.users_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.USER), - loaded=True, - ) + self.project = sdk_fakes.generate_fake_resource(_project.Project) + self.identity_sdk_client.find_project.return_value = self.project - self.roles_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.ROLE), - loaded=True, - ) + self.user = sdk_fakes.generate_fake_resource(_user.User) + self.identity_sdk_client.find_user.return_value = self.user - self.trusts_mock.create.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.TRUST), - loaded=True, - ) + self.role = sdk_fakes.generate_fake_resource(_role.Role) + self.identity_sdk_client.find_role.return_value = self.role # Get the command object to test self.cmd = trust.CreateTrust(self.app, None) def test_trust_create_basic(self): arglist = [ - '--project', identity_fakes.project_id, - '--role', identity_fakes.role_id, - identity_fakes.user_id, - identity_fakes.user_id + '--project', + self.project.id, + '--role', + self.role.id, + self.user.id, + self.user.id, ] verifylist = [ - ('project', identity_fakes.project_id), - ('impersonate', False), - ('role', [identity_fakes.role_id]), - ('trustor', identity_fakes.user_id), - ('trustee', identity_fakes.user_id), + ('project', self.project.id), + ('roles', [self.role.id]), + ('trustor', self.user.id), + ('trustee', self.user.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -92,72 +69,74 @@ def test_trust_create_basic(self): # Set expected values kwargs = { + 'project_id': self.project.id, + 'roles': [{'id': self.role.id}], 'impersonation': False, - 'project': identity_fakes.project_id, - 'role_ids': [identity_fakes.role_id], - 'expires_at': None, } # TrustManager.create(trustee_id, trustor_id, impersonation=, # project=, role_names=, expires_at=) - self.trusts_mock.create.assert_called_with( - identity_fakes.user_id, - identity_fakes.user_id, - **kwargs + self.identity_sdk_client.create_trust.assert_called_with( + trustor_user_id=self.user.id, + 
trustee_user_id=self.user.id, + **kwargs, ) - collist = ('expires_at', 'id', 'impersonation', 'project_id', - 'roles', 'trustee_user_id', 'trustor_user_id') + collist = ( + 'expires_at', + 'id', + 'is_impersonation', + 'project_id', + 'redelegated_trust_id', + 'redelegation_count', + 'remaining_uses', + 'roles', + 'trustee_user_id', + 'trustor_user_id', + ) self.assertEqual(collist, columns) datalist = ( - identity_fakes.trust_expires, - identity_fakes.trust_id, - identity_fakes.trust_impersonation, - identity_fakes.project_id, - identity_fakes.role_name, - identity_fakes.user_id, - identity_fakes.user_id + self.trust.expires_at, + self.trust.id, + self.trust.is_impersonation, + self.trust.project_id, + self.trust.redelegated_trust_id, + self.trust.redelegation_count, + self.trust.remaining_uses, + self.trust.roles, + self.trust.trustee_user_id, + self.trust.trustor_user_id, ) self.assertEqual(datalist, data) -class TestTrustDelete(TestTrust): - +class TestTrustDelete(identity_fakes.TestIdentityv3): def setUp(self): - super(TestTrustDelete, self).setUp() + super().setUp() - # This is the return value for utils.find_resource() - self.trusts_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.TRUST), - loaded=True, - ) - self.trusts_mock.delete.return_value = None + self.trust = sdk_fakes.generate_fake_resource(_trust.Trust) + self.identity_sdk_client.delete_trust.return_value = None + self.identity_sdk_client.find_trust.return_value = self.trust # Get the command object to test self.cmd = trust.DeleteTrust(self.app, None) def test_trust_delete(self): arglist = [ - identity_fakes.trust_id, - ] - verifylist = [ - ('trust', [identity_fakes.trust_id]) + self.trust.id, ] + verifylist = [('trust', [self.trust.id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.trusts_mock.delete.assert_called_with( - identity_fakes.trust_id, + self.identity_sdk_client.delete_trust.assert_called_with( + self.trust.id, ) self.assertIsNone(result) - @mock.patch.object(utils, 'find_resource') - def test_delete_multi_trusts_with_exception(self, find_mock): - find_mock.side_effect = [self.trusts_mock.get.return_value, - exceptions.CommandError] + def test_delete_multi_trusts_with_exception(self): arglist = [ - identity_fakes.trust_id, + self.trust.id, 'unexist_trust', ] verifylist = [ @@ -165,33 +144,37 @@ def test_delete_multi_trusts_with_exception(self, find_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.identity_sdk_client.find_trust.side_effect = [ + self.trust, + sdk_exceptions.ResourceNotFound, + ] + try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 trusts failed to delete.', - str(e)) - - find_mock.assert_any_call(self.trusts_mock, identity_fakes.trust_id) - find_mock.assert_any_call(self.trusts_mock, 'unexist_trust') - - self.assertEqual(2, find_mock.call_count) - self.trusts_mock.delete.assert_called_once_with( - identity_fakes.trust_id) + self.assertEqual('1 of 2 trusts failed to delete.', str(e)) + self.identity_sdk_client.find_trust.assert_has_calls( + [ + mock.call(self.trust.id, ignore_missing=False), + mock.call('unexist_trust', ignore_missing=False), + ] + ) + self.identity_sdk_client.delete_trust.assert_called_once_with( + self.trust.id + ) -class TestTrustList(TestTrust): +class TestTrustList(identity_fakes.TestIdentityv3): def setUp(self): - super(TestTrustList, self).setUp() + 
super().setUp() - self.trusts_mock.list.return_value = [ - fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.TRUST), - loaded=True, - ), - ] + self.trust = sdk_fakes.generate_fake_resource(_trust.Trust) + self.identity_sdk_client.trusts.return_value = [self.trust] + + self.user = sdk_fakes.generate_fake_resource(_user.User) + self.identity_sdk_client.find_user.return_value = self.user # Get the command object to test self.cmd = trust.ListTrust(self.app, None) @@ -206,27 +189,35 @@ def test_trust_list_no_options(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.trusts_mock.list.assert_called_with( - trustor_user=None, - trustee_user=None, + self.identity_sdk_client.trusts.assert_called_with( + trustor_user_id=None, + trustee_user_id=None, ) - collist = ('ID', 'Expires At', 'Impersonation', 'Project ID', - 'Trustee User ID', 'Trustor User ID') + collist = ( + 'ID', + 'Expires At', + 'Impersonation', + 'Project ID', + 'Trustee User ID', + 'Trustor User ID', + ) self.assertEqual(collist, columns) - datalist = (( - identity_fakes.trust_id, - identity_fakes.trust_expires, - identity_fakes.trust_impersonation, - identity_fakes.project_id, - identity_fakes.user_id, - identity_fakes.user_id - ), ) + datalist = ( + ( + self.trust.id, + self.trust.expires_at, + self.trust.is_impersonation, + self.trust.project_id, + self.trust.trustee_user_id, + self.trust.trustor_user_id, + ), + ) self.assertEqual(datalist, tuple(data)) def test_trust_list_auth_user(self): - auth_ref = self.app.client_manager.auth_ref = mock.Mock() - auth_ref.user_id.return_value = identity_fakes.user_id + self.app.client_manager.auth_ref = mock.Mock() + auth_ref = self.app.client_manager.auth_ref arglist = ['--auth-user'] verifylist = [ @@ -241,31 +232,39 @@ def test_trust_list_auth_user(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.trusts_mock.list.assert_any_call( - trustor_user=self.users_mock.get() - ) - self.trusts_mock.list.assert_any_call( - trustee_user=self.users_mock.get() + self.identity_sdk_client.trusts.assert_has_calls( + [ + mock.call(trustor_user_id=auth_ref.user_id), + mock.call(trustee_user_id=auth_ref.user_id), + ] ) - collist = ('ID', 'Expires At', 'Impersonation', 'Project ID', - 'Trustee User ID', 'Trustor User ID') + collist = ( + 'ID', + 'Expires At', + 'Impersonation', + 'Project ID', + 'Trustee User ID', + 'Trustor User ID', + ) self.assertEqual(collist, columns) - datalist = (( - identity_fakes.trust_id, - identity_fakes.trust_expires, - identity_fakes.trust_impersonation, - identity_fakes.project_id, - identity_fakes.user_id, - identity_fakes.user_id - ), ) + datalist = ( + ( + self.trust.id, + self.trust.expires_at, + self.trust.is_impersonation, + self.trust.project_id, + self.trust.trustee_user_id, + self.trust.trustor_user_id, + ), + ) self.assertEqual(datalist, tuple(data)) def test_trust_list_trustee(self): - arglist = ['--trustee', identity_fakes.user_name] + arglist = ['--trustee', self.user.name] verifylist = [ ('trustor', None), - ('trustee', identity_fakes.user_name), + ('trustee', self.user.name), ('authuser', False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -275,30 +274,37 @@ def test_trust_list_trustee(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - print(self.trusts_mock.list.call_args_list) - self.trusts_mock.list.assert_any_call( - trustee_user=self.users_mock.get(), - trustor_user=None, + self.identity_sdk_client.trusts.assert_called_with( + trustee_user_id=self.user.id, + trustor_user_id=None, ) - collist = ('ID', 'Expires At', 'Impersonation', 'Project ID', - 'Trustee User ID', 'Trustor User ID') + collist = ( + 'ID', + 'Expires At', + 'Impersonation', + 'Project ID', + 'Trustee User ID', + 'Trustor User ID', + ) self.assertEqual(collist, columns) - datalist = (( - identity_fakes.trust_id, - identity_fakes.trust_expires, - identity_fakes.trust_impersonation, - identity_fakes.project_id, - identity_fakes.user_id, - identity_fakes.user_id - ), ) + datalist = ( + ( + self.trust.id, + self.trust.expires_at, + self.trust.is_impersonation, + self.trust.project_id, + self.trust.trustee_user_id, + self.trust.trustor_user_id, + ), + ) self.assertEqual(datalist, tuple(data)) def test_trust_list_trustor(self): - arglist = ['--trustor', identity_fakes.user_name] + arglist = ['--trustor', self.user.name] verifylist = [ ('trustee', None), - ('trustor', identity_fakes.user_name), + ('trustor', self.user.name), ('authuser', False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -308,46 +314,49 @@ def test_trust_list_trustor(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - print(self.trusts_mock.list.call_args_list) - self.trusts_mock.list.assert_any_call( - trustor_user=self.users_mock.get(), - trustee_user=None, + self.identity_sdk_client.trusts.assert_called_once_with( + trustor_user_id=self.user.id, + trustee_user_id=None, ) - collist = ('ID', 'Expires At', 'Impersonation', 'Project ID', - 'Trustee User ID', 'Trustor User ID') + collist = ( + 'ID', + 'Expires At', + 'Impersonation', + 'Project ID', + 'Trustee User ID', + 'Trustor User ID', + ) self.assertEqual(collist, columns) - datalist = (( - identity_fakes.trust_id, - identity_fakes.trust_expires, - identity_fakes.trust_impersonation, - identity_fakes.project_id, - identity_fakes.user_id, - identity_fakes.user_id - ), ) + datalist = ( + ( + self.trust.id, + self.trust.expires_at, + self.trust.is_impersonation, + self.trust.project_id, + self.trust.trustee_user_id, + self.trust.trustor_user_id, + ), + ) self.assertEqual(datalist, tuple(data)) -class TestTrustShow(TestTrust): - +class TestTrustShow(identity_fakes.TestIdentityv3): def setUp(self): - super(TestTrustShow, self).setUp() + super().setUp() - self.trusts_mock.get.return_value = fakes.FakeResource( - None, - copy.deepcopy(identity_fakes.TRUST), - loaded=True, - ) + self.trust = sdk_fakes.generate_fake_resource(_trust.Trust) + self.identity_sdk_client.find_trust.return_value = self.trust # Get the command object to test self.cmd = trust.ShowTrust(self.app, None) def test_trust_show(self): arglist = [ - identity_fakes.trust_id, + self.trust.id, ] verifylist = [ - ('trust', identity_fakes.trust_id), + ('trust', self.trust.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -356,18 +365,33 @@ def test_trust_show(self): # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.trusts_mock.get.assert_called_with(identity_fakes.trust_id) + self.identity_sdk_client.find_trust.assert_called_with( + self.trust.id, ignore_missing=False + ) - collist = ('expires_at', 'id', 'impersonation', 'project_id', - 'roles', 'trustee_user_id', 'trustor_user_id') + collist = ( + 'expires_at', + 'id', + 'is_impersonation', + 'project_id', + 'redelegated_trust_id', + 'redelegation_count', + 'remaining_uses', + 'roles', + 'trustee_user_id', + 'trustor_user_id', + ) self.assertEqual(collist, columns) datalist = ( - identity_fakes.trust_expires, - identity_fakes.trust_id, - identity_fakes.trust_impersonation, - identity_fakes.project_id, - identity_fakes.role_name, - identity_fakes.user_id, - identity_fakes.user_id + self.trust.expires_at, + self.trust.id, + self.trust.is_impersonation, + self.trust.project_id, + self.trust.redelegated_trust_id, + self.trust.redelegation_count, + self.trust.remaining_uses, + self.trust.roles, + self.trust.trustee_user_id, + self.trust.trustor_user_id, ) self.assertEqual(datalist, data) diff --git a/openstackclient/tests/unit/identity/v3/test_unscoped_saml.py b/openstackclient/tests/unit/identity/v3/test_unscoped_saml.py index 34655263f7..40b1991015 100644 --- a/openstackclient/tests/unit/identity/v3/test_unscoped_saml.py +++ b/openstackclient/tests/unit/identity/v3/test_unscoped_saml.py @@ -18,11 +18,10 @@ class TestUnscopedSAML(identity_fakes.TestFederatedIdentity): - def setUp(self): - super(TestUnscopedSAML, self).setUp() + super().setUp() - federation_lib = self.app.client_manager.identity.federation + federation_lib = self.identity_client.federation self.projects_mock = federation_lib.projects self.projects_mock.reset_mock() self.domains_mock = federation_lib.domains @@ -30,9 +29,8 @@ def setUp(self): class TestDomainList(TestUnscopedSAML): - def setUp(self): - super(TestDomainList, self).setUp() + super().setUp() self.domains_mock.list.return_value = [ fakes.FakeResource( @@ -59,19 +57,20 @@ def test_accessible_domains_list(self): collist = ('ID', 'Enabled', 'Name', 'Description') self.assertEqual(collist, columns) - datalist = (( - identity_fakes.domain_id, - True, - identity_fakes.domain_name, - identity_fakes.domain_description, - ), ) + datalist = ( + ( + identity_fakes.domain_id, + True, + identity_fakes.domain_name, + identity_fakes.domain_description, + ), + ) self.assertEqual(datalist, tuple(data)) class TestProjectList(TestUnscopedSAML): - def setUp(self): - super(TestProjectList, self).setUp() + super().setUp() self.projects_mock.list.return_value = [ fakes.FakeResource( @@ -98,10 +97,12 @@ def test_accessible_projects_list(self): collist = ('ID', 'Domain ID', 'Enabled', 'Name') self.assertEqual(collist, columns) - datalist = (( - identity_fakes.project_id, - identity_fakes.domain_id, - True, - identity_fakes.project_name, - ), ) + datalist = ( + ( + identity_fakes.project_id, + identity_fakes.domain_id, + True, + identity_fakes.project_name, + ), + ) self.assertEqual(datalist, tuple(data)) diff --git a/openstackclient/tests/unit/identity/v3/test_user.py b/openstackclient/tests/unit/identity/v3/test_user.py index c71435bacf..134ba5a0bc 100644 --- a/openstackclient/tests/unit/identity/v3/test_user.py +++ b/openstackclient/tests/unit/identity/v3/test_user.py @@ -17,44 +17,23 @@ from unittest import mock from osc_lib import exceptions -from osc_lib import utils + +from openstack import exceptions as sdk_exc +from openstack.identity.v3 import domain as _domain +from 
openstack.identity.v3 import group as _group +from openstack.identity.v3 import project as _project +from openstack.identity.v3 import role_assignment as _role_assignment +from openstack.identity.v3 import user as _user +from openstack.test import fakes as sdk_fakes from openstackclient.identity import common from openstackclient.identity.v3 import user from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -class TestUser(identity_fakes.TestIdentityv3): - - def setUp(self): - super(TestUser, self).setUp() - - # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains - self.domains_mock.reset_mock() - - # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects - self.projects_mock.reset_mock() - - # Get a shortcut to the GroupManager Mock - self.groups_mock = self.app.client_manager.identity.groups - self.groups_mock.reset_mock() - - # Get a shortcut to the UserManager Mock - self.users_mock = self.app.client_manager.identity.users - self.users_mock.reset_mock() - - # Shortcut for RoleAssignmentManager Mock - self.role_assignments_mock = self.app.client_manager.identity.\ - role_assignments - self.role_assignments_mock.reset_mock() - - -class TestUserCreate(TestUser): - - domain = identity_fakes.FakeDomain.create_one_domain() - project = identity_fakes.FakeProject.create_one_project() +class TestUserCreate(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource(_domain.Domain) + project = sdk_fakes.generate_fake_resource(_project.Project) columns = ( 'default_project_id', @@ -63,14 +42,18 @@ class TestUserCreate(TestUser): 'enabled', 'id', 'name', + 'description', + 'password_expires_at', + 'options', ) def setUp(self): - super(TestUserCreate, self).setUp() + super().setUp() - self.user = identity_fakes.FakeUser.create_one_user( - attrs={'domain_id': self.domain.id, - 'default_project_id': self.project.id} + self.user = sdk_fakes.generate_fake_resource( + resource_type=_user.User, + domain_id=self.domain.id, + default_project_id=self.project.id, ) self.datalist = ( self.project.id, @@ -79,11 +62,14 @@ def setUp(self): True, self.user.id, self.user.name, + self.user.description, + self.user.password_expires_at, + getattr(self.user, 'options', {}), ) - self.domains_mock.get.return_value = self.domain - self.projects_mock.get.return_value = self.project - self.users_mock.create.return_value = self.user + self.identity_sdk_client.find_domain.return_value = self.domain + self.identity_sdk_client.find_project.return_value = self.project + self.identity_sdk_client.create_user.return_value = self.user # Get the command object to test self.cmd = user.CreateUser(self.app, None) @@ -107,27 +93,17 @@ def test_user_create_no_options(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'options': {}, - 'enabled': True, - 'password': None, + 'is_enabled': True, } - - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_user_create_password(self): arglist = [ - '--password', 'secret', + '--password', + 'secret', self.user.name, ] verifylist = [ @@ -147,19 +123,11 @@ def test_user_create_password(self): # 
Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'options': {}, - 'enabled': True, + 'is_enabled': True, 'password': 'secret', } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) + self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -169,7 +137,6 @@ def test_user_create_password_prompt(self): self.user.name, ] verifylist = [ - ('password', None), ('password_prompt', True), ('enable', False), ('disable', False), @@ -188,26 +155,64 @@ def test_user_create_password_prompt(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'options': {}, - 'enabled': True, + 'is_enabled': True, 'password': 'abc123', } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_user_create_password_prompt_no_warning(self): + arglist = [ + '--password-prompt', + self.user.name, + ] + verifylist = [ + ('password_prompt', True), + ('enable', False), + ('disable', False), + ('name', self.user.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + import logging + + # Mock the password prompt + mocker = mock.Mock() + mocker.return_value = 'abc123' + + # Use assertLogs to verify no warnings are logged + logger = 'openstackclient.identity.v3.user' + with mock.patch("osc_lib.utils.get_password", mocker): + with self.assertLogs(logger, level='WARNING') as log_ctx: + logging.getLogger(logger).warning( + "Dummy warning for test setup" + ) + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(1, len(log_ctx.records)) + self.assertIn( + "Dummy warning for test setup", log_ctx.output[0] + ) + self.assertNotIn( + "No password was supplied", ''.join(log_ctx.output) + ) + + # Set expected values + kwargs = { + 'name': self.user.name, + 'is_enabled': True, + 'password': 'abc123', + } + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_user_create_email(self): arglist = [ - '--email', 'barney@example.com', + '--email', + 'barney@example.com', self.user.name, ] verifylist = [ @@ -226,26 +231,18 @@ def test_user_create_email(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, 'email': 'barney@example.com', - 'enabled': True, - 'options': {}, - 'password': None, + 'is_enabled': True, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_user_create_project(self): arglist = [ - '--project', self.project.name, + '--project', + self.project.name, self.user.name, ] verifylist = [ @@ -264,19 +261,10 @@ def test_user_create_project(self): # Set expected values kwargs 
= { 'name': self.user.name, - 'default_project': self.project.id, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, - 'options': {}, - 'password': None, + 'default_project_id': self.project.id, + 'is_enabled': True, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( @@ -286,13 +274,18 @@ def test_user_create_project(self): True, self.user.id, self.user.name, + self.user.description, + self.user.password_expires_at, + getattr(self.user, 'options', {}), ) self.assertEqual(datalist, data) def test_user_create_project_domain(self): arglist = [ - '--project', self.project.name, - '--project-domain', self.project.domain_id, + '--project', + self.project.name, + '--project-domain', + self.project.domain_id, self.user.name, ] verifylist = [ @@ -312,18 +305,12 @@ def test_user_create_project_domain(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': self.project.id, - 'description': None, - 'domain': None, - 'email': None, - 'options': {}, - 'enabled': True, - 'password': None, + 'default_project_id': self.project.id, + 'is_enabled': True, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs + self.identity_sdk_client.create_user.assert_called_once_with(**kwargs) + self.identity_sdk_client.find_domain.assert_called_once_with( + self.project.domain_id, ignore_missing=False ) self.assertEqual(self.columns, columns) @@ -334,12 +321,16 @@ def test_user_create_project_domain(self): True, self.user.id, self.user.name, + self.user.description, + self.user.password_expires_at, + getattr(self.user, 'options', {}), ) self.assertEqual(datalist, data) def test_user_create_domain(self): arglist = [ - '--domain', self.domain.name, + '--domain', + self.domain.name, self.user.name, ] verifylist = [ @@ -358,19 +349,10 @@ def test_user_create_domain(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': self.domain.id, - 'email': None, - 'options': {}, - 'enabled': True, - 'password': None, + 'domain_id': self.domain.id, + 'is_enabled': True, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -395,19 +377,9 @@ def test_user_create_enable(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'options': {}, - 'enabled': True, - 'password': None, + 'is_enabled': True, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -432,18 +404,10 @@ def test_user_create_disable(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 
'email': None, - 'options': {}, - 'enabled': False, - 'password': None, + 'is_enabled': False, } - # users.create(name=, password, email, tenant_id=None, enabled=True) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) + self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -468,19 +432,10 @@ def test_user_create_ignore_lockout_failure_attempts(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_lockout_failure_attempts': True}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -506,19 +461,10 @@ def test_user_create_no_ignore_lockout_failure_attempts(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_lockout_failure_attempts': False}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -544,19 +490,10 @@ def test_user_create_ignore_password_expiry(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_password_expiry': True}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -582,19 +519,10 @@ def test_user_create_no_ignore_password_expiry(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_password_expiry': False}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -620,19 +548,10 @@ def test_user_create_ignore_change_password_upon_first_use(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_change_password_upon_first_use': True}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + 
self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -658,19 +577,10 @@ def test_user_create_no_ignore_change_password_upon_first_use(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_change_password_upon_first_use': False}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -696,19 +606,10 @@ def test_user_create_enables_lock_password(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'lock_password': True}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -734,19 +635,10 @@ def test_user_create_disables_lock_password(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'lock_password': False}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -772,19 +664,10 @@ def test_user_create_enable_multi_factor_auth(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'multi_factor_auth_enabled': True}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -810,32 +693,27 @@ def test_user_create_disable_multi_factor_auth(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, + 'is_enabled': True, 'options': {'multi_factor_auth_enabled': False}, - 'password': None, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) def test_user_create_option_with_multi_factor_auth_rule(self): arglist = [ - '--multi-factor-auth-rule', identity_fakes.mfa_opt1, - '--multi-factor-auth-rule', 
identity_fakes.mfa_opt2, + '--multi-factor-auth-rule', + identity_fakes.mfa_opt1, + '--multi-factor-auth-rule', + identity_fakes.mfa_opt2, self.user.name, ] verifylist = [ - ('multi_factor_auth_rule', [identity_fakes.mfa_opt1, - identity_fakes.mfa_opt2]), + ( + 'multi_factor_auth_rules', + [identity_fakes.mfa_opt1, identity_fakes.mfa_opt2], + ), ('enable', False), ('disable', False), ('name', self.user.name), @@ -850,20 +728,12 @@ def test_user_create_option_with_multi_factor_auth_rule(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, - 'options': {'multi_factor_auth_rules': [["password", "totp"], - ["password"]]}, - 'password': None, + 'is_enabled': True, + 'options': { + 'multi_factor_auth_rules': [["password", "totp"], ["password"]] + }, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) @@ -872,13 +742,14 @@ def test_user_create_with_multiple_options(self): arglist = [ '--ignore-password-expiry', '--disable-multi-factor-auth', - '--multi-factor-auth-rule', identity_fakes.mfa_opt1, + '--multi-factor-auth-rule', + identity_fakes.mfa_opt1, self.user.name, ] verifylist = [ ('ignore_password_expiry', True), ('disable_multi_factor_auth', True), - ('multi_factor_auth_rule', [identity_fakes.mfa_opt1]), + ('multi_factor_auth_rules', [identity_fakes.mfa_opt1]), ('enable', False), ('disable', False), ('name', self.user.name), @@ -893,36 +764,27 @@ def test_user_create_with_multiple_options(self): # Set expected values kwargs = { 'name': self.user.name, - 'default_project': None, - 'description': None, - 'domain': None, - 'email': None, - 'enabled': True, - 'options': {'ignore_password_expiry': True, - 'multi_factor_auth_enabled': False, - 'multi_factor_auth_rules': [["password", "totp"]]}, - 'password': None, + 'is_enabled': True, + 'options': { + 'ignore_password_expiry': True, + 'multi_factor_auth_enabled': False, + 'multi_factor_auth_rules': [["password", "totp"]], + }, } - # UserManager.create(name=, domain=, project=, password=, email=, - # description=, enabled=, default_project=) - self.users_mock.create.assert_called_with( - **kwargs - ) + self.identity_sdk_client.create_user.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, data) -class TestUserDelete(TestUser): - - user = identity_fakes.FakeUser.create_one_user() +class TestUserDelete(identity_fakes.TestIdentityv3): + user = sdk_fakes.generate_fake_resource(_user.User) def setUp(self): - super(TestUserDelete, self).setUp() + super().setUp() - # This is the return value for utils.find_resource() - self.users_mock.get.return_value = self.user - self.users_mock.delete.return_value = None + self.identity_sdk_client.find_user.return_value = self.user + self.identity_sdk_client.delete_user.return_value = None # Get the command object to test self.cmd = user.DeleteUser(self.app, None) @@ -938,15 +800,18 @@ def test_user_delete_no_options(self): result = self.cmd.take_action(parsed_args) - self.users_mock.delete.assert_called_with( + self.identity_sdk_client.delete_user.assert_called_with( self.user.id, + ignore_missing=False, ) self.assertIsNone(result) - @mock.patch.object(utils, 'find_resource') + 
@mock.patch.object(_user.User, 'find') def test_delete_multi_users_with_exception(self, find_mock): - find_mock.side_effect = [self.user, - exceptions.CommandError] + self.identity_sdk_client.find_user.side_effect = [ + self.user, + sdk_exc.ResourceNotFound, + ] arglist = [ self.user.id, 'unexist_user', @@ -960,33 +825,35 @@ def test_delete_multi_users_with_exception(self, find_mock): self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 users failed to delete.', - str(e)) - - find_mock.assert_any_call(self.users_mock, self.user.id) - find_mock.assert_any_call(self.users_mock, 'unexist_user') + self.assertEqual('1 of 2 users failed to delete.', str(e)) - self.assertEqual(2, find_mock.call_count) - self.users_mock.delete.assert_called_once_with(self.user.id) + self.identity_sdk_client.find_user.assert_has_calls( + [ + mock.call(name_or_id=self.user.id, ignore_missing=False), + mock.call(name_or_id='unexist_user', ignore_missing=False), + ] + ) + self.assertEqual(2, self.identity_sdk_client.find_user.call_count) + self.identity_sdk_client.delete_user.assert_called_once_with( + self.user.id, ignore_missing=False + ) -class TestUserList(TestUser): - domain = identity_fakes.FakeDomain.create_one_domain() - project = identity_fakes.FakeProject.create_one_project() - user = identity_fakes.FakeUser.create_one_user( - attrs={'domain_id': domain.id, - 'default_project_id': project.id} +class TestUserList(identity_fakes.TestIdentityv3): + domain = sdk_fakes.generate_fake_resource(_domain.Domain) + project = sdk_fakes.generate_fake_resource(_project.Project) + user = sdk_fakes.generate_fake_resource( + resource_type=_user.User, + domain_id=domain.id, + default_project_id=project.id, ) - group = identity_fakes.FakeGroup.create_one_group() - role_assignment = ( - identity_fakes.FakeRoleAssignment.create_one_role_assignment( - attrs={'user': {'id': user.id}})) - - columns = [ - 'ID', - 'Name' - ] + group = sdk_fakes.generate_fake_resource(_group.Group) + role_assignment = sdk_fakes.generate_fake_resource( + resource_type=_role_assignment.RoleAssignment, user={'id': user.id} + ) + + columns = ['ID', 'Name'] datalist = ( ( user.id, @@ -995,14 +862,17 @@ class TestUserList(TestUser): ) def setUp(self): - super(TestUserList, self).setUp() + super().setUp() - self.users_mock.get.return_value = self.user - self.users_mock.list.return_value = [self.user] - self.domains_mock.get.return_value = self.domain - self.groups_mock.get.return_value = self.group - self.projects_mock.get.return_value = self.project - self.role_assignments_mock.list.return_value = [self.role_assignment] + self.identity_sdk_client.find_user.return_value = self.user + self.identity_sdk_client.users.return_value = [self.user] + self.identity_sdk_client.group_users.return_value = [self.user] + self.identity_sdk_client.find_domain.return_value = self.domain + self.identity_sdk_client.find_group.return_value = self.group + self.identity_sdk_client.find_project.return_value = self.project + self.identity_sdk_client.role_assignments.return_value = [ + self.role_assignment + ] # Get the command object to test self.cmd = user.ListUser(self.app, None) @@ -1019,20 +889,18 @@ def test_user_list_no_options(self): # Set expected values kwargs = { - 'domain': None, - 'group': None, + 'domain_id': None, } - self.users_mock.list.assert_called_with( - **kwargs - ) + self.identity_sdk_client.users.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) 
self.assertEqual(self.datalist, tuple(data)) def test_user_list_domain(self): arglist = [ - '--domain', self.domain.id, + '--domain', + self.domain.id, ] verifylist = [ ('domain', self.domain.id), @@ -1046,20 +914,18 @@ def test_user_list_domain(self): # Set expected values kwargs = { - 'domain': self.domain.id, - 'group': None, + 'domain_id': self.domain.id, } - self.users_mock.list.assert_called_with( - **kwargs - ) + self.identity_sdk_client.users.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) def test_user_list_group(self): arglist = [ - '--group', self.group.name, + '--group', + self.group.name, ] verifylist = [ ('group', self.group.name), @@ -1073,13 +939,11 @@ def test_user_list_group(self): # Set expected values kwargs = { - 'domain': None, + 'domain_id': None, 'group': self.group.id, } - self.users_mock.list.assert_called_with( - **kwargs - ) + self.identity_sdk_client.group_users.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) @@ -1100,13 +964,10 @@ def test_user_list_long(self): # Set expected values kwargs = { - 'domain': None, - 'group': None, + 'domain_id': None, } - self.users_mock.list.assert_called_with( - **kwargs - ) + self.identity_sdk_client.users.assert_called_with(**kwargs) collist = [ 'ID', @@ -1124,7 +985,7 @@ def test_user_list_long(self): self.user.name, self.project.id, self.domain.id, - '', + self.user.description, self.user.email, True, ), @@ -1133,7 +994,8 @@ def test_user_list_long(self): def test_user_list_project(self): arglist = [ - '--project', self.project.name, + '--project', + self.project.name, ] verifylist = [ ('project', self.project.name), @@ -1146,34 +1008,51 @@ def test_user_list_project(self): columns, data = self.cmd.take_action(parsed_args) kwargs = { - 'project': self.project.id, + 'scope_project_id': self.project.id, } - self.role_assignments_mock.list.assert_called_with(**kwargs) - self.users_mock.get.assert_called_with(self.user.id) + self.identity_sdk_client.role_assignments.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) + def test_user_list_with_option_enabled(self): + arglist = ['--enabled'] + verifylist = [('is_enabled', True)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + # In base command class Lister in cliff, abstract method take_action() + # returns a tuple containing the column names and an iterable + # containing the data to be listed. 
+ columns, data = self.cmd.take_action(parsed_args) + + kwargs = {'domain_id': None, 'is_enabled': True} + self.identity_sdk_client.users.assert_called_with(**kwargs) + self.identity_sdk_client.find_user.assert_not_called() + self.identity_sdk_client.group_users.assert_not_called() + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, tuple(data)) -class TestUserSet(TestUser): - project = identity_fakes.FakeProject.create_one_project() - domain = identity_fakes.FakeDomain.create_one_domain() - user = identity_fakes.FakeUser.create_one_user( - attrs={'default_project_id': project.id} +class TestUserSet(identity_fakes.TestIdentityv3): + project = sdk_fakes.generate_fake_resource(_project.Project) + domain = sdk_fakes.generate_fake_resource(_domain.Domain) + user = sdk_fakes.generate_fake_resource( + resource_type=_user.User, default_project_id=project.id ) - user2 = identity_fakes.FakeUser.create_one_user( - attrs={'default_project_id': project.id, - 'domain_id': domain.id} + user2 = sdk_fakes.generate_fake_resource( + resource_type=_user.User, + default_project_id=project.id, + domain_id=domain.id, ) def setUp(self): - super(TestUserSet, self).setUp() + super().setUp() - self.projects_mock.get.return_value = self.project - self.users_mock.get.return_value = self.user - self.users_mock.update.return_value = self.user + self.identity_sdk_client.find_project.return_value = self.project + self.identity_sdk_client.find_user.return_value = self.user + self.identity_sdk_client.update_user.return_value = self.user # Get the command object to test self.cmd = user.SetUser(self.app, None) @@ -1184,7 +1063,6 @@ def test_user_set_no_options(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('project', None), ('enable', False), @@ -1199,12 +1077,12 @@ def test_user_set_no_options(self): def test_user_set_name(self): arglist = [ - '--name', 'qwerty', + '--name', + 'qwerty', self.user.name, ] verifylist = [ ('name', 'qwerty'), - ('password', None), ('email', None), ('project', None), ('enable', False), @@ -1217,26 +1095,24 @@ def test_user_set_name(self): # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'name': 'qwerty', } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) def test_user_set_specify_domain(self): arglist = [ - '--name', 'qwerty', - '--domain', self.domain.id, - self.user2.name + '--name', + 'qwerty', + '--domain', + self.domain.id, + self.user2.name, ] verifylist = [ ('name', 'qwerty'), - ('password', None), ('domain', self.domain.id), ('email', None), ('project', None), @@ -1248,20 +1124,17 @@ def test_user_set_specify_domain(self): result = self.cmd.take_action(parsed_args) - kwargs = { - 'enabled': True, - 'name': 'qwerty' - } + kwargs = {'is_enabled': True, 'name': 'qwerty'} - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) def test_user_set_password(self): arglist = [ - '--password', 'secret', + '--password', + 'secret', self.user.name, ] verifylist = [ @@ -1280,14 +1153,11 @@ def test_user_set_password(self): # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'password': 'secret', } - # 
UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1298,7 +1168,6 @@ def test_user_set_password_prompt(self): ] verifylist = [ ('name', None), - ('password', None), ('password_prompt', True), ('email', None), ('project', None), @@ -1315,25 +1184,22 @@ def test_user_set_password_prompt(self): # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'password': 'abc123', } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) def test_user_set_email(self): arglist = [ - '--email', 'barney@example.com', + '--email', + 'barney@example.com', self.user.name, ] verifylist = [ ('name', None), - ('password', None), ('email', 'barney@example.com'), ('project', None), ('enable', False), @@ -1346,25 +1212,22 @@ def test_user_set_email(self): # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'email': 'barney@example.com', } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) def test_user_set_project(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, self.user.name, ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('project', self.project.id), ('enable', False), @@ -1377,26 +1240,35 @@ def test_user_set_project(self): # Set expected values kwargs = { - 'enabled': True, - 'default_project': self.project.id, + 'is_enabled': True, + 'default_project_id': self.project.id, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs + ) + self.identity_sdk_client.find_domain.assert_not_called() + + # Set expected values + kwargs = { + 'ignore_missing': False, + 'domain_id': None, + } + self.identity_sdk_client.find_project.assert_called_once_with( + name_or_id=self.project.id, **kwargs ) + self.assertIsNone(result) def test_user_set_project_domain(self): arglist = [ - '--project', self.project.id, - '--project-domain', self.project.domain_id, + '--project', + self.project.id, + '--project-domain', + self.project.domain_id, self.user.name, ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('project', self.project.id), ('project_domain', self.project.domain_id), @@ -1410,15 +1282,17 @@ def test_user_set_project_domain(self): # Set expected values kwargs = { - 'enabled': True, - 'default_project': self.project.id, + 'is_enabled': True, + 'default_project_id': self.project.id, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + 
user=self.user, **kwargs ) + + self.identity_sdk_client.find_domain.assert_called_once_with( + name_or_id=self.project.domain_id, ignore_missing=False + ) + self.assertIsNone(result) def test_user_set_enable(self): @@ -1428,7 +1302,6 @@ def test_user_set_enable(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('project', None), ('enable', True), @@ -1441,13 +1314,10 @@ def test_user_set_enable(self): # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1458,7 +1328,6 @@ def test_user_set_disable(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('project', None), ('enable', False), @@ -1471,13 +1340,10 @@ def test_user_set_disable(self): # Set expected values kwargs = { - 'enabled': False, + 'is_enabled': False, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1488,7 +1354,6 @@ def test_user_set_ignore_lockout_failure_attempts(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('ignore_lockout_failure_attempts', True), ('project', None), @@ -1501,14 +1366,11 @@ def test_user_set_ignore_lockout_failure_attempts(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_lockout_failure_attempts': True}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1519,7 +1381,6 @@ def test_user_set_no_ignore_lockout_failure_attempts(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('no_ignore_lockout_failure_attempts', True), ('project', None), @@ -1532,14 +1393,11 @@ def test_user_set_no_ignore_lockout_failure_attempts(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_lockout_failure_attempts': False}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1550,7 +1408,6 @@ def test_user_set_ignore_password_expiry(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('ignore_password_expiry', True), ('project', None), @@ -1563,14 +1420,11 @@ def test_user_set_ignore_password_expiry(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_password_expiry': True}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - 
self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1581,7 +1435,6 @@ def test_user_set_no_ignore_password_expiry(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('no_ignore_password_expiry', True), ('project', None), @@ -1594,14 +1447,11 @@ def test_user_set_no_ignore_password_expiry(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_password_expiry': False}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1612,7 +1462,6 @@ def test_user_set_ignore_change_password_upon_first_use(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('ignore_change_password_upon_first_use', True), ('project', None), @@ -1625,14 +1474,11 @@ def test_user_set_ignore_change_password_upon_first_use(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_change_password_upon_first_use': True}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1643,7 +1489,6 @@ def test_user_set_no_ignore_change_password_upon_first_use(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('no_ignore_change_password_upon_first_use', True), ('project', None), @@ -1656,14 +1501,11 @@ def test_user_set_no_ignore_change_password_upon_first_use(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'ignore_change_password_upon_first_use': False}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1674,7 +1516,6 @@ def test_user_set_enable_lock_password(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('enable_lock_password', True), ('project', None), @@ -1687,14 +1528,11 @@ def test_user_set_enable_lock_password(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'lock_password': True}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1705,7 +1543,6 @@ def test_user_set_disable_lock_password(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('disable_lock_password', True), ('project', None), @@ -1718,14 +1555,11 @@ def test_user_set_disable_lock_password(self): result = 
self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'lock_password': False}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1736,7 +1570,6 @@ def test_user_set_enable_multi_factor_auth(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('enable_multi_factor_auth', True), ('project', None), @@ -1749,14 +1582,11 @@ def test_user_set_enable_multi_factor_auth(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'multi_factor_auth_enabled': True}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1767,7 +1597,6 @@ def test_user_set_disable_multi_factor_auth(self): ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('disable_multi_factor_auth', True), ('project', None), @@ -1780,27 +1609,24 @@ def test_user_set_disable_multi_factor_auth(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, + 'is_enabled': True, 'options': {'multi_factor_auth_enabled': False}, } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) def test_user_set_option_multi_factor_auth_rule(self): arglist = [ - '--multi-factor-auth-rule', identity_fakes.mfa_opt1, + '--multi-factor-auth-rule', + identity_fakes.mfa_opt1, self.user.name, ] verifylist = [ ('name', None), - ('password', None), ('email', None), - ('multi_factor_auth_rule', [identity_fakes.mfa_opt1]), + ('multi_factor_auth_rules', [identity_fakes.mfa_opt1]), ('project', None), ('enable', False), ('disable', False), @@ -1811,14 +1637,12 @@ def test_user_set_option_multi_factor_auth_rule(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, - 'options': {'multi_factor_auth_rules': [["password", "totp"]]}} + 'is_enabled': True, + 'options': {'multi_factor_auth_rules': [["password", "totp"]]}, + } - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) @@ -1826,16 +1650,16 @@ def test_user_set_with_multiple_options(self): arglist = [ '--ignore-password-expiry', '--enable-multi-factor-auth', - '--multi-factor-auth-rule', identity_fakes.mfa_opt1, + '--multi-factor-auth-rule', + identity_fakes.mfa_opt1, self.user.name, ] verifylist = [ ('name', None), - ('password', None), ('email', None), ('ignore_password_expiry', True), ('enable_multi_factor_auth', True), - ('multi_factor_auth_rule', [identity_fakes.mfa_opt1]), + ('multi_factor_auth_rules', [identity_fakes.mfa_opt1]), 
('project', None), ('enable', False), ('disable', False), @@ -1846,24 +1670,23 @@ def test_user_set_with_multiple_options(self): result = self.cmd.take_action(parsed_args) # Set expected values kwargs = { - 'enabled': True, - 'options': {'ignore_password_expiry': True, - 'multi_factor_auth_enabled': True, - 'multi_factor_auth_rules': [["password", "totp"]]}} - - # UserManager.update(user, name=, domain=, project=, password=, - # email=, description=, enabled=, default_project=) - self.users_mock.update.assert_called_with( - self.user.id, - **kwargs + 'is_enabled': True, + 'options': { + 'ignore_password_expiry': True, + 'multi_factor_auth_enabled': True, + 'multi_factor_auth_rules': [["password", "totp"]], + }, + } + + self.identity_sdk_client.update_user.assert_called_with( + user=self.user, **kwargs ) self.assertIsNone(result) -class TestUserSetPassword(TestUser): - +class TestUserSetPassword(identity_fakes.TestIdentityv3): def setUp(self): - super(TestUserSetPassword, self).setUp() + super().setUp() self.cmd = user.SetPasswordUser(self.app, None) @staticmethod @@ -1877,7 +1700,8 @@ def test_user_password_change(self): current_pass = 'old_pass' new_pass = 'new_pass' arglist = [ - '--password', new_pass, + '--password', + new_pass, ] verifylist = [ ('password', new_pass), @@ -1887,11 +1711,14 @@ def test_user_password_change(self): # Mock getting user current password. with self._mock_get_password(current_pass): result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) - self.users_mock.update_password.assert_called_with( - current_pass, new_pass + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + + self.identity_sdk_client.update_user.assert_called_with( + user=user_id, current_password=current_pass, password=new_pass ) - self.assertIsNone(result) def test_user_create_password_prompt(self): current_pass = 'old_pass' @@ -1901,18 +1728,23 @@ def test_user_create_password_prompt(self): # Mock getting user current and new password. 
with self._mock_get_password(current_pass, new_pass): result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) - self.users_mock.update_password.assert_called_with( - current_pass, new_pass + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + + self.identity_sdk_client.update_user.assert_called_with( + user=user_id, current_password=current_pass, password=new_pass ) - self.assertIsNone(result) def test_user_password_change_no_prompt(self): current_pass = 'old_pass' new_pass = 'new_pass' arglist = [ - '--password', new_pass, - '--original-password', current_pass, + '--password', + new_pass, + '--original-password', + current_pass, ] verifylist = [ ('password', new_pass), @@ -1921,35 +1753,38 @@ def test_user_password_change_no_prompt(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - - self.users_mock.update_password.assert_called_with( - current_pass, new_pass - ) self.assertIsNone(result) + conn = self.app.client_manager.sdk_connection + user_id = conn.config.get_auth().get_user_id(conn.identity) + + self.identity_sdk_client.update_user.assert_called_with( + user=user_id, current_password=current_pass, password=new_pass + ) -class TestUserShow(TestUser): - user = identity_fakes.FakeUser.create_one_user() +class TestUserShow(identity_fakes.TestIdentityv3): + user = sdk_fakes.generate_fake_resource(_user.User) def setUp(self): - super(TestUserShow, self).setUp() + super().setUp() - self.users_mock.get.return_value = self.user + self.identity_sdk_client.find_user.return_value = self.user # Get the command object to test self.cmd = user.ShowUser(self.app, None) - self.app.client_manager.identity.auth.client.get_user_id.\ - return_value = self.user.id - self.app.client_manager.identity.tokens.get_token_data.return_value = \ - {'token': - {'user': - {'domain': {'id': self.user.domain_id}, - 'id': self.user.id, - 'name': self.user.name, - } - } - } + self.identity_client.auth.client.get_user_id.return_value = ( # noqa: E501 + self.user.id + ) + self.identity_client.tokens.get_token_data.return_value = { + 'token': { + 'user': { + 'domain_id': {'id': self.user.domain_id}, + 'id': self.user.id, + 'name': self.user.name, + } + } + } def test_user_show(self): arglist = [ @@ -1965,10 +1800,21 @@ def test_user_show(self): # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.users_mock.get.assert_called_with(self.user.id) + self.identity_sdk_client.find_user.assert_called_with( + name_or_id=self.user.id, ignore_missing=False + ) - collist = ('default_project_id', 'domain_id', 'email', - 'enabled', 'id', 'name') + collist = ( + 'default_project_id', + 'domain_id', + 'email', + 'enabled', + 'id', + 'name', + 'description', + 'password_expires_at', + 'options', + ) self.assertEqual(collist, columns) datalist = ( self.user.default_project_id, @@ -1977,16 +1823,20 @@ def test_user_show(self): True, self.user.id, self.user.name, + self.user.description, + self.user.password_expires_at, + getattr(self.user, 'options', {}), ) self.assertEqual(datalist, data) def test_user_show_with_domain(self): - user = identity_fakes.FakeUser.create_one_user( - {"name": self.user.name}) - identity_client = self.app.client_manager.identity + user = sdk_fakes.generate_fake_resource( + resource_type=_user.User, name=self.user.name + ) arglist = [ - "--domain", self.user.domain_id, + "--domain", + self.user.domain_id, user.name, ] verifylist = [ @@ -1995,13 +1845,17 @@ def test_user_show_with_domain(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - user_str = common._get_token_resource(identity_client, 'user', - parsed_args.user, - parsed_args.domain) - self.assertEqual(self.user.id, user_str) + user_str = common._get_token_resource( + self.identity_sdk_client, + 'user', + parsed_args.user, + parsed_args.domain, + ) + self.assertEqual(self.user.name, user_str) arglist = [ - "--domain", user.domain_id, + "--domain", + user.domain_id, user.name, ] verifylist = [ @@ -2010,7 +1864,10 @@ def test_user_show_with_domain(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - user_str = common._get_token_resource(identity_client, 'user', - parsed_args.user, - parsed_args.domain) + user_str = common._get_token_resource( + self.identity_sdk_client, + 'user', + parsed_args.user, + parsed_args.domain, + ) self.assertEqual(user.name, user_str) diff --git a/openstackclient/tests/unit/image/v1/fakes.py b/openstackclient/tests/unit/image/v1/fakes.py index 164050c00e..30503e1f02 100644 --- a/openstackclient/tests/unit/image/v1/fakes.py +++ b/openstackclient/tests/unit/image/v1/fakes.py @@ -15,45 +15,38 @@ from unittest import mock import uuid +from openstack.image.v1 import _proxy from openstack.image.v1 import image from openstackclient.tests.unit import fakes from openstackclient.tests.unit import utils -from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes +from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes -class FakeImagev1Client: - - def __init__(self, **kwargs): - self.images = mock.Mock() - - self.auth_token = kwargs['token'] - self.management_url = kwargs['endpoint'] - self.version = 1.0 +class FakeClientMixin: + def setUp(self): + super().setUp() + self.app.client_manager.image = mock.Mock(spec=_proxy.Proxy) + self.image_client = self.app.client_manager.image -class TestImagev1(utils.TestCommand): +class TestImagev1(FakeClientMixin, utils.TestCommand): def setUp(self): super().setUp() - self.app.client_manager.image = FakeImagev1Client( + self.app.client_manager.volume = volume_fakes.FakeVolumeClient( endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN, ) - self.app.client_manager.volume = volume_fakes.FakeVolumev1Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - - self.client = self.app.client_manager.image + self.volume_client = 
self.app.client_manager.volume def create_one_image(attrs=None): """Create a fake image. :param Dictionary attrs: - A dictionary with all attrbutes of image + A dictionary with all attributes of image :return: A FakeResource object with id, name, owner, protected, visibility and tags attrs @@ -71,11 +64,8 @@ def create_one_image(attrs=None): 'min_ram': 0, 'is_public': True, 'protected': False, - 'properties': { - 'Alpha': 'a', - 'Beta': 'b', - 'Gamma': 'g'}, - 'status': 'status' + uuid.uuid4().hex + 'properties': {'Alpha': 'a', 'Beta': 'b', 'Gamma': 'g'}, + 'status': 'status' + uuid.uuid4().hex, } # Overwrite default attributes if there are some attributes set diff --git a/openstackclient/tests/unit/image/v1/test_image.py b/openstackclient/tests/unit/image/v1/test_image.py index 6c65f9a395..2d870ff89e 100644 --- a/openstackclient/tests/unit/image/v1/test_image.py +++ b/openstackclient/tests/unit/image/v1/test_image.py @@ -11,7 +11,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# import copy from unittest import mock @@ -23,13 +22,7 @@ from openstackclient.tests.unit.image.v1 import fakes as image_fakes -class TestImage(image_fakes.TestImagev1): - - pass - - -class TestImageCreate(TestImage): - +class TestImageCreate(image_fakes.TestImagev1): new_image = image_fakes.create_one_image() columns = ( 'container_format', @@ -42,7 +35,7 @@ class TestImageCreate(TestImage): 'owner', 'properties', 'protected', - 'size' + 'size', ) data = ( new_image.container_format, @@ -55,15 +48,15 @@ class TestImageCreate(TestImage): new_image.owner_id, format_columns.DictColumn(new_image.properties), new_image.is_protected, - new_image.size + new_image.size, ) def setUp(self): - super(TestImageCreate, self).setUp() + super().setUp() - self.client.create_image = mock.Mock(return_value=self.new_image) - self.client.find_image = mock.Mock(return_value=self.new_image) - self.client.update_image = mock.Mock(return_image=self.new_image) + self.image_client.create_image.return_value = self.new_image + self.image_client.find_image.return_value = self.new_image + self.image_client.update_image.return_value = self.new_image # Get the command object to test self.cmd = image.CreateImage(self.app, None) @@ -86,14 +79,14 @@ def test_image_reserve_no_options(self, raw_input): columns, data = self.cmd.take_action(parsed_args) # ImageManager.create(name=, **) - self.client.create_image.assert_called_with( + self.image_client.create_image.assert_called_with( name=self.new_image.name, container_format=image.DEFAULT_CONTAINER_FORMAT, - disk_format=image.DEFAULT_DISK_FORMAT + disk_format=image.DEFAULT_DISK_FORMAT, ) # Verify update() was not called, if it was show the args - self.assertEqual(self.client.update_image.call_args_list, []) + self.assertEqual(self.image_client.update_image.call_args_list, []) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -101,13 +94,18 @@ def test_image_reserve_no_options(self, raw_input): @mock.patch('sys.stdin', side_effect=[None]) def test_image_reserve_options(self, raw_input): arglist = [ - '--container-format', 'ovf', - '--disk-format', 'ami', - '--min-disk', '10', - '--min-ram', '4', + '--container-format', + 'ovf', + '--disk-format', + 'ami', + '--min-disk', + '10', + '--min-ram', + '4', '--protected', '--private', - '--project', 'q', + '--project', + 'q', self.new_image.name, ] verifylist = [ @@ -130,7 +128,7 @@ def 
test_image_reserve_options(self, raw_input): columns, data = self.cmd.take_action(parsed_args) # ImageManager.create(name=, **) - self.client.create_image.assert_called_with( + self.image_client.create_image.assert_called_with( name=self.new_image.name, container_format='ovf', disk_format='ami', @@ -142,23 +140,26 @@ def test_image_reserve_options(self, raw_input): ) # Verify update() was not called, if it was show the args - self.assertEqual(self.client.update_image.call_args_list, []) + self.assertEqual(self.image_client.update_image.call_args_list, []) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) - @mock.patch('openstackclient.image.v1.image.io.open', name='Open') + @mock.patch('openstackclient.image.v1.image.open', name='Open') def test_image_create_file(self, mock_open): mock_file = mock.Mock(name='File') mock_open.return_value = mock_file mock_open.read.return_value = self.data arglist = [ - '--file', 'filer', + '--file', + 'filer', '--unprotected', '--public', - '--property', 'Alpha=1', - '--property', 'Beta=2', + '--property', + 'Alpha=1', + '--property', + 'Beta=2', self.new_image.name, ] verifylist = [ @@ -184,7 +185,7 @@ def test_image_create_file(self, mock_open): mock_file.close.assert_called_with() # ImageManager.create(name=, **) - self.client.create_image.assert_called_with( + self.image_client.create_image.assert_called_with( name=self.new_image.name, container_format=image.DEFAULT_CONTAINER_FORMAT, disk_format=image.DEFAULT_DISK_FORMAT, @@ -198,22 +199,21 @@ def test_image_create_file(self, mock_open): ) # Verify update() was not called, if it was show the args - self.assertEqual(self.client.update_image.call_args_list, []) + self.assertEqual(self.image_client.update_image.call_args_list, []) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) -class TestImageDelete(TestImage): - +class TestImageDelete(image_fakes.TestImagev1): _image = image_fakes.create_one_image() def setUp(self): - super(TestImageDelete, self).setUp() + super().setUp() # This is the return value for utils.find_resource() - self.client.find_image = mock.Mock(return_value=self._image) - self.client.delete_image = mock.Mock(return_value=None) + self.image_client.find_image.return_value = self._image + self.image_client.delete_image.return_value = None # Get the command object to test self.cmd = image.DeleteImage(self.app, None) @@ -229,12 +229,11 @@ def test_image_delete_no_options(self): result = self.cmd.take_action(parsed_args) - self.client.delete_image.assert_called_with(self._image.id) + self.image_client.delete_image.assert_called_with(self._image.id) self.assertIsNone(result) -class TestImageList(TestImage): - +class TestImageList(image_fakes.TestImagev1): _image = image_fakes.create_one_image() columns = ( @@ -242,13 +241,7 @@ class TestImageList(TestImage): 'Name', 'Status', ) - datalist = ( - ( - _image.id, - _image.name, - _image.status - ), - ) + datalist = ((_image.id, _image.name, _image.status),) # create a image_info as the side_effect of the fake image_list() info = { @@ -266,11 +259,11 @@ class TestImageList(TestImage): image_info = copy.deepcopy(info) def setUp(self): - super(TestImageList, self).setUp() + super().setUp() - self.client.images = mock.Mock() - self.client.images.side_effect = [ - [self._image], [], + self.image_client.images.side_effect = [ + [self._image], + [], ] # Get the command object to test @@ -289,7 +282,7 @@ def test_image_list_no_options(self): # returns a tuple containing the column names and an 
iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with() + self.image_client.images.assert_called_with() self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) @@ -309,7 +302,7 @@ def test_image_list_public_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( is_public=True, ) @@ -331,7 +324,7 @@ def test_image_list_private_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( is_private=True, ) @@ -351,7 +344,7 @@ def test_image_list_long_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with() + self.image_client.images.assert_called_with() collist = ( 'ID', @@ -368,30 +361,35 @@ def test_image_list_long_option(self): ) self.assertEqual(collist, columns) - datalist = (( - self._image.id, - self._image.name, - self._image.disk_format, - self._image.container_format, - self._image.size, - self._image.checksum, - self._image.status, - image.VisibilityColumn(self._image.is_public), - self._image.is_protected, - self._image.owner_id, - format_columns.DictColumn( - {'Alpha': 'a', 'Beta': 'b', 'Gamma': 'g'}), - ), ) + datalist = ( + ( + self._image.id, + self._image.name, + self._image.disk_format, + self._image.container_format, + self._image.size, + self._image.checksum, + self._image.status, + image.VisibilityColumn(self._image.is_public), + self._image.is_protected, + self._image.owner_id, + format_columns.DictColumn( + {'Alpha': 'a', 'Beta': 'b', 'Gamma': 'g'} + ), + ), + ) self.assertCountEqual(datalist, tuple(data)) @mock.patch('osc_lib.api.utils.simple_filter') def test_image_list_property_option(self, sf_mock): sf_mock.side_effect = [ - [self.image_info], [], + [self.image_info], + [], ] arglist = [ - '--property', 'a=1', + '--property', + 'a=1', ] verifylist = [ ('property', {'a': '1'}), @@ -402,7 +400,7 @@ def test_image_list_property_option(self, sf_mock): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with() + self.image_client.images.assert_called_with() sf_mock.assert_called_with( [self._image], attr='a', @@ -416,7 +414,8 @@ def test_image_list_property_option(self, sf_mock): @mock.patch('osc_lib.utils.sort_items') def test_image_list_sort_option(self, si_mock): si_mock.side_effect = [ - [self._image], [], + [self._image], + [], ] arglist = ['--sort', 'name:asc'] @@ -427,26 +426,22 @@ def test_image_list_sort_option(self, si_mock): # returns a tuple containing the column names and an iterable # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with() - si_mock.assert_called_with( - [self._image], - 'name:asc' - ) + self.image_client.images.assert_called_with() + si_mock.assert_called_with([self._image], 'name:asc') self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, tuple(data)) -class TestImageSet(TestImage): - +class TestImageSet(image_fakes.TestImagev1): _image = image_fakes.create_one_image() def setUp(self): - super(TestImageSet, self).setUp() + super().setUp() # This is the return value for utils.find_resource() - self.client.find_image = mock.Mock(return_value=self._image) - self.client.update_image = mock.Mock(return_value=self._image) + self.image_client.find_image.return_value = self._image + self.image_client.update_image.return_value = self._image # Get the command object to test self.cmd = image.SetImage(self.app, None) @@ -462,18 +457,25 @@ def test_image_set_no_options(self): result = self.cmd.take_action(parsed_args) - self.client.update_image.assert_called_with(self._image.id, **{}) + self.image_client.update_image.assert_called_with(self._image.id, **{}) self.assertIsNone(result) def test_image_set_options(self): arglist = [ - '--name', 'new-name', - '--min-disk', '2', - '--min-ram', '4', - '--container-format', 'ovf', - '--disk-format', 'vmdk', - '--size', '35165824', - '--project', 'new-owner', + '--name', + 'new-name', + '--min-disk', + '2', + '--min-ram', + '4', + '--container-format', + 'ovf', + '--disk-format', + 'vmdk', + '--size', + '35165824', + '--project', + 'new-owner', self._image.name, ] verifylist = [ @@ -497,12 +499,11 @@ def test_image_set_options(self): 'min_ram': 4, 'container_format': 'ovf', 'disk_format': 'vmdk', - 'size': 35165824 + 'size': 35165824, } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) @@ -528,9 +529,8 @@ def test_image_set_bools1(self): 'is_public': False, } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) @@ -556,16 +556,17 @@ def test_image_set_bools2(self): 'is_public': True, } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) def test_image_set_properties(self): arglist = [ - '--property', 'Alpha=1', - '--property', 'Beta=2', + '--property', + 'Alpha=1', + '--property', + 'Beta=2', self._image.name, ] verifylist = [ @@ -584,15 +585,14 @@ def test_image_set_properties(self): }, } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) def test_image_update_volume(self): # Set up VolumeManager Mock - volumes_mock = self.app.client_manager.volume.volumes + volumes_mock = self.volume_client.volumes volumes_mock.reset_mock() volumes_mock.get.return_value = fakes.FakeResource( None, @@ -614,8 +614,10 @@ def test_image_update_volume(self): volumes_mock.upload_to_image.return_value = (201, full_response) arglist = [ - '--volume', 'volly', - '--name', 'updated_image', + '--volume', + 'volly', + 
'--name', + 'updated_image', self._image.name, ] verifylist = [ @@ -642,7 +644,7 @@ def test_image_update_volume(self): '', ) # ImageManager.update(image_id, remove_props=, **) - self.client.update_image.assert_called_with( + self.image_client.update_image.assert_called_with( self._image.id, name='updated_image', volume='volly', @@ -651,8 +653,10 @@ def test_image_update_volume(self): def test_image_set_numeric_options_to_zero(self): arglist = [ - '--min-disk', '0', - '--min-ram', '0', + '--min-disk', + '0', + '--min-ram', + '0', self._image.name, ] verifylist = [ @@ -669,15 +673,13 @@ def test_image_set_numeric_options_to_zero(self): 'min_ram': 0, } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) -class TestImageShow(TestImage): - +class TestImageShow(image_fakes.TestImagev1): _image = image_fakes.create_one_image(attrs={'size': 2000}) columns = ( 'container_format', @@ -707,9 +709,9 @@ class TestImageShow(TestImage): ) def setUp(self): - super(TestImageShow, self).setUp() + super().setUp() - self.client.find_image = mock.Mock(return_value=self._image) + self.image_client.find_image.return_value = self._image # Get the command object to test self.cmd = image.ShowImage(self.app, None) @@ -727,8 +729,8 @@ def test_image_show(self): # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. columns, data = self.cmd.take_action(parsed_args) - self.client.find_image.assert_called_with( - self._image.id, + self.image_client.find_image.assert_called_with( + self._image.id, ignore_missing=False ) self.assertEqual(self.columns, columns) @@ -749,9 +751,56 @@ def test_image_show_human_readable(self): # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.client.find_image.assert_called_with( - self._image.id, + self.image_client.find_image.assert_called_with( + self._image.id, ignore_missing=False ) size_index = columns.index('size') self.assertEqual(data[size_index].human_readable(), '2K') + + +class TestImageSave(image_fakes.TestImagev1): + image = image_fakes.create_one_image({}) + + def setUp(self): + super().setUp() + + self.image_client.find_image.return_value = self.image + self.image_client.download_image.return_value = self.image + + # Get the command object to test + self.cmd = image.SaveImage(self.app, None) + + def test_save_data(self): + arglist = ['--file', '/path/to/file', self.image.id] + + verifylist = [('file', '/path/to/file'), ('image', self.image.id)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.cmd.take_action(parsed_args) + + self.image_client.download_image.assert_called_once_with( + self.image.id, output='/path/to/file', stream=True, chunk_size=1024 + ) + + def test_save_data_with_chunk_size(self): + arglist = [ + '--file', + '/path/to/file', + '--chunk-size', + '2048', + self.image.id, + ] + + verifylist = [ + ('file', '/path/to/file'), + ('chunk_size', 2048), + ('image', self.image.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.cmd.take_action(parsed_args) + + self.image_client.download_image.assert_called_once_with( + self.image.id, output='/path/to/file', stream=True, chunk_size=2048 + ) diff --git a/openstackclient/tests/unit/image/v2/fakes.py b/openstackclient/tests/unit/image/v2/fakes.py index 8ddd9a0992..27f4777bc8 100644 --- a/openstackclient/tests/unit/image/v2/fakes.py +++ b/openstackclient/tests/unit/image/v2/fakes.py @@ -16,65 +16,34 @@ from unittest import mock import uuid +from openstack.image.v2 import _proxy +from openstack.image.v2 import cache from openstack.image.v2 import image from openstack.image.v2 import member from openstack.image.v2 import metadef_namespace +from openstack.image.v2 import metadef_object +from openstack.image.v2 import metadef_property +from openstack.image.v2 import metadef_resource_type from openstack.image.v2 import service_info as _service_info from openstack.image.v2 import task -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit import utils -class FakeImagev2Client: - - def __init__(self, **kwargs): - self.images = mock.Mock() - self.create_image = mock.Mock() - self.delete_image = mock.Mock() - self.update_image = mock.Mock() - self.find_image = mock.Mock() - self.get_image = mock.Mock() - self.download_image = mock.Mock() - self.reactivate_image = mock.Mock() - self.deactivate_image = mock.Mock() - self.stage_image = mock.Mock() - self.import_image = mock.Mock() - - self.members = mock.Mock() - self.add_member = mock.Mock() - self.remove_member = mock.Mock() - self.update_member = mock.Mock() - - self.remove_tag = mock.Mock() - self.metadef_namespaces = mock.Mock() - - self.tasks = mock.Mock() - self.tasks.resource_class = fakes.FakeResource(None, {}) - self.get_task = mock.Mock() - - self.get_import_info = mock.Mock() - - self.auth_token = kwargs['token'] - self.management_url = kwargs['endpoint'] - self.version = 2.0 - - -class TestImagev2(utils.TestCommand): - +class FakeClientMixin: def setUp(self): super().setUp() - self.app.client_manager.image = FakeImagev2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) + 
self.app.client_manager.image = mock.Mock(spec=_proxy.Proxy) + self.image_client = self.app.client_manager.image - self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) + +class TestImagev2( + identity_fakes.FakeClientMixin, + FakeClientMixin, + utils.TestCommand, +): ... def create_one_image(attrs=None): @@ -162,7 +131,7 @@ def create_one_import_info(attrs=None): 'web-download', 'glance-download', 'copy-image', - ] + ], } } import_info.update(attrs) @@ -170,6 +139,38 @@ def create_one_import_info(attrs=None): return _service_info.Import(**import_info) +def create_one_stores_info(attrs=None): + """Create a fake stores info. + + :param attrs: A dictionary with all attributes of stores + :type attrs: dict + :return: A fake Store object list. + :rtype: `openstack.image.v2.service_info.Store` + """ + attrs = attrs or {} + + stores_info = { + "stores": [ + { + "id": "reliable", + "description": "More expensive store with data redundancy", + }, + { + "id": "fast", + "description": "Provides quick access to your image data", + "default": True, + }, + { + "id": "cheap", + "description": "Less expensive store for seldom-used images", + }, + ] + } + stores_info.update(attrs) + + return _service_info.Store(**stores_info) + + def create_one_task(attrs=None): """Create a fake task. @@ -188,10 +189,10 @@ def create_one_task(attrs=None): 'input': { 'image_properties': { 'container_format': 'ovf', - 'disk_format': 'vhd' + 'disk_format': 'vhd', }, 'import_from': 'https://apps.openstack.org/excellent-image', - 'import_from_format': 'qcow2' + 'import_from_format': 'qcow2', }, 'message': '', 'owner': str(uuid.uuid4()), @@ -236,34 +237,26 @@ def create_tasks(attrs=None, count=2): return tasks -class FakeMetadefNamespaceClient: - - def __init__(self, **kwargs): - self.create_metadef_namespace = mock.Mock() - self.delete_metadef_namespace = mock.Mock() - self.metadef_namespaces = mock.Mock() - self.get_metadef_namespace = mock.Mock() - self.update_metadef_namespace = mock.Mock() - - self.auth_token = kwargs['token'] - self.management_url = kwargs['endpoint'] - self.version = 2.0 - - -class TestMetadefNamespaces(utils.TestCommand): - - def setUp(self): - super().setUp() - - self.app.client_manager.image = FakeMetadefNamespaceClient( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) +def create_cache(attrs=None): + attrs = attrs or {} + cache_info = { + 'cached_images': [ + { + 'hits': 0, + 'image_id': '1a56983c-f71f-490b-a7ac-6b321a18935a', + 'last_accessed': 1671699579.444378, + 'last_modified': 1671699579.444378, + 'size': 0, + }, + ], + 'queued_images': [ + '3a4560a1-e585-443e-9b39-553b46ec92d1', + '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + ], + } + cache_info.update(attrs) - self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) + return cache.Cache(**cache_info) def create_one_metadef_namespace(attrs=None): @@ -289,3 +282,139 @@ def create_one_metadef_namespace(attrs=None): # Overwrite default attributes if there are some attributes set metadef_namespace_list.update(attrs) return metadef_namespace.MetadefNamespace(**metadef_namespace_list) + + +def create_one_metadef_property(attrs=None): + attrs = attrs or {} + + metadef_property_list = { + 'name': 'cpu_cores', + 'title': 'vCPU Cores', + 'type': 'integer', + } + + # Overwrite default attributes if there are some attributes set + metadef_property_list.update(attrs) + return 
metadef_property.MetadefProperty(**metadef_property_list) + + +def create_one_resource_type(attrs=None): + """Create a fake MetadefResourceType member. + + :param attrs: A dictionary with all attributes of + metadef_resource_type member + :type attrs: dict + :return: a fake MetadefResourceType object + :rtype: A `metadef_resource_type.MetadefResourceType` + """ + attrs = attrs or {} + + metadef_resource_type_info = { + 'name': 'OS::Compute::Quota', + 'properties_target': 'image', + } + + metadef_resource_type_info.update(attrs) + return metadef_resource_type.MetadefResourceType( + **metadef_resource_type_info + ) + + +def create_resource_types(attrs=None, count=2): + """Create multiple fake resource types. + + :param attrs: A dictionary with all attributes of + metadef_resource_type member + :type attrs: dict + :return: A list of fake MetadefResourceType objects + :rtype: list + """ + metadef_resource_types = [] + for n in range(0, count): + metadef_resource_types.append(create_one_resource_type(attrs)) + + return metadef_resource_types + + +def create_one_metadef_object(attrs=None): + """Create a fake MetadefObject. + + :param attrs: A dictionary with all attributes of the metadef object + :type attrs: dict + :return: a fake MetadefObject object + :rtype: A `metadef_object.MetadefObject` + """ + attrs = attrs or {} + + metadef_objects_list = { + 'created_at': '2014-09-19T18:20:56Z', + 'description': 'The CPU limits with control parameters.', + 'name': 'CPU Limits', + 'properties': { + 'quota:cpu_period': { + 'description': 'The enforcement interval', + 'maximum': 1000000, + 'minimum': 1000, + 'title': 'Quota: CPU Period', + 'type': 'integer', + }, + 'quota:cpu_quota': { + 'description': 'The maximum allowed bandwidth', + 'title': 'Quota: CPU Quota', + 'type': 'integer', + }, + 'quota:cpu_shares': { + 'description': 'The proportional weighted', + 'title': 'Quota: CPU Shares', + 'type': 'integer', + }, + }, + 'required': [], + 'schema': '/v2/schemas/metadefs/object', + 'updated_at': '2014-09-19T18:20:56Z', + } + + # Overwrite default attributes if there are some attributes set + metadef_objects_list.update(attrs) + return metadef_object.MetadefObject(**metadef_objects_list) + + +def create_one_resource_type_association(attrs=None): + """Create a fake MetadefResourceTypeAssociation. + + :param attrs: A dictionary with all attributes of + metadef_resource_type_association member + :type attrs: dict + :return: A fake MetadefResourceTypeAssociation object + :rtype: A `metadef_resource_type_association. 
+ MetadefResourceTypeAssociation` + """ + attrs = attrs or {} + + metadef_resource_type_association_info = { + 'namespace_name': 'OS::Compute::Quota', + 'name': 'OS::Nova::Flavor', + } + + metadef_resource_type_association_info.update(attrs) + return metadef_resource_type.MetadefResourceTypeAssociation( + **metadef_resource_type_association_info + ) + + +def create_resource_type_associations(attrs=None, count=2): + """Create multiple fake resource type associations. + + :param attrs: A dictionary with all attributes of + metadef_resource_type_association member + :type attrs: dict + :return: A list of fake MetadefResourceTypeAssociation objects + :rtype: list + """ + resource_type_associations = [] + for n in range(0, count): + resource_type_associations.append( + create_one_resource_type_association(attrs) + ) + + return resource_type_associations diff --git a/openstackclient/tests/unit/image/v2/test_cache.py b/openstackclient/tests/unit/image/v2/test_cache.py new file mode 100644 index 0000000000..3624d7867b --- /dev/null +++ b/openstackclient/tests/unit/image/v2/test_cache.py @@ -0,0 +1,214 @@ +# Copyright 2023 Red Hat. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest.mock import call + +from openstack import exceptions as sdk_exceptions +from osc_lib import exceptions + +from openstackclient.image.v2 import cache +from openstackclient.tests.unit.image.v2 import fakes + + +class TestCacheList(fakes.TestImagev2): + _cache = fakes.create_cache() + columns = [ + "ID", + "State", + "Last Accessed (UTC)", + "Last Modified (UTC)", + "Size", + "Hits", + ] + + cache_list = cache._format_image_cache(dict(fakes.create_cache())) + datalist = ( + ( + image['image_id'], + image['state'], + image['last_accessed'], + image['last_modified'], + image['size'], + image['hits'], + ) + for image in cache_list + ) + + def setUp(self): + super().setUp() + + # Get the command object to test + self.image_client.get_image_cache.return_value = self._cache + self.cmd = cache.ListCachedImage(self.app, None) + + def test_image_cache_list(self): + arglist = [] + parsed_args = self.check_parser(self.cmd, arglist, []) + columns, data = self.cmd.take_action(parsed_args) + + self.image_client.get_image_cache.assert_called() + self.assertEqual(self.columns, columns) + self.assertEqual(tuple(self.datalist), tuple(data)) + + +class TestQueueCache(fakes.TestImagev2): + def setUp(self): + super().setUp() + + self.image_client.queue_image.return_value = None + self.cmd = cache.QueueCachedImage(self.app, None) + + def test_cache_queue(self): + images = fakes.create_images(count=1) + arglist = [ + images[0].id, + ] + + verifylist = [ + ('images', [images[0].id]), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.image_client.find_image.side_effect = images + + self.cmd.take_action(parsed_args) + + self.image_client.queue_image.assert_called_once_with(images[0].id) + + def test_cache_queue_multiple_images(self): + images = fakes.create_images(count=3) + arglist = 
[i.id for i in images] + + verifylist = [ + ('images', arglist), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.image_client.find_image.side_effect = images + + self.cmd.take_action(parsed_args) + calls = [call(i.id) for i in images] + self.image_client.queue_image.assert_has_calls(calls) + + +class TestCacheDelete(fakes.TestImagev2): + def setUp(self): + super().setUp() + + self.image_client.cache_delete_image.return_value = None + self.cmd = cache.DeleteCachedImage(self.app, None) + + def test_cache_delete(self): + images = fakes.create_images(count=1) + arglist = [ + images[0].id, + ] + + verifylist = [ + ('images', [images[0].id]), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.image_client.find_image.side_effect = images + + self.cmd.take_action(parsed_args) + + self.image_client.find_image.assert_called_once_with( + images[0].id, ignore_missing=False + ) + self.image_client.cache_delete_image.assert_called_once_with( + images[0].id + ) + + def test_cache_delete_multiple_images(self): + images = fakes.create_images(count=3) + arglist = [i.id for i in images] + + verifylist = [ + ('images', arglist), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.image_client.find_image.side_effect = images + + self.cmd.take_action(parsed_args) + calls = [call(i.id) for i in images] + self.image_client.cache_delete_image.assert_has_calls(calls) + + def test_cache_delete_multiple_images_exception(self): + images = fakes.create_images(count=2) + arglist = [ + images[0].id, + images[1].id, + 'x-y-x', + ] + verifylist = [ + ('images', arglist), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + ret_find = [images[0], images[1], sdk_exceptions.ResourceNotFound()] + + self.image_client.find_image.side_effect = ret_find + + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + calls = [call(i.id) for i in images] + self.image_client.cache_delete_image.assert_has_calls(calls) + + +class TestCacheClear(fakes.TestImagev2): + def setUp(self): + super().setUp() + + self.image_client.clear_cache.return_value = None + self.cmd = cache.ClearCachedImage(self.app, None) + + def test_cache_clear_no_option(self): + arglist = [] + + verifylist = [('target', 'both')] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.cmd.take_action(parsed_args) + + self.assertIsNone( + self.image_client.clear_cache.assert_called_with('both') + ) + + def test_cache_clear_queue_option(self): + arglist = ['--queue'] + + verifylist = [('target', 'queue')] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.cmd.take_action(parsed_args) + + self.image_client.clear_cache.assert_called_once_with('queue') + + def test_cache_clear_cache_option(self): + arglist = ['--cache'] + + verifylist = [('target', 'cache')] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.cmd.take_action(parsed_args) + + self.image_client.clear_cache.assert_called_once_with('cache') diff --git a/openstackclient/tests/unit/image/v2/test_image.py b/openstackclient/tests/unit/image/v2/test_image.py index 019b4d9d37..e6de9f2ebd 100644 --- a/openstackclient/tests/unit/image/v2/test_image.py +++ b/openstackclient/tests/unit/image/v2/test_image.py @@ -17,8 +17,9 @@ import tempfile from unittest import mock -from cinderclient import api_versions +from openstack.block_storage.v2 import volume as _volume from openstack import exceptions as sdk_exceptions +from 
openstack.test import fakes as sdk_fakes from osc_lib.cli import format_columns from osc_lib import exceptions @@ -29,34 +30,17 @@ class TestImage(image_fakes.TestImagev2, volume_fakes.TestVolume): - def setUp(self): super().setUp() - # Get shortcuts to mocked image client - self.client = self.app.client_manager.image - # Get shortcut to the Mocks in identity client - self.project_mock = self.app.client_manager.identity.projects + self.project_mock = self.identity_client.projects self.project_mock.reset_mock() - self.domain_mock = self.app.client_manager.identity.domains + self.domain_mock = self.identity_client.domains self.domain_mock.reset_mock() - self.volumes_mock = self.app.client_manager.volume.volumes - fake_body = { - 'os-volume_upload_image': - {'volume_type': {'name': 'fake_type'}}} - self.volumes_mock.upload_to_image.return_value = ( - 200, fake_body) - self.volumes_mock.reset_mock() - - def setup_images_mock(self, count): - images = image_fakes.create_images(count=count) - - return images class TestImageCreate(TestImage): - project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() @@ -64,25 +48,24 @@ def setUp(self): super().setUp() self.new_image = image_fakes.create_one_image() - self.client.create_image.return_value = self.new_image + self.image_client.create_image.return_value = self.new_image + self.image_client.update_image.return_value = self.new_image + self.image_client.get_image.return_value = self.new_image self.project_mock.get.return_value = self.project self.domain_mock.get.return_value = self.domain - self.client.update_image.return_value = self.new_image - (self.expected_columns, self.expected_data) = zip( - *sorted(_image._format_image(self.new_image).items())) + *sorted(_image._format_image(self.new_image).items()) + ) # Get the command object to test self.cmd = _image.CreateImage(self.app, None) @mock.patch("sys.stdin", side_effect=[None]) def test_image_reserve_no_options(self, raw_input): - arglist = [ - self.new_image.name - ] + arglist = [self.new_image.name] verifylist = [ ('container_format', _image.DEFAULT_CONTAINER_FORMAT), ('disk_format', _image.DEFAULT_DISK_FORMAT), @@ -96,12 +79,13 @@ def test_image_reserve_no_options(self, raw_input): columns, data = self.cmd.take_action(parsed_args) # ImageManager.create(name=, **) - self.client.create_image.assert_called_with( + self.image_client.create_image.assert_called_with( name=self.new_image.name, allow_duplicates=True, container_format=_image.DEFAULT_CONTAINER_FORMAT, disk_format=_image.DEFAULT_DISK_FORMAT, ) + self.image_client.get_image.assert_called_once_with(self.new_image) self.assertEqual(self.expected_columns, columns) self.assertCountEqual(self.expected_data, data) @@ -109,17 +93,24 @@ def test_image_reserve_no_options(self, raw_input): @mock.patch('sys.stdin', side_effect=[None]) def test_image_reserve_options(self, raw_input): arglist = [ - '--container-format', 'ovf', - '--disk-format', 'ami', - '--min-disk', '10', - '--min-ram', '4', + '--container-format', + 'ovf', + '--disk-format', + 'ami', + '--min-disk', + '10', + '--min-ram', + '4', '--protected' if self.new_image.is_protected else '--unprotected', ( '--private' - if self.new_image.visibility == 'private' else '--public' + if self.new_image.visibility == 'private' + else '--public' ), - '--project', self.new_image.owner_id, - '--project-domain', self.domain.id, + '--project', + self.new_image.owner_id, + '--project-domain', + self.domain.id, self.new_image.name, ] verifylist = [ @@ 
-141,7 +132,7 @@ def test_image_reserve_options(self, raw_input): columns, data = self.cmd.take_action(parsed_args) # ImageManager.create(name=, **) - self.client.create_image.assert_called_with( + self.image_client.create_image.assert_called_with( name=self.new_image.name, allow_duplicates=True, container_format='ovf', @@ -152,6 +143,7 @@ def test_image_reserve_options(self, raw_input): is_protected=self.new_image.is_protected, visibility=self.new_image.visibility, ) + self.image_client.get_image.assert_called_once_with(self.new_image) self.assertEqual(self.expected_columns, columns) self.assertCountEqual(self.expected_data, data) @@ -161,13 +153,18 @@ def test_image_create_with_unexist_project(self): self.project_mock.find.side_effect = exceptions.NotFound(None) arglist = [ - '--container-format', 'ovf', - '--disk-format', 'ami', - '--min-disk', '10', - '--min-ram', '4', + '--container-format', + 'ovf', + '--disk-format', + 'ami', + '--min-disk', + '10', + '--min-ram', + '4', '--protected', '--private', - '--project', 'unexist_owner', + '--project', + 'unexist_owner', 'graven', ] verifylist = [ @@ -194,15 +191,26 @@ def test_image_create_file(self): imagefile.close() arglist = [ - '--file', imagefile.name, - ('--unprotected' - if not self.new_image.is_protected else '--protected'), - ('--public' - if self.new_image.visibility == 'public' else '--private'), - '--property', 'Alpha=1', - '--property', 'Beta=2', - '--tag', self.new_image.tags[0], - '--tag', self.new_image.tags[1], + '--file', + imagefile.name, + ( + '--unprotected' + if not self.new_image.is_protected + else '--protected' + ), + ( + '--public' + if self.new_image.visibility == 'public' + else '--private' + ), + '--property', + 'Alpha=1', + '--property', + 'Beta=2', + '--tag', + self.new_image.tags[0], + '--tag', + self.new_image.tags[1], self.new_image.name, ] verifylist = [ @@ -221,7 +229,7 @@ def test_image_create_file(self): columns, data = self.cmd.take_action(parsed_args) # ImageManager.create(name=, **) - self.client.create_image.assert_called_with( + self.image_client.create_image.assert_called_with( name=self.new_image.name, allow_duplicates=True, container_format=_image.DEFAULT_CONTAINER_FORMAT, @@ -233,17 +241,15 @@ def test_image_create_file(self): tags=self.new_image.tags, filename=imagefile.name, ) + self.image_client.get_image.assert_called_once_with(self.new_image) - self.assertEqual( - self.expected_columns, - columns) - self.assertCountEqual( - self.expected_data, - data) + self.assertEqual(self.expected_columns, columns) + self.assertCountEqual(self.expected_data, data) @mock.patch('openstackclient.image.v2.image.get_data_from_stdin') def test_image_create__progress_ignore_with_stdin( - self, mock_get_data_from_stdin, + self, + mock_get_data_from_stdin, ): fake_stdin = io.BytesIO(b'some fake data') mock_get_data_from_stdin.return_value = fake_stdin @@ -260,7 +266,7 @@ def test_image_create__progress_ignore_with_stdin( columns, data = self.cmd.take_action(parsed_args) - self.client.create_image.assert_called_with( + self.image_client.create_image.assert_called_with( name=self.new_image.name, allow_duplicates=True, container_format=_image.DEFAULT_CONTAINER_FORMAT, @@ -268,14 +274,15 @@ def test_image_create__progress_ignore_with_stdin( data=fake_stdin, validate_checksum=False, ) + self.image_client.get_image.assert_called_once_with(self.new_image) self.assertEqual(self.expected_columns, columns) self.assertCountEqual(self.expected_data, data) def test_image_create_dead_options(self): - arglist = [ - 
'--store', 'somewhere', + '--store', + 'somewhere', self.new_image.name, ] verifylist = [ @@ -284,12 +291,11 @@ def test_image_create_dead_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) @mock.patch('sys.stdin', side_effect=[None]) def test_image_create_import(self, raw_input): - arglist = [ '--import', self.new_image.name, @@ -301,29 +307,28 @@ def test_image_create_import(self, raw_input): columns, data = self.cmd.take_action(parsed_args) - # ImageManager.create(name=, **) - self.client.create_image.assert_called_with( + self.image_client.create_image.assert_called_with( name=self.new_image.name, allow_duplicates=True, container_format=_image.DEFAULT_CONTAINER_FORMAT, disk_format=_image.DEFAULT_DISK_FORMAT, - use_import=True + use_import=True, ) + self.image_client.get_image.assert_called_once_with(self.new_image) - @mock.patch('osc_lib.utils.find_resource') @mock.patch('openstackclient.image.v2.image.get_data_from_stdin') - def test_image_create_from_volume(self, mock_get_data_f, mock_get_vol): - - fake_vol_id = 'fake-volume-id' + def test_image_create_from_volume(self, mock_get_data_f): mock_get_data_f.return_value = None - class FakeVolume: - id = fake_vol_id - - mock_get_vol.return_value = FakeVolume() + volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = volume + self.volume_sdk_client.upload_volume_to_image.return_value = { + 'volume_type': {'name': 'fake_type'} + } arglist = [ - '--volume', fake_vol_id, + '--volume', + volume.id, self.new_image.name, ] verifylist = [ @@ -333,60 +338,59 @@ class FakeVolume: columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.upload_to_image.assert_called_with( - fake_vol_id, - False, + self.volume_sdk_client.upload_volume_to_image.assert_called_once_with( + volume.id, self.new_image.name, - 'bare', - 'raw' + force=False, + disk_format='raw', + container_format='bare', + visibility=None, + protected=None, ) - @mock.patch('osc_lib.utils.find_resource') @mock.patch('openstackclient.image.v2.image.get_data_from_stdin') - def test_image_create_from_volume_fail(self, mock_get_data_f, - mock_get_vol): - - fake_vol_id = 'fake-volume-id' + def test_image_create_from_volume_pre_v31(self, mock_get_data_f): mock_get_data_f.return_value = None - class FakeVolume: - id = fake_vol_id - - mock_get_vol.return_value = FakeVolume() + volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = volume + self.volume_sdk_client.upload_volume_to_image.return_value = { + 'volume_type': {'name': 'fake_type'} + } arglist = [ - '--volume', fake_vol_id, + '--volume', + volume.id, self.new_image.name, - '--public' + '--public', ] verifylist = [ ('name', self.new_image.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn('--os-volume-api-version 3.1 or greater ', str(exc)) - @mock.patch('osc_lib.utils.find_resource') @mock.patch('openstackclient.image.v2.image.get_data_from_stdin') - def test_image_create_from_volume_v31(self, mock_get_data_f, - mock_get_vol): + def test_image_create_from_volume_v31(self, mock_get_data_f): + self.set_volume_api_version('3.1') - 
self.app.client_manager.volume.api_version = ( - api_versions.APIVersion('3.1')) - - fake_vol_id = 'fake-volume-id' mock_get_data_f.return_value = None - class FakeVolume: - id = fake_vol_id - - mock_get_vol.return_value = FakeVolume() + volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = volume + self.volume_sdk_client.upload_volume_to_image.return_value = { + 'volume_type': {'name': 'fake_type'} + } arglist = [ - '--volume', fake_vol_id, + '--volume', + volume.id, self.new_image.name, - '--public' + '--public', ] verifylist = [ ('name', self.new_image.name), @@ -395,25 +399,23 @@ class FakeVolume: columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.upload_to_image.assert_called_with( - fake_vol_id, - False, + self.volume_sdk_client.upload_volume_to_image.assert_called_once_with( + volume.id, self.new_image.name, - 'bare', - 'raw', + force=False, + disk_format='raw', + container_format='bare', visibility='public', - protected=False + protected=False, ) class TestAddProjectToImage(TestImage): - project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() _image = image_fakes.create_one_image() new_member = image_fakes.create_one_image_member( - attrs={'image_id': _image.id, - 'member_id': project.id} + attrs={'image_id': _image.id, 'member_id': project.id} ) columns = ( @@ -422,7 +424,7 @@ class TestAddProjectToImage(TestImage): 'member_id', 'schema', 'status', - 'updated_at' + 'updated_at', ) datalist = ( @@ -431,17 +433,17 @@ class TestAddProjectToImage(TestImage): new_member.member_id, new_member.schema, new_member.status, - new_member.updated_at + new_member.updated_at, ) def setUp(self): super().setUp() # This is the return value for utils.find_resource() - self.client.find_image.return_value = self._image + self.image_client.find_image.return_value = self._image # Update the image_id in the MEMBER dict - self.client.add_member.return_value = self.new_member + self.image_client.add_member.return_value = self.new_member self.project_mock.get.return_value = self.project self.domain_mock.get.return_value = self.domain # Get the command object to test @@ -462,9 +464,8 @@ def test_add_project_to_image_no_option(self): # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. columns, data = self.cmd.take_action(parsed_args) - self.client.add_member.assert_called_with( - image=self._image.id, - member_id=self.project.id + self.image_client.add_member.assert_called_with( + image=self._image.id, member_id=self.project.id ) self.assertEqual(self.columns, columns) @@ -474,7 +475,8 @@ def test_add_project_to_image_with_option(self): arglist = [ self._image.id, self.project.id, - '--project-domain', self.domain.id, + '--project-domain', + self.domain.id, ] verifylist = [ ('image', self._image.id), @@ -487,9 +489,8 @@ def test_add_project_to_image_with_option(self): # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.client.add_member.assert_called_with( - image=self._image.id, - member_id=self.project.id + self.image_client.add_member.assert_called_with( + image=self._image.id, member_id=self.project.id ) self.assertEqual(self.columns, columns) @@ -497,17 +498,16 @@ def test_add_project_to_image_with_option(self): class TestImageDelete(TestImage): - def setUp(self): super().setUp() - self.client.delete_image.return_value = None + self.image_client.delete_image.return_value = None # Get the command object to test self.cmd = _image.DeleteImage(self.app, None) def test_image_delete_no_options(self): - images = self.setup_images_mock(count=1) + images = image_fakes.create_images(count=1) arglist = [ images[0].id, @@ -517,15 +517,37 @@ def test_image_delete_no_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.client.find_image.side_effect = images + self.image_client.find_image.side_effect = images + + result = self.cmd.take_action(parsed_args) + + self.image_client.delete_image.assert_called_with( + images[0].id, store=parsed_args.store, ignore_missing=False + ) + self.assertIsNone(result) + + def test_image_delete_from_store(self): + images = image_fakes.create_images(count=1) + + arglist = [ + images[0].id, + '--store', + 'store1', + ] + verifylist = [('images', [images[0].id]), ('store', 'store1')] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.image_client.find_image.side_effect = images result = self.cmd.take_action(parsed_args) - self.client.delete_image.assert_called_with(images[0].id) + self.image_client.delete_image.assert_called_with( + images[0].id, store=parsed_args.store, ignore_missing=False + ) self.assertIsNone(result) def test_image_delete_multi_images(self): - images = self.setup_images_mock(count=3) + images = image_fakes.create_images(count=3) arglist = [i.id for i in images] verifylist = [ @@ -533,25 +555,47 @@ def test_image_delete_multi_images(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.client.find_image.side_effect = images + self.image_client.find_image.side_effect = images result = self.cmd.take_action(parsed_args) - calls = [mock.call(i.id) for i in images] - self.client.delete_image.assert_has_calls(calls) + calls = [ + mock.call(i.id, store=parsed_args.store, ignore_missing=False) + for i in images + ] + self.image_client.delete_image.assert_has_calls(calls) self.assertIsNone(result) - def test_image_delete_multi_images_exception(self): + def test_image_delete_from_store_without_multi_backend(self): + images = image_fakes.create_images(count=1) + + arglist = [images[0].id, '--store', 'store1'] + verifylist = [('images', [images[0].id]), ('store', 'store1')] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.image_client.find_image.side_effect = images + self.image_client.delete_image.side_effect = ( + sdk_exceptions.ResourceNotFound + ) + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertIn( + "Multi Backend support not enabled", + str(exc), + ) + + def test_image_delete_multi_images_exception(self): images = image_fakes.create_images(count=2) arglist = [ images[0].id, images[1].id, 'x-y-x', ] - verifylist = [ - ('images', arglist) - ] + verifylist = [('images', arglist)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # Fake exception in utils.find_resource() @@ -559,22 +603,21 @@ def 
test_image_delete_multi_images_exception(self): # It calls get() several times, but find() only one time. So we # choose to fake get() always raise exception, then pass through. # And fake find() to find the real network or not. - ret_find = [ - images[0], - images[1], - sdk_exceptions.ResourceNotFound() - ] + ret_find = [images[0], images[1], sdk_exceptions.ResourceNotFound()] - self.client.find_image.side_effect = ret_find + self.image_client.find_image.side_effect = ret_find - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) - calls = [mock.call(i.id) for i in images] - self.client.delete_image.assert_has_calls(calls) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + calls = [ + mock.call(i.id, store=parsed_args.store, ignore_missing=False) + for i in images + ] + self.image_client.delete_image.assert_has_calls(calls) class TestImageList(TestImage): - _image = image_fakes.create_one_image() columns = ( @@ -584,15 +627,17 @@ class TestImageList(TestImage): ) datalist = ( - _image.id, - _image.name, - None, - ), + ( + _image.id, + _image.name, + None, + ), + ) def setUp(self): super().setUp() - self.client.images.side_effect = [[self._image], []] + self.image_client.images.side_effect = [[self._image], []] # Get the command object to test self.cmd = _image.ListImage(self.app, None) @@ -609,7 +654,7 @@ def test_image_list_no_options(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( # marker=self._image.id, ) @@ -630,7 +675,7 @@ def test_image_list_public_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( visibility='public', ) @@ -651,7 +696,7 @@ def test_image_list_private_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( visibility='private', ) @@ -672,7 +717,7 @@ def test_image_list_community_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( visibility='community', ) @@ -693,7 +738,7 @@ def test_image_list_shared_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( visibility='shared', ) @@ -714,7 +759,7 @@ def test_image_list_all_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( visibility='all', ) @@ -722,14 +767,11 @@ def test_image_list_all_option(self): self.assertCountEqual(self.datalist, tuple(data)) def test_image_list_shared_member_status_option(self): - arglist = [ - '--shared', - '--member-status', 'all' - ] + arglist = ['--shared', '--member-status', 'all'] verifylist = [ ('visibility', 'shared'), ('long', False), - ('member_status', 'all') + ('member_status', 'all'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -737,7 +779,7 @@ def test_image_list_shared_member_status_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( visibility='shared', member_status='all', ) @@ -746,14 +788,11 @@ def test_image_list_shared_member_status_option(self): self.assertEqual(self.datalist, tuple(data)) def test_image_list_shared_member_status_lower(self): - arglist = [ - '--shared', - '--member-status', 'ALl' - ] + arglist = ['--shared', '--member-status', 'ALl'] verifylist = [ ('visibility', 'shared'), ('long', False), - ('member_status', 'all') + ('member_status', 'all'), ] self.check_parser(self.cmd, arglist, verifylist) @@ -770,8 +809,7 @@ def test_image_list_long_option(self): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( - ) + self.image_client.images.assert_called_with() collist = ( 'ID', @@ -784,23 +822,29 @@ def test_image_list_long_option(self): 'Visibility', 'Protected', 'Project', + 'Hash Algorithm', + 'Hash Value', 'Tags', ) self.assertEqual(collist, columns) - datalist = (( - self._image.id, - self._image.name, - None, - None, - None, - None, - None, - self._image.visibility, - self._image.is_protected, - self._image.owner_id, - format_columns.ListColumn(self._image.tags), - ), ) + datalist = ( + ( + self._image.id, + self._image.name, + self._image.disk_format, + self._image.container_format, + self._image.size, + self._image.checksum, + self._image.status, + self._image.visibility, + self._image.is_protected, + self._image.owner_id, + self._image.hash_algo, + self._image.hash_value, + format_columns.ListColumn(self._image.tags), + ), + ) self.assertCountEqual(datalist, tuple(data)) @mock.patch('osc_lib.api.utils.simple_filter') @@ -808,7 +852,8 @@ def test_image_list_property_option(self, sf_mock): sf_mock.return_value = [copy.deepcopy(self._image)] arglist = [ - '--property', 'a=1', + '--property', + 'a=1', ] verifylist = [ ('property', {'a': '1'}), @@ -819,8 +864,7 @@ def test_image_list_property_option(self, sf_mock): # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( - ) + self.image_client.images.assert_called_with() sf_mock.assert_called_with( [self._image], attr='a', @@ -843,8 +887,7 @@ def test_image_list_sort_option(self, si_mock): # returns a tuple containing the column names and an iterable # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( - ) + self.image_client.images.assert_called_with() si_mock.assert_called_with( [self._image], 'name:asc', @@ -856,7 +899,8 @@ def test_image_list_sort_option(self, si_mock): def test_image_list_limit_option(self): ret_limit = 1 arglist = [ - '--limit', str(ret_limit), + '--limit', + str(ret_limit), ] verifylist = [ ('limit', ret_limit), @@ -864,9 +908,9 @@ def test_image_list_limit_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( limit=ret_limit, - paginated=False + paginated=False, # marker=None ) @@ -874,9 +918,10 @@ def test_image_list_limit_option(self): self.assertEqual(ret_limit, len(tuple(data))) def test_image_list_project_option(self): - self.client.find_image = mock.Mock(return_value=self._image) + self.image_client.find_image.return_value = self._image arglist = [ - '--project', 'nova', + '--project', + 'nova', ] verifylist = [ ('project', 'nova'), @@ -890,10 +935,11 @@ def test_image_list_project_option(self): @mock.patch('osc_lib.utils.find_resource') def test_image_list_marker_option(self, fr_mock): - self.client.find_image = mock.Mock(return_value=self._image) + self.image_client.find_image.return_value = self._image arglist = [ - '--marker', 'graven', + '--marker', + 'graven', ] verifylist = [ ('marker', 'graven'), @@ -901,18 +947,19 @@ def test_image_list_marker_option(self, fr_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( marker=self._image.id, ) - self.client.find_image.assert_called_with( + self.image_client.find_image.assert_called_with( 'graven', ignore_missing=False, ) def test_image_list_name_option(self): arglist = [ - '--name', 'abc', + '--name', + 'abc', ] verifylist = [ ('name', 'abc'), @@ -920,14 +967,15 @@ def test_image_list_name_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( + self.image_client.images.assert_called_with( name='abc', # marker=self._image.id ) def test_image_list_status_option(self): arglist = [ - '--status', 'active', + '--status', + 'active', ] verifylist = [ ('status', 'active'), @@ -935,9 +983,7 @@ def test_image_list_status_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( - status='active' - ) + self.image_client.images.assert_called_with(status='active') def test_image_list_hidden_option(self): arglist = [ @@ -949,74 +995,58 @@ def test_image_list_hidden_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( - is_hidden=True - ) + self.image_client.images.assert_called_with(is_hidden=True) def test_image_list_tag_option(self): - arglist = [ - '--tag', 'abc', - '--tag', 'cba' - ] + arglist = ['--tag', 'abc', '--tag', 'cba'] verifylist = [ ('tag', ['abc', 'cba']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.client.images.assert_called_with( - tag=['abc', 'cba'] - ) + 
self.image_client.images.assert_called_with(tag=['abc', 'cba']) class TestListImageProjects(TestImage): - project = identity_fakes.FakeProject.create_one_project() _image = image_fakes.create_one_image() member = image_fakes.create_one_image_member( - attrs={'image_id': _image.id, - 'member_id': project.id} + attrs={'image_id': _image.id, 'member_id': project.id} ) - columns = ( - "Image ID", - "Member ID", - "Status" - ) + columns = ("Image ID", "Member ID", "Status") - datalist = [( - _image.id, - member.member_id, - member.status, - )] + datalist = [ + ( + _image.id, + member.member_id, + member.status, + ) + ] def setUp(self): super().setUp() - self.client.find_image.return_value = self._image - self.client.members.return_value = [self.member] + self.image_client.find_image.return_value = self._image + self.image_client.members.return_value = [self.member] self.cmd = _image.ListImageProjects(self.app, None) def test_image_member_list(self): - arglist = [ - self._image.id - ] - verifylist = [ - ('image', self._image.id) - ] + arglist = [self._image.id] + verifylist = [('image', self._image.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.client.members.assert_called_with(image=self._image.id) + self.image_client.members.assert_called_with(image=self._image.id) self.assertEqual(self.columns, columns) self.assertEqual(self.datalist, list(data)) class TestRemoveProjectImage(TestImage): - project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() @@ -1025,11 +1055,11 @@ def setUp(self): self._image = image_fakes.create_one_image() # This is the return value for utils.find_resource() - self.client.find_image.return_value = self._image + self.image_client.find_image.return_value = self._image self.project_mock.get.return_value = self.project self.domain_mock.get.return_value = self.domain - self.client.remove_member.return_value = None + self.image_client.remove_member.return_value = None # Get the command object to test self.cmd = _image.RemoveProjectImage(self.app, None) @@ -1046,11 +1076,11 @@ def test_remove_project_image_no_options(self): result = self.cmd.take_action(parsed_args) - self.client.find_image.assert_called_with( - self._image.id, - ignore_missing=False) + self.image_client.find_image.assert_called_with( + self._image.id, ignore_missing=False + ) - self.client.remove_member.assert_called_with( + self.image_client.remove_member.assert_called_with( member=self.project.id, image=self._image.id, ) @@ -1060,7 +1090,8 @@ def test_remove_project_image_with_options(self): arglist = [ self._image.id, self.project.id, - '--project-domain', self.domain.id, + '--project-domain', + self.domain.id, ] verifylist = [ ('image', self._image.id), @@ -1071,15 +1102,74 @@ def test_remove_project_image_with_options(self): result = self.cmd.take_action(parsed_args) - self.client.remove_member.assert_called_with( + self.image_client.remove_member.assert_called_with( member=self.project.id, image=self._image.id, ) self.assertIsNone(result) -class TestImageSet(TestImage): +class TestShowProjectImage(TestImage): + _image = image_fakes.create_one_image() + new_member = image_fakes.create_one_image_member( + attrs={'image_id': _image.id, 'member_id': 'member1'} + ) + columns = ( + 'created_at', + 'image_id', + 'member_id', + 'schema', + 'status', + 'updated_at', + ) + + datalist = ( + new_member.created_at, + _image.id, + new_member.member_id, + new_member.schema, + 
new_member.status, + new_member.updated_at, + ) + + def setUp(self): + super().setUp() + + # This is the return value for utils.find_resource() + self.image_client.find_image.return_value = self._image + + self.image_client.get_member.return_value = self.new_member + # Get the command object to test + self.cmd = _image.ShowProjectImage(self.app, None) + + def test_show_project_image(self): + arglist = [ + self._image.id, + 'member1', + ] + verifylist = [ + ('image', self._image.id), + ('member', 'member1'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.image_client.find_image.assert_called_with( + self._image.id, ignore_missing=False + ) + + self.image_client.get_member.assert_called_with( + member='member1', + image=self._image.id, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + +class TestImageSet(TestImage): project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() _image = image_fakes.create_one_image({'tags': []}) @@ -1091,7 +1181,7 @@ def setUp(self): self.domain_mock.get.return_value = self.domain - self.client.find_image.return_value = self._image + self.image_client.find_image.return_value = self._image self.app.client_manager.auth_ref = mock.Mock( project_id=self.project.id, @@ -1104,39 +1194,36 @@ def test_image_set_no_options(self): arglist = [ '0f41529e-7c12-4de8-be2d-181abb825b3c', ] - verifylist = [ - ('image', '0f41529e-7c12-4de8-be2d-181abb825b3c') - ] + verifylist = [('image', '0f41529e-7c12-4de8-be2d-181abb825b3c')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.assertIsNone(result) # we'll have called this but not set anything - self.app.client_manager.image.update_image.assert_called_once_with( + self.image_client.update_image.assert_called_once_with( self._image.id, ) def test_image_set_membership_option_accept(self): membership = image_fakes.create_one_image_member( - attrs={'image_id': '0f41529e-7c12-4de8-be2d-181abb825b3c', - 'member_id': self.project.id} + attrs={ + 'image_id': '0f41529e-7c12-4de8-be2d-181abb825b3c', + 'member_id': self.project.id, + } ) - self.client.update_member.return_value = membership + self.image_client.update_member.return_value = membership arglist = [ '--accept', self._image.id, ] - verifylist = [ - ('membership', 'accepted'), - ('image', self._image.id) - ] + verifylist = [('membership', 'accepted'), ('image', self._image.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.client.update_member.assert_called_once_with( + self.image_client.update_member.assert_called_once_with( image=self._image.id, member=self.app.client_manager.auth_ref.project_id, status='accepted', @@ -1144,14 +1231,16 @@ def test_image_set_membership_option_accept(self): # Assert that the 'update image" route is also called, in addition to # the 'update membership' route. 
- self.client.update_image.assert_called_with(self._image.id) + self.image_client.update_image.assert_called_with(self._image.id) def test_image_set_membership_option_reject(self): membership = image_fakes.create_one_image_member( - attrs={'image_id': '0f41529e-7c12-4de8-be2d-181abb825b3c', - 'member_id': self.project.id} + attrs={ + 'image_id': '0f41529e-7c12-4de8-be2d-181abb825b3c', + 'member_id': self.project.id, + } ) - self.client.update_member.return_value = membership + self.image_client.update_member.return_value = membership arglist = [ '--reject', @@ -1159,13 +1248,13 @@ def test_image_set_membership_option_reject(self): ] verifylist = [ ('membership', 'rejected'), - ('image', '0f41529e-7c12-4de8-be2d-181abb825b3c') + ('image', '0f41529e-7c12-4de8-be2d-181abb825b3c'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.client.update_member.assert_called_once_with( + self.image_client.update_member.assert_called_once_with( image=self._image.id, member=self.app.client_manager.auth_ref.project_id, status='rejected', @@ -1173,14 +1262,16 @@ def test_image_set_membership_option_reject(self): # Assert that the 'update image" route is also called, in addition to # the 'update membership' route. - self.client.update_image.assert_called_with(self._image.id) + self.image_client.update_image.assert_called_with(self._image.id) def test_image_set_membership_option_pending(self): membership = image_fakes.create_one_image_member( - attrs={'image_id': '0f41529e-7c12-4de8-be2d-181abb825b3c', - 'member_id': self.project.id} + attrs={ + 'image_id': '0f41529e-7c12-4de8-be2d-181abb825b3c', + 'member_id': self.project.id, + } ) - self.client.update_member.return_value = membership + self.image_client.update_member.return_value = membership arglist = [ '--pending', @@ -1188,13 +1279,13 @@ def test_image_set_membership_option_pending(self): ] verifylist = [ ('membership', 'pending'), - ('image', '0f41529e-7c12-4de8-be2d-181abb825b3c') + ('image', '0f41529e-7c12-4de8-be2d-181abb825b3c'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.client.update_member.assert_called_once_with( + self.image_client.update_member.assert_called_once_with( image=self._image.id, member=self.app.client_manager.auth_ref.project_id, status='pending', @@ -1202,17 +1293,135 @@ def test_image_set_membership_option_pending(self): # Assert that the 'update image" route is also called, in addition to # the 'update membership' route. 
- self.client.update_image.assert_called_with(self._image.id) + self.image_client.update_image.assert_called_with(self._image.id) + + def test_image_set_membership_accept_with_project_no_owner_change(self): + """Test that --project with --accept doesn't change image owner.""" + membership = image_fakes.create_one_image_member( + attrs={ + 'image_id': '0f41529e-7c12-4de8-be2d-181abb825b3c', + 'member_id': self.project.id, + } + ) + self.image_client.update_member.return_value = membership + + arglist = [ + '--project', + self.project.name, + '--accept', + self._image.id, + ] + verifylist = [ + ('project', self.project.name), + ('membership', 'accepted'), + ('image', self._image.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.cmd.take_action(parsed_args) + + self.image_client.update_member.assert_called_once_with( + image=self._image.id, + member=self.project.id, + status='accepted', + ) + + self.image_client.update_image.assert_called() + call_args = self.image_client.update_image.call_args + if call_args: + args, kwargs = call_args + self.assertNotIn('owner_id', kwargs) + + def test_image_set_membership_reject_with_project_no_owner_change(self): + """Test that --project with --reject doesn't change image owner.""" + membership = image_fakes.create_one_image_member( + attrs={ + 'image_id': '0f41529e-7c12-4de8-be2d-181abb825b3c', + 'member_id': self.project.id, + } + ) + self.image_client.update_member.return_value = membership + + arglist = [ + '--project', + self.project.name, + '--reject', + self._image.id, + ] + verifylist = [ + ('project', self.project.name), + ('membership', 'rejected'), + ('image', self._image.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.cmd.take_action(parsed_args) + + self.image_client.update_member.assert_called_once_with( + image=self._image.id, + member=self.project.id, + status='rejected', + ) + + self.image_client.update_image.assert_called() + call_args = self.image_client.update_image.call_args + if call_args: + args, kwargs = call_args + self.assertNotIn('owner_id', kwargs) + + def test_image_set_membership_pending_with_project_no_owner_change(self): + """Test that --project with --pending doesn't change image owner.""" + membership = image_fakes.create_one_image_member( + attrs={ + 'image_id': '0f41529e-7c12-4de8-be2d-181abb825b3c', + 'member_id': self.project.id, + } + ) + self.image_client.update_member.return_value = membership + + arglist = [ + '--project', + self.project.name, + '--pending', + self._image.id, + ] + verifylist = [ + ('project', self.project.name), + ('membership', 'pending'), + ('image', self._image.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.cmd.take_action(parsed_args) + + self.image_client.update_member.assert_called_once_with( + image=self._image.id, + member=self.project.id, + status='pending', + ) + + self.image_client.update_image.assert_called() + call_args = self.image_client.update_image.call_args + if call_args: + args, kwargs = call_args + self.assertNotIn('owner_id', kwargs) def test_image_set_options(self): arglist = [ - '--name', 'new-name', - '--min-disk', '2', - '--min-ram', '4', - '--container-format', 'ovf', - '--disk-format', 'vmdk', - '--project', self.project.name, - '--project-domain', self.domain.id, + '--name', + 'new-name', + '--min-disk', + '2', + '--min-ram', + '4', + '--container-format', + 'ovf', + '--disk-format', + 'vmdk', + '--project', + self.project.name, + '--project-domain', + 
self.domain.id, self._image.id, ] verifylist = [ @@ -1238,8 +1447,9 @@ def test_image_set_options(self): 'disk_format': 'vmdk', } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, **kwargs) + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs + ) self.assertIsNone(result) def test_image_set_with_unexist_project(self): @@ -1247,7 +1457,8 @@ def test_image_set_with_unexist_project(self): self.project_mock.find.side_effect = exceptions.NotFound(None) arglist = [ - '--project', 'unexist_owner', + '--project', + 'unexist_owner', '0f41529e-7c12-4de8-be2d-181abb825b3c', ] verifylist = [ @@ -1257,18 +1468,20 @@ def test_image_set_with_unexist_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) - def test_image_set_bools1(self): + def test_image_set_bools_true(self): arglist = [ '--protected', '--private', + '--hidden', 'graven', ] verifylist = [ ('is_protected', True), ('visibility', 'private'), + ('is_hidden', True), ('image', 'graven'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1278,23 +1491,25 @@ def test_image_set_bools1(self): kwargs = { 'is_protected': True, 'visibility': 'private', + 'is_hidden': True, } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) - def test_image_set_bools2(self): + def test_image_set_bools_false(self): arglist = [ '--unprotected', '--public', + '--unhidden', 'graven', ] verifylist = [ ('is_protected', False), ('visibility', 'public'), + ('is_hidden', False), ('image', 'graven'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1304,18 +1519,20 @@ def test_image_set_bools2(self): kwargs = { 'is_protected': False, 'visibility': 'public', + 'is_hidden': False, } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) def test_image_set_properties(self): arglist = [ - '--property', 'Alpha=1', - '--property', 'Beta=2', + '--property', + 'Alpha=1', + '--property', + 'Beta=2', 'graven', ] verifylist = [ @@ -1331,20 +1548,25 @@ def test_image_set_properties(self): 'Beta': '2', } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) def test_image_set_fake_properties(self): arglist = [ - '--architecture', 'z80', - '--instance-id', '12345', - '--kernel-id', '67890', - '--os-distro', 'cpm', - '--os-version', '2.2H', - '--ramdisk-id', 'xyzpdq', + '--architecture', + 'z80', + '--instance-id', + '12345', + '--kernel-id', + '67890', + '--os-distro', + 'cpm', + '--os-version', + '2.2H', + '--ramdisk-id', + 'xyzpdq', 'graven', ] verifylist = [ @@ -1369,15 +1591,15 @@ def test_image_set_fake_properties(self): 'ramdisk_id': 'xyzpdq', } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) def test_image_set_tag(self): 
arglist = [ - '--tag', 'test-tag', + '--tag', + 'test-tag', 'graven', ] verifylist = [ @@ -1392,15 +1614,15 @@ def test_image_set_tag(self): 'tags': ['test-tag'], } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) def test_image_set_activate(self): arglist = [ - '--tag', 'test-tag', + '--tag', + 'test-tag', '--activate', 'graven', ] @@ -1416,19 +1638,19 @@ def test_image_set_activate(self): 'tags': ['test-tag'], } - self.client.reactivate_image.assert_called_with( + self.image_client.reactivate_image.assert_called_with( self._image.id, ) # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) def test_image_set_deactivate(self): arglist = [ - '--tag', 'test-tag', + '--tag', + 'test-tag', '--deactivate', 'graven', ] @@ -1444,22 +1666,22 @@ def test_image_set_deactivate(self): 'tags': ['test-tag'], } - self.client.deactivate_image.assert_called_with( + self.image_client.deactivate_image.assert_called_with( self._image.id, ) # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) def test_image_set_tag_merge(self): old_image = self._image old_image['tags'] = ['old1', 'new2'] - self.client.find_image.return_value = old_image + self.image_client.find_image.return_value = old_image arglist = [ - '--tag', 'test-tag', + '--tag', + 'test-tag', 'graven', ] verifylist = [ @@ -1474,7 +1696,7 @@ def test_image_set_tag_merge(self): 'tags': ['old1', 'new2', 'test-tag'], } # ImageManager.update(image, **kwargs) - a, k = self.client.update_image.call_args + a, k = self.image_client.update_image.call_args self.assertEqual(self._image.id, a[0]) self.assertIn('tags', k) self.assertEqual(set(kwargs['tags']), set(k['tags'])) @@ -1483,9 +1705,10 @@ def test_image_set_tag_merge(self): def test_image_set_tag_merge_dupe(self): old_image = self._image old_image['tags'] = ['old1', 'new2'] - self.client.find_image.return_value = old_image + self.image_client.find_image.return_value = old_image arglist = [ - '--tag', 'old1', + '--tag', + 'old1', 'graven', ] verifylist = [ @@ -1500,16 +1723,16 @@ def test_image_set_tag_merge_dupe(self): 'tags': ['new2', 'old1'], } # ImageManager.update(image, **kwargs) - a, k = self.client.update_image.call_args + a, k = self.image_client.update_image.call_args self.assertEqual(self._image.id, a[0]) self.assertIn('tags', k) self.assertEqual(set(kwargs['tags']), set(k['tags'])) self.assertIsNone(result) def test_image_set_dead_options(self): - arglist = [ - '--visibility', '1-mile', + '--visibility', + '1-mile', 'graven', ] verifylist = [ @@ -1519,13 +1742,15 @@ def test_image_set_dead_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_image_set_numeric_options_to_zero(self): arglist = [ - '--min-disk', '0', - '--min-ram', '0', + '--min-disk', + '0', + '--min-ram', + '0', 'graven', ] verifylist = [ @@ -1542,9 +1767,8 @@ def test_image_set_numeric_options_to_zero(self): 'min_ram': 0, } # 
ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) @@ -1568,9 +1792,8 @@ def test_image_set_hidden(self): 'visibility': 'public', } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) @@ -1594,23 +1817,18 @@ def test_image_set_unhidden(self): 'visibility': 'public', } # ImageManager.update(image, **kwargs) - self.client.update_image.assert_called_with( - self._image.id, - **kwargs + self.image_client.update_image.assert_called_with( + self._image.id, **kwargs ) self.assertIsNone(result) class TestImageShow(TestImage): - - new_image = image_fakes.create_one_image( - attrs={'size': 1000}) + new_image = image_fakes.create_one_image(attrs={'size': 1000}) _data = image_fakes.create_one_image() - columns = ( - 'id', 'name', 'owner', 'protected', 'tags', 'visibility' - ) + columns = ('id', 'name', 'owner', 'protected', 'tags', 'visibility') data = ( _data.id, @@ -1618,13 +1836,13 @@ class TestImageShow(TestImage): _data.owner_id, _data.is_protected, format_columns.ListColumn(_data.tags), - _data.visibility + _data.visibility, ) def setUp(self): super().setUp() - self.client.find_image = mock.Mock(return_value=self._data) + self.image_client.find_image.return_value = self._data # Get the command object to test self.cmd = _image.ShowImage(self.app, None) @@ -1642,16 +1860,15 @@ def test_image_show(self): # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. columns, data = self.cmd.take_action(parsed_args) - self.client.find_image.assert_called_with( - '0f41529e-7c12-4de8-be2d-181abb825b3c', - ignore_missing=False + self.image_client.find_image.assert_called_with( + '0f41529e-7c12-4de8-be2d-181abb825b3c', ignore_missing=False ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_image_show_human_readable(self): - self.client.find_image.return_value = self.new_image + self.image_client.find_image.return_value = self.new_image arglist = [ '--human-readable', self.new_image.id, @@ -1666,9 +1883,8 @@ def test_image_show_human_readable(self): # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.client.find_image.assert_called_with( - self.new_image.id, - ignore_missing=False + self.image_client.find_image.assert_called_with( + self.new_image.id, ignore_missing=False ) size_index = columns.index('size') @@ -1676,7 +1892,6 @@ def test_image_show_human_readable(self): class TestImageUnset(TestImage): - def setUp(self): super().setUp() @@ -1685,11 +1900,12 @@ def setUp(self): attrs['hw_rng_model'] = 'virtio' attrs['prop'] = 'test' attrs['prop2'] = 'fake' + attrs['os_secure_boot'] = 'required' self.image = image_fakes.create_one_image(attrs) - self.client.find_image.return_value = self.image - self.client.remove_tag.return_value = self.image - self.client.update_image.return_value = self.image + self.image_client.find_image.return_value = self.image + self.image_client.remove_tag.return_value = self.image + self.image_client.update_image.return_value = self.image # Get the command object to test self.cmd = _image.UnsetImage(self.app, None) @@ -1698,9 +1914,7 @@ def test_image_unset_no_options(self): arglist = [ '0f41529e-7c12-4de8-be2d-181abb825b3c', ] - verifylist = [ - ('image', '0f41529e-7c12-4de8-be2d-181abb825b3c') - ] + verifylist = [('image', '0f41529e-7c12-4de8-be2d-181abb825b3c')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) @@ -1708,9 +1922,9 @@ def test_image_unset_no_options(self): self.assertIsNone(result) def test_image_unset_tag_option(self): - arglist = [ - '--tag', 'test', + '--tag', + 'test', self.image.id, ] @@ -1721,65 +1935,72 @@ def test_image_unset_tag_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.client.remove_tag.assert_called_with( - self.image.id, 'test' - ) + self.image_client.remove_tag.assert_called_with(self.image.id, 'test') self.assertIsNone(result) def test_image_unset_property_option(self): - arglist = [ - '--property', 'hw_rng_model', - '--property', 'prop', + '--property', + 'hw_rng_model', + '--property', + 'prop', + '--property', + 'os_secure_boot', self.image.id, ] + # openstacksdk translates 'os_secure_boot' property to + # 'needs_secure_boot' Image attribute. 
This is true for + # all IMAGE_ATTRIBUTES_CUSTOM_NAMES keys + self.assertEqual(self.image.needs_secure_boot, 'required') + self.assertFalse(hasattr(self.image, 'os_secure_boot')) verifylist = [ - ('properties', ['hw_rng_model', 'prop']), - ('image', self.image.id) + ('properties', ['hw_rng_model', 'prop', 'os_secure_boot']), + ('image', self.image.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.client.update_image.assert_called_with( - self.image, properties={'prop2': 'fake'}) + self.image_client.update_image.assert_called_with( + self.image, properties={'prop2': 'fake'} + ) self.assertIsNone(result) def test_image_unset_mixed_option(self): - arglist = [ - '--tag', 'test', - '--property', 'hw_rng_model', - '--property', 'prop', + '--tag', + 'test', + '--property', + 'hw_rng_model', + '--property', + 'prop', self.image.id, ] verifylist = [ ('tags', ['test']), ('properties', ['hw_rng_model', 'prop']), - ('image', self.image.id) + ('image', self.image.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.client.update_image.assert_called_with( - self.image, properties={'prop2': 'fake'}) - - self.client.remove_tag.assert_called_with( - self.image.id, 'test' + self.image_client.update_image.assert_called_with( + self.image, properties={'prop2': 'fake'} ) + + self.image_client.remove_tag.assert_called_with(self.image.id, 'test') self.assertIsNone(result) class TestImageStage(TestImage): - image = image_fakes.create_one_image({}) def setUp(self): super().setUp() - self.client.find_image.return_value = self.image + self.image_client.find_image.return_value = self.image self.cmd = _image.StageImage(self.app, None) @@ -1789,7 +2010,8 @@ def test_stage_image__from_file(self): imagefile.close() arglist = [ - '--file', imagefile.name, + '--file', + imagefile.name, self.image.name, ] verifylist = [ @@ -1800,7 +2022,7 @@ def test_stage_image__from_file(self): self.cmd.take_action(parsed_args) - self.client.stage_image.assert_called_once_with( + self.image_client.stage_image.assert_called_once_with( self.image, filename=imagefile.name, ) @@ -1820,14 +2042,13 @@ def test_stage_image__from_stdin(self, mock_get_data_from_stdin): self.cmd.take_action(parsed_args) - self.client.stage_image.assert_called_once_with( + self.image_client.stage_image.assert_called_once_with( self.image, data=fake_stdin, ) class TestImageImport(TestImage): - image = image_fakes.create_one_image( { 'container_format': 'bare', @@ -1839,8 +2060,8 @@ class TestImageImport(TestImage): def setUp(self): super().setUp() - self.client.find_image.return_value = self.image - self.client.get_import_info.return_value = self.import_info + self.image_client.find_image.return_value = self.image + self.image_client.get_import_info.return_value = self.import_info self.cmd = _image.ImportImage(self.app, None) @@ -1856,12 +2077,12 @@ def test_import_image__glance_direct(self): self.cmd.take_action(parsed_args) - self.client.import_image.assert_called_once_with( + self.image_client.import_image.assert_called_once_with( self.image, method='glance-direct', uri=None, remote_region=None, - remote_image=None, + remote_image_id=None, remote_service_interface=None, stores=None, all_stores=None, @@ -1872,8 +2093,10 @@ def test_import_image__web_download(self): self.image.status = 'queued' arglist = [ self.image.name, - '--method', 'web-download', - '--uri', 'https://example.com/', + '--method', + 'web-download', + '--uri', + 
'https://example.com/', ] verifylist = [ ('image', self.image.name), @@ -1884,12 +2107,12 @@ def test_import_image__web_download(self): self.cmd.take_action(parsed_args) - self.client.import_image.assert_called_once_with( + self.image_client.import_image.assert_called_once_with( self.image, method='web-download', uri='https://example.com/', remote_region=None, - remote_image=None, + remote_image_id=None, remote_service_interface=None, stores=None, all_stores=None, @@ -1901,7 +2124,8 @@ def test_import_image__web_download(self): def test_import_image__web_download_missing_options(self): arglist = [ self.image.name, - '--method', 'web-download', + '--method', + 'web-download', ] verifylist = [ ('image', self.image.name), @@ -1917,14 +2141,16 @@ def test_import_image__web_download_missing_options(self): ) self.assertIn("The '--uri' option is required ", str(exc)) - self.client.import_image.assert_not_called() + self.image_client.import_image.assert_not_called() # NOTE(stephenfin): Ditto def test_import_image__web_download_invalid_options(self): arglist = [ self.image.name, - '--method', 'glance-direct', # != web-download - '--uri', 'https://example.com/', + '--method', + 'glance-direct', # != web-download + '--uri', + 'https://example.com/', ] verifylist = [ ('image', self.image.name), @@ -1940,14 +2166,46 @@ def test_import_image__web_download_invalid_options(self): ) self.assertIn("The '--uri' option is only supported ", str(exc)) - self.client.import_image.assert_not_called() + self.image_client.import_image.assert_not_called() + + def test_import_image__web_download_invalid_url(self): + arglist = [ + self.image.name, + '--method', + 'web-download', + '--uri', + 'invalid:1234', + ] + + verifylist = [ + ('image', self.image.name), + ('import_method', 'web-download'), + ('uri', 'invalid:1234'), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + self.assertIn( + "'invalid:1234' is not a valid url", + str(exc), + ) + + self.image_client.import_image.assert_not_called() def test_import_image__web_download_invalid_image_state(self): self.image.status = 'uploading' # != 'queued' arglist = [ self.image.name, - '--method', 'web-download', - '--uri', 'https://example.com/', + '--method', + 'web-download', + '--uri', + 'https://example.com/', ] verifylist = [ ('image', self.image.name), @@ -1967,14 +2225,16 @@ def test_import_image__web_download_invalid_image_state(self): str(exc), ) - self.client.import_image.assert_not_called() + self.image_client.import_image.assert_not_called() def test_import_image__copy_image(self): self.image.status = 'active' arglist = [ self.image.name, - '--method', 'copy-image', - '--store', 'fast', + '--method', + 'copy-image', + '--store', + 'fast', ] verifylist = [ ('image', self.image.name), @@ -1985,25 +2245,61 @@ def test_import_image__copy_image(self): self.cmd.take_action(parsed_args) - self.client.import_image.assert_called_once_with( + self.image_client.import_image.assert_called_once_with( self.image, method='copy-image', uri=None, remote_region=None, - remote_image=None, + remote_image_id=None, remote_service_interface=None, stores=['fast'], all_stores=None, all_stores_must_succeed=False, ) + def test_import_image__copy_image_disallow_failure(self): + self.image.status = 'active' + arglist = [ + self.image.name, + '--method', + 'copy-image', + '--store', + 'fast', + '--disallow-failure', + ] + verifylist = [ + ('image', self.image.name), + 
('import_method', 'copy-image'), + ('stores', ['fast']), + ('allow_failure', False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.cmd.take_action(parsed_args) + + self.image_client.import_image.assert_called_once_with( + self.image, + method='copy-image', + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + stores=['fast'], + all_stores=None, + all_stores_must_succeed=True, + ) + def test_import_image__glance_download(self): arglist = [ self.image.name, - '--method', 'glance-download', - '--remote-region', 'eu/dublin', - '--remote-image', 'remote-image-id', - '--remote-service-interface', 'private', + '--method', + 'glance-download', + '--remote-region', + 'eu/dublin', + '--remote-image', + 'remote-image-id', + '--remote-service-interface', + 'private', ] verifylist = [ ('image', self.image.name), @@ -2016,12 +2312,12 @@ def test_import_image__glance_download(self): self.cmd.take_action(parsed_args) - self.client.import_image.assert_called_once_with( + self.image_client.import_image.assert_called_once_with( self.image, method='glance-download', uri=None, remote_region='eu/dublin', - remote_image='remote-image-id', + remote_image_id='remote-image-id', remote_service_interface='private', stores=None, all_stores=None, @@ -2030,20 +2326,18 @@ def test_import_image__glance_download(self): class TestImageSave(TestImage): - image = image_fakes.create_one_image({}) def setUp(self): super().setUp() - self.client.find_image.return_value = self.image - self.client.download_image.return_value = self.image + self.image_client.find_image.return_value = self.image + self.image_client.download_image.return_value = self.image # Get the command object to test self.cmd = _image.SaveImage(self.app, None) def test_save_data(self): - arglist = ['--file', '/path/to/file', self.image.id] verifylist = [ @@ -2054,14 +2348,34 @@ def test_save_data(self): self.cmd.take_action(parsed_args) - self.client.download_image.assert_called_once_with( + self.image_client.download_image.assert_called_once_with( + self.image.id, output='/path/to/file', stream=True, chunk_size=1024 + ) + + def test_save_data_with_chunk_size(self): + arglist = [ + '--file', + '/path/to/file', + '--chunk-size', + '2048', self.image.id, - stream=True, - output='/path/to/file') + ] + verifylist = [ + ('filename', '/path/to/file'), + ('chunk_size', 2048), + ('image', self.image.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.cmd.take_action(parsed_args) + + self.image_client.download_image.assert_called_once_with( + self.image.id, output='/path/to/file', stream=True, chunk_size=2048 + ) -class TestImageGetData(TestImage): +class TestImageGetData(TestImage): def test_get_data_from_stdin(self): fd = io.BytesIO(b"some initial binary data: \x00\x01") @@ -2085,3 +2399,45 @@ def test_get_data_from_stdin__interactive(self): test_fd = _image.get_data_from_stdin() self.assertIsNone(test_fd) + + +class TestStoresInfo(TestImage): + stores_info = image_fakes.create_one_stores_info() + + def setUp(self): + super().setUp() + + self.image_client.stores.return_value = self.stores_info + + self.cmd = _image.StoresInfo(self.app, None) + + def test_stores_info(self): + arglist = [] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.cmd.take_action(parsed_args) + + self.image_client.stores.assert_called() + + def test_stores_info_with_detail(self): + arglist = ['--detail'] + parsed_args = self.check_parser(self.cmd, arglist, []) + 
self.cmd.take_action(parsed_args) + + self.image_client.stores.assert_called_with(details=True) + + def test_stores_info_neg(self): + arglist = [] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.image_client.stores.side_effect = ( + sdk_exceptions.ResourceNotFound() + ) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertIn( + "Multi Backend support not enabled", + str(exc), + ) diff --git a/openstackclient/tests/unit/image/v2/test_info.py b/openstackclient/tests/unit/image/v2/test_info.py new file mode 100644 index 0000000000..86f7bd759f --- /dev/null +++ b/openstackclient/tests/unit/image/v2/test_info.py @@ -0,0 +1,35 @@ +# Copyright 2023 Red Hat. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstackclient.image.v2 import info +from openstackclient.tests.unit.image.v2 import fakes as image_fakes + + +class TestImportInfo(image_fakes.TestImagev2): + import_info = image_fakes.create_one_import_info() + + def setUp(self): + super().setUp() + + self.image_client.get_import_info.return_value = self.import_info + + self.cmd = info.ImportInfo(self.app, None) + + def test_import_info(self): + arglist = [] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.cmd.take_action(parsed_args) + + self.image_client.get_import_info.assert_called() diff --git a/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py b/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py index 7ed1183866..68b3076d55 100644 --- a/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py +++ b/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py @@ -13,71 +13,40 @@ # under the License. 
from openstackclient.image.v2 import metadef_namespaces -from openstackclient.tests.unit.image.v2 import fakes as md_namespace_fakes +from openstackclient.tests.unit.image.v2 import fakes as image_fakes -class TestMetadefNamespaces(md_namespace_fakes.TestMetadefNamespaces): - def setUp(self): - super().setUp() - - # Get shortcuts to mocked image client - self.client = self.app.client_manager.image - - # Get shortcut to the Mocks in identity client - self.project_mock = self.app.client_manager.identity.projects - self.project_mock.reset_mock() - self.domain_mock = self.app.client_manager.identity.domains - self.domain_mock.reset_mock() - - -class TestMetadefNamespaceCreate(TestMetadefNamespaces): - _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace() +class TestMetadefNamespaceCreate(image_fakes.TestImagev2): + _metadef_namespace = image_fakes.create_one_metadef_namespace() expected_columns = ( 'created_at', - 'description', 'display_name', - 'id', - 'is_protected', - 'location', - 'name', 'namespace', 'owner', - 'resource_type_associations', - 'updated_at', - 'visibility' + 'visibility', ) expected_data = ( _metadef_namespace.created_at, - _metadef_namespace.description, _metadef_namespace.display_name, - _metadef_namespace.id, - _metadef_namespace.is_protected, - _metadef_namespace.location, - _metadef_namespace.name, _metadef_namespace.namespace, _metadef_namespace.owner, - _metadef_namespace.resource_type_associations, - _metadef_namespace.updated_at, - _metadef_namespace.visibility + _metadef_namespace.visibility, ) def setUp(self): super().setUp() - self.client.create_metadef_namespace.return_value \ - = self._metadef_namespace - self.cmd = metadef_namespaces.CreateMetadefNameSpace(self.app, None) + self.image_client.create_metadef_namespace.return_value = ( + self._metadef_namespace + ) + self.cmd = metadef_namespaces.CreateMetadefNamespace(self.app, None) self.datalist = self._metadef_namespace def test_namespace_create(self): - arglist = [ - self._metadef_namespace.namespace - ] - - verifylist = [ + arglist = [self._metadef_namespace.namespace] - ] + verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) @@ -86,25 +55,22 @@ def test_namespace_create(self): self.assertEqual(self.expected_data, data) -class TestMetadefNamespaceDelete(TestMetadefNamespaces): - _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace() +class TestMetadefNamespaceDelete(image_fakes.TestImagev2): + _metadef_namespace = image_fakes.create_one_metadef_namespace() def setUp(self): super().setUp() - self.client.delete_metadef_namespace.return_value \ - = self._metadef_namespace - self.cmd = metadef_namespaces.DeleteMetadefNameSpace(self.app, None) + self.image_client.delete_metadef_namespace.return_value = ( + self._metadef_namespace + ) + self.cmd = metadef_namespaces.DeleteMetadefNamespace(self.app, None) self.datalist = self._metadef_namespace def test_namespace_create(self): - arglist = [ - self._metadef_namespace.namespace - ] - - verifylist = [ + arglist = [self._metadef_namespace.namespace] - ] + verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) @@ -112,26 +78,26 @@ def test_namespace_create(self): self.assertIsNone(result) -class TestMetadefNamespaceList(TestMetadefNamespaces): - _metadef_namespace = [md_namespace_fakes.create_one_metadef_namespace()] +class TestMetadefNamespaceList(image_fakes.TestImagev2): + 
_metadef_namespace = [image_fakes.create_one_metadef_namespace()] - columns = [ - 'namespace' - ] + columns = ['namespace'] datalist = [] def setUp(self): super().setUp() - self.client.metadef_namespaces.side_effect = [ - self._metadef_namespace, []] + self.image_client.metadef_namespaces.side_effect = [ + self._metadef_namespace, + [], + ] # Get the command object to test - self.client.metadef_namespaces.return_value = iter( + self.image_client.metadef_namespaces.return_value = iter( self._metadef_namespace ) - self.cmd = metadef_namespaces.ListMetadefNameSpaces(self.app, None) + self.cmd = metadef_namespaces.ListMetadefNamespace(self.app, None) self.datalist = self._metadef_namespace def test_namespace_list_no_options(self): @@ -144,25 +110,23 @@ def test_namespace_list_no_options(self): columns, data = self.cmd.take_action(parsed_args) self.assertEqual(self.columns, columns) - self.assertEqual(getattr(self.datalist[0], 'namespace'), - next(data)[0]) + self.assertEqual(getattr(self.datalist[0], 'namespace'), next(data)[0]) -class TestMetadefNamespaceSet(TestMetadefNamespaces): - _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace() +class TestMetadefNamespaceSet(image_fakes.TestImagev2): + _metadef_namespace = image_fakes.create_one_metadef_namespace() def setUp(self): super().setUp() - self.client.update_metadef_namespace.return_value \ - = self._metadef_namespace - self.cmd = metadef_namespaces.SetMetadefNameSpace(self.app, None) + self.image_client.update_metadef_namespace.return_value = ( + self._metadef_namespace + ) + self.cmd = metadef_namespaces.SetMetadefNamespace(self.app, None) self.datalist = self._metadef_namespace def test_namespace_set_no_options(self): - arglist = [ - self._metadef_namespace.namespace - ] + arglist = [self._metadef_namespace.namespace] verifylist = [ ('namespace', self._metadef_namespace.namespace), ] @@ -174,39 +138,36 @@ def test_namespace_set_no_options(self): self.assertIsNone(result) -class TestMetadefNamespaceShow(TestMetadefNamespaces): - _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace() +class TestMetadefNamespaceShow(image_fakes.TestImagev2): + _metadef_namespace = image_fakes.create_one_metadef_namespace() expected_columns = ( 'created_at', 'display_name', 'namespace', 'owner', - 'visibility' + 'visibility', ) expected_data = ( _metadef_namespace.created_at, _metadef_namespace.display_name, _metadef_namespace.namespace, _metadef_namespace.owner, - _metadef_namespace.visibility + _metadef_namespace.visibility, ) def setUp(self): super().setUp() - self.client.get_metadef_namespace.return_value \ - = self._metadef_namespace - self.cmd = metadef_namespaces.ShowMetadefNameSpace(self.app, None) + self.image_client.get_metadef_namespace.return_value = ( + self._metadef_namespace + ) + self.cmd = metadef_namespaces.ShowMetadefNamespace(self.app, None) def test_namespace_show_no_options(self): - arglist = [ - self._metadef_namespace.namespace - ] + arglist = [self._metadef_namespace.namespace] - verifylist = [ - - ] + verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) diff --git a/openstackclient/tests/unit/image/v2/test_metadef_objects.py b/openstackclient/tests/unit/image/v2/test_metadef_objects.py new file mode 100644 index 0000000000..6306e23eb1 --- /dev/null +++ b/openstackclient/tests/unit/image/v2/test_metadef_objects.py @@ -0,0 +1,283 @@ +# Copyright 2023 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); 
you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from osc_lib import exceptions + +from openstackclient.image.v2 import metadef_objects +from openstackclient.tests.unit.image.v2 import fakes + + +class TestMetadefObjectsCreate(fakes.TestImagev2): + _metadef_namespace = fakes.create_one_metadef_namespace() + _metadef_objects = fakes.create_one_metadef_object() + + expected_columns = ( + 'created_at', + 'description', + 'name', + 'namespace_name', + 'properties', + 'required', + 'updated_at', + ) + expected_data = ( + _metadef_objects.created_at, + _metadef_objects.description, + _metadef_objects.name, + _metadef_objects.namespace_name, + _metadef_objects.properties, + _metadef_objects.required, + _metadef_objects.updated_at, + ) + + def setUp(self): + super().setUp() + self.image_client.create_metadef_object.return_value = ( + self._metadef_objects + ) + self.cmd = metadef_objects.CreateMetadefObjects(self.app, None) + + def test_namespace_create(self): + arglist = [ + '--namespace', + self._metadef_namespace.namespace, + self._metadef_objects.name, + ] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + +class TestMetadefObjectsShow(fakes.TestImagev2): + _metadef_namespace = fakes.create_one_metadef_namespace() + _metadef_objects = fakes.create_one_metadef_object() + + expected_columns = ( + 'created_at', + 'description', + 'name', + 'namespace_name', + 'properties', + 'required', + 'updated_at', + ) + expected_data = ( + _metadef_objects.created_at, + _metadef_objects.description, + _metadef_objects.name, + _metadef_objects.namespace_name, + _metadef_objects.properties, + _metadef_objects.required, + _metadef_objects.updated_at, + ) + + def setUp(self): + super().setUp() + + self.image_client.get_metadef_object.return_value = ( + self._metadef_objects + ) + self.cmd = metadef_objects.ShowMetadefObjects(self.app, None) + + def test_object_show(self): + arglist = [ + self._metadef_namespace.namespace, + self._metadef_objects.name, + ] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + +class TestMetadefObjectDelete(fakes.TestImagev2): + _metadef_namespace = fakes.create_one_metadef_namespace() + _metadef_objects = fakes.create_one_metadef_object() + + def setUp(self): + super().setUp() + + self.image_client.delete_metadef_object.return_value = None + self.image_client.delete_all_metadef_objects.return_value = None + self.cmd = metadef_objects.DeleteMetadefObject(self.app, None) + + def test_object_delete(self): + arglist = [ + self._metadef_namespace.namespace, + self._metadef_objects.name, + ] + + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.image_client.delete_metadef_object.assert_called_once_with( + 
self.image_client.get_metadef_object(), + self._metadef_namespace.namespace, + ) + self.image_client.delete_all_metadef_objects.assert_not_called() + self.assertIsNone(result) + + def test_object_delete_all(self): + arglist = [ + self._metadef_namespace.namespace, + ] + + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + self.image_client.delete_all_metadef_objects.assert_called_with( + self._metadef_namespace.namespace, + ) + self.image_client.delete_metadef_object.assert_not_called() + + +class TestMetadefObjectList(fakes.TestImagev2): + _metadef_namespace = fakes.create_one_metadef_namespace() + _metadef_objects = [fakes.create_one_metadef_object()] + columns = ['name', 'description'] + + datalist = [] + + def setUp(self): + super().setUp() + + self.image_client.metadef_objects.side_effect = [ + self._metadef_objects, + [], + ] + + # Get the command object to test + self.image_client.metadef_objects.return_value = iter( + self._metadef_objects + ) + self.cmd = metadef_objects.ListMetadefObjects(self.app, None) + self.datalist = self._metadef_objects + + def test_metadef_objects_list(self): + arglist = [self._metadef_namespace.namespace] + parsed_args = self.check_parser(self.cmd, arglist, []) + + # In base command class Lister in cliff, abstract method take_action() + # returns a tuple containing the column names and an iterable + # containing the data to be listed. + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.columns, columns) + self.assertEqual(getattr(self.datalist[0], 'name'), next(data)[0]) + + +class TestMetadefObjectSet(fakes.TestImagev2): + _metadef_namespace = fakes.create_one_metadef_namespace() + _metadef_objects = fakes.create_one_metadef_object() + new_metadef_object = fakes.create_one_metadef_object( + attrs={'name': 'new_object_name'} + ) + + def setUp(self): + super().setUp() + + self.image_client.update_metadef_object.return_value = None + self.cmd = metadef_objects.SetMetadefObject(self.app, None) + + def test_object_set_no_options(self): + arglist = [ + self._metadef_namespace.namespace, + self._metadef_objects.name, + ] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + + def test_object_set(self): + arglist = [ + self._metadef_namespace.namespace, + self._metadef_objects.name, + '--name', + 'new_object_name', + ] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + + +class TestMetadefObjectPropertyShow(fakes.TestImagev2): + _metadef_namespace = fakes.create_one_metadef_namespace() + _metadef_objects = fakes.create_one_metadef_object() + md_property = _metadef_objects['properties']['quota:cpu_quota'] + md_property['name'] = 'quota:cpu_quota' + + expected_columns = ( + 'description', + 'name', + 'title', + 'type', + ) + expected_data = ( + md_property['description'], + md_property['name'], + md_property['title'], + md_property['type'], + ) + + def setUp(self): + super().setUp() + + self.image_client.get_metadef_object.return_value = ( + self._metadef_objects + ) + self.cmd = metadef_objects.ShowMetadefObjectProperty(self.app, None) + + def test_object_property_show(self): + arglist = [ + self._metadef_namespace.namespace, + self._metadef_objects.name, + 'quota:cpu_quota', + ] + verifylist = [] + parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_neg_object_property_show(self): + arglist = [ + self._metadef_namespace.namespace, + self._metadef_objects.name, + 'prop1', + ] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertIn( + f'Property {parsed_args.property} not found in object {parsed_args.object}.', + str(exc), + ) diff --git a/openstackclient/tests/unit/image/v2/test_metadef_properties.py b/openstackclient/tests/unit/image/v2/test_metadef_properties.py new file mode 100644 index 0000000000..e2f83d6296 --- /dev/null +++ b/openstackclient/tests/unit/image/v2/test_metadef_properties.py @@ -0,0 +1,241 @@ +# Copyright 2023 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions as sdk_exceptions +from osc_lib import exceptions + +from openstackclient.image.v2 import metadef_properties +from openstackclient.tests.unit.image.v2 import fakes as image_fakes +from openstackclient.tests.unit import utils as tests_utils + + +class TestMetadefPropertyCreate(image_fakes.TestImagev2): + _metadef_namespace = image_fakes.create_one_metadef_namespace() + _metadef_property = image_fakes.create_one_metadef_property() + expected_columns = ( + 'name', + 'title', + 'type', + ) + expected_data = ( + _metadef_property.name, + _metadef_property.title, + _metadef_property.type, + ) + + def setUp(self): + super().setUp() + self.image_client.create_metadef_property.return_value = ( + self._metadef_property + ) + self.cmd = metadef_properties.CreateMetadefProperty(self.app, None) + + def test_metadef_property_create_missing_arguments(self): + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) + + def test_metadef_property_create(self): + arglist = [ + '--name', + 'cpu_cores', + '--schema', + '{}', + '--title', + 'vCPU Cores', + '--type', + 'integer', + self._metadef_namespace.namespace, + ] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_metadef_property_create_invalid_schema(self): + arglist = [ + '--name', + 'cpu_cores', + '--schema', + '{invalid}', + '--title', + 'vCPU Cores', + '--type', + 'integer', + self._metadef_namespace.namespace, + ] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + +class TestMetadefPropertyDelete(image_fakes.TestImagev2): + def setUp(self): + super().setUp() + + self.cmd = metadef_properties.DeleteMetadefProperty(self.app, None) + 
self.image_client.delete_all_metadef_properties.return_value = None + + def test_metadef_property_delete(self): + arglist = ['namespace', 'property'] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + self.image_client.delete_metadef_property.assert_called_with( + 'property', 'namespace', ignore_missing=False + ) + self.image_client.delete_all_metadef_properties.assert_not_called() + + def test_metadef_property_delete_missing_arguments(self): + arglist = [] + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) + self.image_client.delete_all_metadef_properties.assert_not_called() + self.image_client.delete_metadef_property.assert_not_called() + + def test_metadef_property_delete_exception(self): + arglist = ['namespace', 'property'] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.image_client.delete_metadef_property.side_effect = ( + sdk_exceptions.ResourceNotFound + ) + + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.image_client.delete_metadef_property.assert_called_with( + 'property', 'namespace', ignore_missing=False + ) + self.image_client.delete_all_metadef_properties.assert_not_called() + + def test_metadef_property_delete_all(self): + arglist = ['namespace'] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + self.image_client.delete_all_metadef_properties.assert_called_with( + 'namespace' + ) + self.image_client.delete_metadef_property.assert_not_called() + + +class TestMetadefPropertyList(image_fakes.TestImagev2): + _metadef_property = [image_fakes.create_one_metadef_property()] + + columns = ['name', 'title', 'type'] + + def setUp(self): + super().setUp() + + self.image_client.metadef_properties.side_effect = [ + self._metadef_property, + [], + ] + + self.cmd = metadef_properties.ListMetadefProperties(self.app, None) + self.datalist = self._metadef_property + + def test_metadef_property_list(self): + arglist = ['my-namespace'] + parsed_args = self.check_parser(self.cmd, arglist, []) + + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.columns, columns) + self.assertEqual(getattr(self.datalist[0], 'name'), next(data)[0]) + + +class TestMetadefPropertySet(image_fakes.TestImagev2): + _metadef_property = image_fakes.create_one_metadef_property() + + def setUp(self): + super().setUp() + + self.cmd = metadef_properties.SetMetadefProperty(self.app, None) + self.image_client.get_metadef_property.return_value = ( + self._metadef_property + ) + + def test_metadef_property_set(self): + arglist = ['--title', 'new title', 'namespace', 'property'] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + def test_metadef_property_set_invalid_schema(self): + arglist = [ + '--title', + 'new title', + '--schema', + '{invalid}', + 'namespace', + 'property', + ] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + +class TestMetadefPropertyShow(image_fakes.TestImagev2): + _metadef_property = image_fakes.create_one_metadef_property() + + expected_columns = ( + 'name', + 'title', + 'type', + ) + 
expected_data = ( + _metadef_property.name, + _metadef_property.title, + _metadef_property.type, + ) + + def setUp(self): + super().setUp() + + self.image_client.get_metadef_property.return_value = ( + self._metadef_property + ) + + self.cmd = metadef_properties.ShowMetadefProperty(self.app, None) + + def test_metadef_property_show(self): + arglist = ['my-namespace', 'my-property'] + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) diff --git a/openstackclient/tests/unit/image/v2/test_metadef_resource_type_association.py b/openstackclient/tests/unit/image/v2/test_metadef_resource_type_association.py new file mode 100644 index 0000000000..d7c1ee686b --- /dev/null +++ b/openstackclient/tests/unit/image/v2/test_metadef_resource_type_association.py @@ -0,0 +1,127 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstackclient.image.v2 import metadef_resource_type_association +from openstackclient.tests.unit.image.v2 import fakes as resource_type_fakes + + +class TestMetadefResourceTypeAssociationCreate( + resource_type_fakes.TestImagev2 +): + resource_type_association = ( + resource_type_fakes.create_one_resource_type_association() + ) + + columns = ( + 'created_at', + 'id', + 'name', + 'prefix', + 'properties_target', + 'updated_at', + ) + + data = ( + resource_type_association.created_at, + resource_type_association.id, + resource_type_association.name, + resource_type_association.prefix, + resource_type_association.properties_target, + resource_type_association.updated_at, + ) + + def setUp(self): + super().setUp() + + self.image_client.create_metadef_resource_type_association.return_value = self.resource_type_association + self.cmd = metadef_resource_type_association.CreateMetadefResourceTypeAssociation( + self.app, None + ) + + def test_resource_type_association_create(self): + arglist = [ + self.resource_type_association.namespace_name, + self.resource_type_association.name, + ] + + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + +class TestMetadefResourceTypeAssociationDelete( + resource_type_fakes.TestImagev2 +): + resource_type_association = ( + resource_type_fakes.create_one_resource_type_association() + ) + + def setUp(self): + super().setUp() + + self.image_client.delete_metadef_resource_type_association.return_value = self.resource_type_association + self.cmd = metadef_resource_type_association.DeleteMetadefResourceTypeAssociation( + self.app, None + ) + + def test_resource_type_association_delete(self): + arglist = [ + self.resource_type_association.namespace_name, + self.resource_type_association.name, + ] + + verifylist = [] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = 
self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + + +class TestMetadefResourceTypeAssociationList(resource_type_fakes.TestImagev2): + resource_type_associations = ( + resource_type_fakes.create_resource_type_associations() + ) + + columns = ['Name'] + + datalist = [ + (resource_type_association.name,) + for resource_type_association in resource_type_associations + ] + + def setUp(self): + super().setUp() + + self.image_client.metadef_resource_type_associations.side_effect = [ + self.resource_type_associations, + [], + ] + + self.cmd = metadef_resource_type_association.ListMetadefResourceTypeAssociations( + self.app, None + ) + + def test_resource_type_association_list(self): + arglist = [ + self.resource_type_associations[0].namespace_name, + ] + parsed_args = self.check_parser(self.cmd, arglist, []) + + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.datalist, data) diff --git a/openstackclient/tests/unit/image/v2/test_metadef_resource_types.py b/openstackclient/tests/unit/image/v2/test_metadef_resource_types.py new file mode 100644 index 0000000000..f70f5e340d --- /dev/null +++ b/openstackclient/tests/unit/image/v2/test_metadef_resource_types.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
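The new test_metadef_resource_types.py module below follows the same cliff Lister test pattern used throughout these files: the mocked image proxy method is primed with a side_effect whose last element is an empty list, and take_action() returns a (columns, data) pair whose data is consumed lazily. A minimal, self-contained sketch of that pattern, using hypothetical names (FakeListCommand, the 'OS::Nova::Server' record) rather than the project's classes:

    from unittest import mock


    class FakeListCommand:
        """Stand-in for a cliff Lister-style command (illustrative only)."""

        def __init__(self, client):
            self.client = client

        def take_action(self, parsed_args):
            columns = ('Name',)
            records = self.client.metadef_resource_types()
            # Data is a generator, so nothing is evaluated until the caller
            # (or the test) iterates over it.
            data = ((record['name'],) for record in records)
            return columns, data


    image_client = mock.Mock()
    # The trailing empty list mirrors the tests in this patch: a second call
    # to the proxy method simply yields no further records.
    image_client.metadef_resource_types.side_effect = [
        [{'name': 'OS::Nova::Server'}],
        [],
    ]

    columns, data = FakeListCommand(image_client).take_action(parsed_args=None)
    assert columns == ('Name',)
    assert list(data) == [('OS::Nova::Server',)]
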
+ +from openstackclient.image.v2 import metadef_resource_types +from openstackclient.tests.unit.image.v2 import fakes as image_fakes + + +class TestMetadefResourceTypeList(image_fakes.TestImagev2): + resource_types = image_fakes.create_resource_types() + + columns = ['Name'] + + datalist = [(resource_type.name,) for resource_type in resource_types] + + def setUp(self): + super().setUp() + + self.image_client.metadef_resource_types.side_effect = [ + self.resource_types, + [], + ] + + self.cmd = metadef_resource_types.ListMetadefResourceTypes( + self.app, None + ) + + def test_resource_type_list_no_options(self): + arglist = [] + parsed_args = self.check_parser(self.cmd, arglist, []) + + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.datalist, data) diff --git a/openstackclient/tests/unit/image/v2/test_task.py b/openstackclient/tests/unit/image/v2/test_task.py index e077e2b140..891ad1a72b 100644 --- a/openstackclient/tests/unit/image/v2/test_task.py +++ b/openstackclient/tests/unit/image/v2/test_task.py @@ -16,16 +16,7 @@ from openstackclient.tests.unit.image.v2 import fakes as image_fakes -class TestTask(image_fakes.TestImagev2): - def setUp(self): - super().setUp() - - # Get shortcuts to mocked image client - self.client = self.app.client_manager.image - - -class TestTaskShow(TestTask): - +class TestTaskShow(image_fakes.TestImagev2): task = image_fakes.create_one_task() columns = ( @@ -58,7 +49,7 @@ class TestTaskShow(TestTask): def setUp(self): super().setUp() - self.client.get_task.return_value = self.task + self.image_client.get_task.return_value = self.task # Get the command object to test self.cmd = task.ShowTask(self.app, None) @@ -74,14 +65,13 @@ def test_task_show(self): # returns a two-part tuple with a tuple of column names and a tuple of # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.client.get_task.assert_called_with(self.task.id) + self.image_client.get_task.assert_called_with(self.task.id) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) -class TestTaskList(TestTask): - +class TestTaskList(image_fakes.TestImagev2): tasks = image_fakes.create_tasks() columns = ( @@ -103,7 +93,7 @@ class TestTaskList(TestTask): def setUp(self): super().setUp() - self.client.tasks.side_effect = [self.tasks, []] + self.image_client.tasks.side_effect = [self.tasks, []] # Get the command object to test self.cmd = task.ListTask(self.app, None) @@ -122,7 +112,7 @@ def test_task_list_no_options(self): columns, data = self.cmd.take_action(parsed_args) - self.client.tasks.assert_called_with() + self.image_client.tasks.assert_called_with() self.assertEqual(self.columns, columns) self.assertCountEqual(self.datalist, data) @@ -134,7 +124,7 @@ def test_task_list_sort_key_option(self): columns, data = self.cmd.take_action(parsed_args) - self.client.tasks.assert_called_with( + self.image_client.tasks.assert_called_with( sort_key=parsed_args.sort_key, ) @@ -148,7 +138,7 @@ def test_task_list_sort_dir_option(self): self.cmd.take_action(parsed_args) - self.client.tasks.assert_called_with( + self.image_client.tasks.assert_called_with( sort_dir=parsed_args.sort_dir, ) @@ -159,7 +149,7 @@ def test_task_list_pagination_options(self): self.cmd.take_action(parsed_args) - self.client.tasks.assert_called_with( + self.image_client.tasks.assert_called_with( limit=parsed_args.limit, marker=parsed_args.marker, ) @@ -171,7 +161,7 @@ def test_task_list_type_option(self): self.cmd.take_action(parsed_args) - self.client.tasks.assert_called_with( + self.image_client.tasks.assert_called_with( type=self.tasks[0].type, ) @@ -182,6 +172,6 @@ def test_task_list_status_option(self): self.cmd.take_action(parsed_args) - self.client.tasks.assert_called_with( + self.image_client.tasks.assert_called_with( status=self.tasks[0].status, ) diff --git a/openstackclient/tests/unit/integ/base.py b/openstackclient/tests/unit/integ/base.py index caed4f8902..e61945a771 100644 --- a/openstackclient/tests/unit/integ/base.py +++ b/openstackclient/tests/unit/integ/base.py @@ -18,7 +18,7 @@ HOST = "192.168.5.41" -URL_BASE = "http://%s/identity" % HOST +URL_BASE = f"http://{HOST}/identity" V2_AUTH_URL = URL_BASE + "/v2.0/" V2_VERSION_RESP = { @@ -51,15 +51,19 @@ "version": { "status": "stable", "updated": "2016-04-04T00:00:00Z", - "media-types": [{ - "base": "application/json", - "type": "application/vnd.openstack.identity-v3+json", - }], + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.identity-v3+json", + } + ], "id": "v3.6", - "links": [{ - "href": V3_AUTH_URL, - "rel": "self", - }] + "links": [ + { + "href": V3_AUTH_URL, + "rel": "self", + } + ], } } @@ -114,8 +118,7 @@ def make_v3_token(req_mock): class TestInteg(utils.TestCase): - def setUp(self): - super(TestInteg, self).setUp() + super().setUp() self.requests_mock = self.useFixture(fixture.Fixture()) diff --git a/openstackclient/tests/unit/integ/cli/test_project.py b/openstackclient/tests/unit/integ/cli/test_project.py index 4e707a3762..b536a764df 100644 --- a/openstackclient/tests/unit/integ/cli/test_project.py +++ b/openstackclient/tests/unit/integ/cli/test_project.py @@ -20,9 +20,8 @@ class TestIntegV2ProjectID(test_base.TestInteg): - def setUp(self): - super(TestIntegV2ProjectID, self).setUp() + super().setUp() env = { "OS_AUTH_URL": test_base.V2_AUTH_URL, 
"OS_PROJECT_ID": test_shell.DEFAULT_PROJECT_ID, @@ -78,9 +77,8 @@ def test_project_id_arg(self): class TestIntegV2ProjectName(test_base.TestInteg): - def setUp(self): - super(TestIntegV2ProjectName, self).setUp() + super().setUp() env = { "OS_AUTH_URL": test_base.V2_AUTH_URL, "OS_PROJECT_NAME": test_shell.DEFAULT_PROJECT_NAME, @@ -136,9 +134,8 @@ def test_project_name_arg(self): class TestIntegV3ProjectID(test_base.TestInteg): - def setUp(self): - super(TestIntegV3ProjectID, self).setUp() + super().setUp() env = { "OS_AUTH_URL": test_base.V3_AUTH_URL, "OS_PROJECT_ID": test_shell.DEFAULT_PROJECT_NAME, @@ -192,9 +189,8 @@ def test_project_id_arg(self): class TestIntegV3ProjectName(test_base.TestInteg): - def setUp(self): - super(TestIntegV3ProjectName, self).setUp() + super().setUp() env = { "OS_AUTH_URL": test_base.V3_AUTH_URL, "OS_PROJECT_NAME": test_shell.DEFAULT_PROJECT_NAME, diff --git a/openstackclient/tests/unit/integ/cli/test_shell.py b/openstackclient/tests/unit/integ/cli/test_shell.py index 5788b47375..6ac17be5f2 100644 --- a/openstackclient/tests/unit/integ/cli/test_shell.py +++ b/openstackclient/tests/unit/integ/cli/test_shell.py @@ -22,9 +22,8 @@ class TestIntegShellCliNoAuth(test_base.TestInteg): - def setUp(self): - super(TestIntegShellCliNoAuth, self).setUp() + super().setUp() env = {} self.useFixture(osc_lib_utils.EnvFixture(copy.deepcopy(env))) @@ -67,9 +66,8 @@ def test_shell_args_cacert_insecure(self): class TestIntegShellCliV2(test_base.TestInteg): - def setUp(self): - super(TestIntegShellCliV2, self).setUp() + super().setUp() env = { "OS_AUTH_URL": test_base.V2_AUTH_URL, "OS_PROJECT_NAME": test_shell.DEFAULT_PROJECT_NAME, @@ -155,9 +153,8 @@ def test_shell_args_cacert_insecure(self): class TestIntegShellCliV2Ignore(test_base.TestInteg): - def setUp(self): - super(TestIntegShellCliV2Ignore, self).setUp() + super().setUp() env = { "OS_AUTH_URL": test_base.V2_AUTH_URL, "OS_PROJECT_NAME": test_shell.DEFAULT_PROJECT_NAME, @@ -202,9 +199,8 @@ def test_shell_args_ignore_v3(self): class TestIntegShellCliV3(test_base.TestInteg): - def setUp(self): - super(TestIntegShellCliV3, self).setUp() + super().setUp() env = { "OS_AUTH_URL": test_base.V3_AUTH_URL, "OS_PROJECT_DOMAIN_ID": test_shell.DEFAULT_PROJECT_DOMAIN_ID, @@ -293,9 +289,8 @@ def test_shell_args_cacert_insecure(self): class TestIntegShellCliV3Prompt(test_base.TestInteg): - def setUp(self): - super(TestIntegShellCliV3Prompt, self).setUp() + super().setUp() env = { "OS_AUTH_URL": test_base.V3_AUTH_URL, "OS_PROJECT_DOMAIN_ID": test_shell.DEFAULT_PROJECT_DOMAIN_ID, @@ -318,8 +313,7 @@ def test_shell_callback(self, mock_prompt): # Check password callback set correctly self.assertEqual( - mock_prompt, - _shell.cloud._openstack_config._pw_callback + mock_prompt, _shell.cloud._openstack_config._pw_callback ) # Check auth request @@ -345,7 +339,7 @@ class TestIntegShellCliPrecedence(test_base.TestInteg): """ def setUp(self): - super(TestIntegShellCliPrecedence, self).setUp() + super().setUp() env = { "OS_AUTH_URL": test_base.V3_AUTH_URL, "OS_PROJECT_DOMAIN_ID": test_shell.DEFAULT_PROJECT_DOMAIN_ID, @@ -358,16 +352,16 @@ def setUp(self): self.token = test_base.make_v3_token(self.requests_mock) # Patch a v3 auth URL into the o-c-c data - test_shell.PUBLIC_1['public-clouds']['megadodo']['auth']['auth_url'] \ - = test_base.V3_AUTH_URL + test_shell.PUBLIC_1['public-clouds']['megadodo']['auth'][ + 'auth_url' + ] = test_base.V3_AUTH_URL def test_shell_args_options(self): """Verify command line options override environment variables""" 
_shell = shell.OpenStackShell() _shell.run( - "--os-username zarquon --os-password qaz " - "extension list".split(), + "--os-username zarquon --os-password qaz extension list".split(), ) # Check general calls @@ -417,7 +411,7 @@ class TestIntegShellCliPrecedenceOCC(test_base.TestInteg): """ def setUp(self): - super(TestIntegShellCliPrecedenceOCC, self).setUp() + super().setUp() env = { "OS_CLOUD": "megacloud", "OS_AUTH_URL": test_base.V3_AUTH_URL, @@ -432,8 +426,9 @@ def setUp(self): self.token = test_base.make_v3_token(self.requests_mock) # Patch a v3 auth URL into the o-c-c data - test_shell.PUBLIC_1['public-clouds']['megadodo']['auth']['auth_url'] \ - = test_base.V3_AUTH_URL + test_shell.PUBLIC_1['public-clouds']['megadodo']['auth'][ + 'auth_url' + ] = test_base.V3_AUTH_URL def get_temp_file_path(self, filename): """Returns an absolute path for a temporary file. @@ -457,10 +452,12 @@ def config_mock_return(): log_file = self.get_temp_file_path('test_log_file') cloud2 = test_shell.get_cloud(log_file) return ('file.yaml', cloud2) + config_mock.side_effect = config_mock_return def vendor_mock_return(): return ('file.yaml', copy.deepcopy(test_shell.PUBLIC_1)) + vendor_mock.side_effect = vendor_mock_return _shell = shell.OpenStackShell() @@ -504,7 +501,6 @@ def vendor_mock_return(): ) # +env, -cli, +occ - print("auth_req: %s" % auth_req['auth']) self.assertEqual( test_shell.DEFAULT_USERNAME, auth_req['auth']['identity']['password']['user']['name'], @@ -528,10 +524,12 @@ def config_mock_return(): log_file = self.get_temp_file_path('test_log_file') cloud2 = test_shell.get_cloud(log_file) return ('file.yaml', cloud2) + config_mock.side_effect = config_mock_return def vendor_mock_return(): return ('file.yaml', copy.deepcopy(test_shell.PUBLIC_1)) + vendor_mock.side_effect = vendor_mock_return _shell = shell.OpenStackShell() @@ -559,7 +557,6 @@ def vendor_mock_return(): ) # +env, +cli, +occ - print("auth_req: %s" % auth_req['auth']) self.assertEqual( 'zarquon', auth_req['auth']['identity']['password']['user']['name'], diff --git a/openstackclient/tests/unit/network/test_common.py b/openstackclient/tests/unit/network/test_common.py index 4dde1b2bea..cc84c8bdf0 100644 --- a/openstackclient/tests/unit/network/test_common.py +++ b/openstackclient/tests/unit/network/test_common.py @@ -11,7 +11,6 @@ # under the License. 
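The test_common.py hunks that follow exercise commands derived from NetworkAndComputeCommand, which run their network implementation when the Network API endpoint is enabled and fall back to the compute client otherwise. A schematic sketch of that dispatch, assuming only the behaviour these tests assert (this is not the project's actual base class):

    from unittest import mock


    class NetworkOrComputeSketch:
        """Toy version of the network-or-compute dispatch under test."""

        def __init__(self, app):
            self.app = app

        def take_action(self, parsed_args):
            if self.app.client_manager.network_endpoint_enabled:
                client = self.app.client_manager.network
                return self.take_action_network(client, parsed_args)
            client = self.app.client_manager.compute
            return self.take_action_compute(client, parsed_args)

        def take_action_network(self, client, parsed_args):
            return client.network_action(parsed_args)

        def take_action_compute(self, client, parsed_args):
            return client.compute_action(parsed_args)


    app = mock.Mock()

    # Network endpoint available: the network client is used.
    app.client_manager.network_endpoint_enabled = True
    app.client_manager.network.network_action.return_value = 'take_action_network'
    assert NetworkOrComputeSketch(app).take_action(None) == 'take_action_network'

    # Network endpoint disabled: the command falls back to compute.
    app.client_manager.network_endpoint_enabled = False
    app.client_manager.compute.compute_action.return_value = 'take_action_compute'
    assert NetworkOrComputeSketch(app).take_action(None) == 'take_action_compute'
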
# -import argparse from unittest import mock import openstack @@ -49,7 +48,6 @@ def _add_compute_argument(parser): class FakeNetworkAndComputeCommand(common.NetworkAndComputeCommand): - def update_parser_common(self, parser): return _add_common_argument(parser) @@ -67,7 +65,6 @@ def take_action_compute(self, client, parsed_args): class FakeNetworkAndComputeLister(common.NetworkAndComputeLister): - def update_parser_common(self, parser): return _add_common_argument(parser) @@ -85,7 +82,6 @@ def take_action_compute(self, client, parsed_args): class FakeNetworkAndComputeShowOne(common.NetworkAndComputeShowOne): - def update_parser_common(self, parser): return _add_common_argument(parser) @@ -103,11 +99,10 @@ def take_action_compute(self, client, parsed_args): class FakeCreateNeutronCommandWithExtraArgs( - common.NeutronCommandWithExtraArgs): - + common.NeutronCommandWithExtraArgs +): def get_parser(self, prog_name): - parser = super(FakeCreateNeutronCommandWithExtraArgs, - self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--known-attribute', ) @@ -119,210 +114,234 @@ def take_action(self, parsed_args): if 'known_attribute' in parsed_args: attrs['known_attribute'] = parsed_args.known_attribute attrs.update( - self._parse_extra_properties(parsed_args.extra_properties)) + self._parse_extra_properties(parsed_args.extra_properties) + ) client.test_create_action(**attrs) class TestNetworkAndCompute(utils.TestCommand): - def setUp(self): - super(TestNetworkAndCompute, self).setUp() + super().setUp() - self.namespace = argparse.Namespace() + # Create client mocks. Note that we intentionally do not use specced + # mocks since we want to test fake methods. - # Create network client mocks. - self.app.client_manager.network = mock.Mock() - self.network = self.app.client_manager.network - self.network.network_action = mock.Mock( - return_value='take_action_network') + self.app.client_manager.network = mock.Mock() # noqa: O401 + self.network_client = self.app.client_manager.network # noqa: O401 + self.network_client.network_action.return_value = 'take_action_network' - # Create compute client mocks. - self.app.client_manager.compute = mock.Mock() - self.compute = self.app.client_manager.compute - self.compute.compute_action = mock.Mock( - return_value='take_action_compute') + self.app.client_manager.compute = mock.Mock() # noqa: O401 + self.compute_client = self.app.client_manager.compute # noqa: O401 + self.compute_client.compute_action.return_value = 'take_action_compute' - # Subclasses can override the command object to test. 
- self.cmd = FakeNetworkAndComputeCommand(self.app, self.namespace) + self.cmd = FakeNetworkAndComputeCommand(self.app, None) def test_take_action_network(self): - arglist = [ - 'common', - 'network' - ] - verifylist = [ - ('common', 'common'), - ('network', 'network') - ] + arglist = ['common', 'network'] + verifylist = [('common', 'common'), ('network', 'network')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.network_action.assert_called_with(parsed_args) + self.network_client.network_action.assert_called_with(parsed_args) self.assertEqual('take_action_network', result) def test_take_action_compute(self): - arglist = [ - 'common', - 'compute' - ] - verifylist = [ - ('common', 'common'), - ('compute', 'compute') - ] + arglist = ['common', 'compute'] + verifylist = [('common', 'common'), ('compute', 'compute')] self.app.client_manager.network_endpoint_enabled = False parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.compute.compute_action.assert_called_with(parsed_args) + self.compute_client.compute_action.assert_called_with(parsed_args) self.assertEqual('take_action_compute', result) class TestNetworkAndComputeCommand(TestNetworkAndCompute): - def setUp(self): - super(TestNetworkAndComputeCommand, self).setUp() - self.cmd = FakeNetworkAndComputeCommand(self.app, self.namespace) + super().setUp() + self.cmd = FakeNetworkAndComputeCommand(self.app, None) class TestNetworkAndComputeLister(TestNetworkAndCompute): - def setUp(self): - super(TestNetworkAndComputeLister, self).setUp() - self.cmd = FakeNetworkAndComputeLister(self.app, self.namespace) + super().setUp() + self.cmd = FakeNetworkAndComputeLister(self.app, None) class TestNetworkAndComputeShowOne(TestNetworkAndCompute): - def setUp(self): - super(TestNetworkAndComputeShowOne, self).setUp() - self.cmd = FakeNetworkAndComputeShowOne(self.app, self.namespace) + super().setUp() + self.cmd = FakeNetworkAndComputeShowOne(self.app, None) def test_take_action_with_http_exception(self): with mock.patch.object(self.cmd, 'take_action_network') as m_action: m_action.side_effect = openstack.exceptions.HttpException("bar") - self.assertRaisesRegex(exceptions.CommandError, "bar", - self.cmd.take_action, mock.Mock()) + self.assertRaisesRegex( + exceptions.CommandError, + "bar", + self.cmd.take_action, + mock.Mock(), + ) self.app.client_manager.network_endpoint_enabled = False with mock.patch.object(self.cmd, 'take_action_compute') as m_action: m_action.side_effect = openstack.exceptions.HttpException("bar") - self.assertRaisesRegex(exceptions.CommandError, "bar", - self.cmd.take_action, mock.Mock()) + self.assertRaisesRegex( + exceptions.CommandError, + "bar", + self.cmd.take_action, + mock.Mock(), + ) class TestNeutronCommandWithExtraArgs(utils.TestCommand): - def setUp(self): - super(TestNeutronCommandWithExtraArgs, self).setUp() + super().setUp() - self.namespace = argparse.Namespace() + # Create client mocks. Note that we intentionally do not use specced + # mocks since we want to test fake methods. - self.app.client_manager.network = mock.Mock() - self.network = self.app.client_manager.network - self.network.test_create_action = mock.Mock() + self.app.client_manager.network = mock.Mock() # noqa: O401 + self.network_client = self.app.client_manager.network # noqa: O401 + self.network_client.test_create_action = mock.Mock() # noqa: O402 # Subclasses can override the command object to test. 
- self.cmd = FakeCreateNeutronCommandWithExtraArgs( - self.app, self.namespace) + self.cmd = FakeCreateNeutronCommandWithExtraArgs(self.app, None) def test_create_extra_attributes_default_type(self): arglist = [ - '--known-attribute', 'known-value', - '--extra-property', 'name=extra_name,value=extra_value' + '--known-attribute', + 'known-value', + '--extra-property', + 'name=extra_name,value=extra_value', ] verifylist = [ ('known_attribute', 'known-value'), - ('extra_properties', [{'name': 'extra_name', - 'value': 'extra_value'}]) + ( + 'extra_properties', + [{'name': 'extra_name', 'value': 'extra_value'}], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.test_create_action.assert_called_with( - known_attribute='known-value', extra_name='extra_value') + self.network_client.test_create_action.assert_called_with( + known_attribute='known-value', extra_name='extra_value' + ) def test_create_extra_attributes_string(self): arglist = [ - '--known-attribute', 'known-value', - '--extra-property', 'type=str,name=extra_name,value=extra_value' + '--known-attribute', + 'known-value', + '--extra-property', + 'type=str,name=extra_name,value=extra_value', ] verifylist = [ ('known_attribute', 'known-value'), - ('extra_properties', [{'name': 'extra_name', - 'type': 'str', - 'value': 'extra_value'}]) + ( + 'extra_properties', + [ + { + 'name': 'extra_name', + 'type': 'str', + 'value': 'extra_value', + } + ], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.test_create_action.assert_called_with( - known_attribute='known-value', extra_name='extra_value') + self.network_client.test_create_action.assert_called_with( + known_attribute='known-value', extra_name='extra_value' + ) def test_create_extra_attributes_bool(self): arglist = [ - '--known-attribute', 'known-value', - '--extra-property', 'type=bool,name=extra_name,value=TrUe' + '--known-attribute', + 'known-value', + '--extra-property', + 'type=bool,name=extra_name,value=TrUe', ] verifylist = [ ('known_attribute', 'known-value'), - ('extra_properties', [{'name': 'extra_name', - 'type': 'bool', - 'value': 'TrUe'}]) + ( + 'extra_properties', + [{'name': 'extra_name', 'type': 'bool', 'value': 'TrUe'}], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.test_create_action.assert_called_with( - known_attribute='known-value', extra_name=True) + self.network_client.test_create_action.assert_called_with( + known_attribute='known-value', extra_name=True + ) def test_create_extra_attributes_int(self): arglist = [ - '--known-attribute', 'known-value', - '--extra-property', 'type=int,name=extra_name,value=8' + '--known-attribute', + 'known-value', + '--extra-property', + 'type=int,name=extra_name,value=8', ] verifylist = [ ('known_attribute', 'known-value'), - ('extra_properties', [{'name': 'extra_name', - 'type': 'int', - 'value': '8'}]) + ( + 'extra_properties', + [{'name': 'extra_name', 'type': 'int', 'value': '8'}], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.test_create_action.assert_called_with( - known_attribute='known-value', extra_name=8) + self.network_client.test_create_action.assert_called_with( + known_attribute='known-value', extra_name=8 + ) def test_create_extra_attributes_list(self): arglist = [ - '--known-attribute', 'known-value', - '--extra-property', 
'type=list,name=extra_name,value=v_1;v_2' + '--known-attribute', + 'known-value', + '--extra-property', + 'type=list,name=extra_name,value=v_1;v_2', ] verifylist = [ ('known_attribute', 'known-value'), - ('extra_properties', [{'name': 'extra_name', - 'type': 'list', - 'value': 'v_1;v_2'}]) + ( + 'extra_properties', + [{'name': 'extra_name', 'type': 'list', 'value': 'v_1;v_2'}], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.test_create_action.assert_called_with( - known_attribute='known-value', extra_name=['v_1', 'v_2']) + self.network_client.test_create_action.assert_called_with( + known_attribute='known-value', extra_name=['v_1', 'v_2'] + ) def test_create_extra_attributes_dict(self): arglist = [ - '--known-attribute', 'known-value', - '--extra-property', 'type=dict,name=extra_name,value=n1:v1;n2:v2' + '--known-attribute', + 'known-value', + '--extra-property', + 'type=dict,name=extra_name,value=n1:v1;n2:v2', ] verifylist = [ ('known_attribute', 'known-value'), - ('extra_properties', [{'name': 'extra_name', - 'type': 'dict', - 'value': 'n1:v1;n2:v2'}]) + ( + 'extra_properties', + [ + { + 'name': 'extra_name', + 'type': 'dict', + 'value': 'n1:v1;n2:v2', + } + ], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.test_create_action.assert_called_with( - known_attribute='known-value', - extra_name={'n1': 'v1', 'n2': 'v2'}) + self.network_client.test_create_action.assert_called_with( + known_attribute='known-value', extra_name={'n1': 'v1', 'n2': 'v2'} + ) diff --git a/openstackclient/tests/unit/network/test_utils.py b/openstackclient/tests/unit/network/test_utils.py index 6252d7f766..bc3bc127d8 100644 --- a/openstackclient/tests/unit/network/test_utils.py +++ b/openstackclient/tests/unit/network/test_utils.py @@ -18,7 +18,6 @@ class TestUtils(tests_utils.TestCase): - def test_str2bool(self): self.assertTrue(utils.str2bool("true")) self.assertTrue(utils.str2bool("True")) @@ -35,25 +34,19 @@ def test_str2bool(self): self.assertIsNone(utils.str2bool(None)) def test_str2list(self): - self.assertEqual( - ['a', 'b', 'c'], utils.str2list("a;b;c")) - self.assertEqual( - ['abc'], utils.str2list("abc")) + self.assertEqual(['a', 'b', 'c'], utils.str2list("a;b;c")) + self.assertEqual(['abc'], utils.str2list("abc")) self.assertEqual([], utils.str2list("")) self.assertEqual([], utils.str2list(None)) def test_str2dict(self): + self.assertEqual({'a': 'aaa', 'b': '2'}, utils.str2dict('a:aaa;b:2')) self.assertEqual( - {'a': 'aaa', 'b': '2'}, - utils.str2dict('a:aaa;b:2')) - self.assertEqual( - {'a': 'aaa;b;c', 'd': 'ddd'}, - utils.str2dict('a:aaa;b;c;d:ddd')) + {'a': 'aaa;b;c', 'd': 'ddd'}, utils.str2dict('a:aaa;b;c;d:ddd') + ) self.assertEqual({}, utils.str2dict("")) self.assertEqual({}, utils.str2dict(None)) - self.assertRaises( - exceptions.CommandError, - utils.str2dict, "aaa;b:2") + self.assertRaises(exceptions.CommandError, utils.str2dict, "aaa;b:2") diff --git a/openstackclient/tests/unit/network/v2/fakes.py b/openstackclient/tests/unit/network/v2/fakes.py index 6d92200866..16bbfa5ea3 100644 --- a/openstackclient/tests/unit/network/v2/fakes.py +++ b/openstackclient/tests/unit/network/v2/fakes.py @@ -9,20 +9,20 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
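The fakes.py rewrite below drops the hand-rolled FakeNetworkV2Client and the per-resource Fake* helper classes in favour of a FakeClientMixin whose network proxy is mocked with spec=_proxy.Proxy. A small stand-alone illustration of what the spec buys, using a local ProxyLike class instead of the real SDK proxy: a specced mock rejects attributes the real proxy does not have, so a typo'd proxy call fails immediately instead of silently returning another mock.

    from unittest import mock


    class ProxyLike:
        """Local stand-in for the SDK network proxy (illustrative only)."""

        def networks(self):
            raise NotImplementedError


    specced = mock.Mock(spec=ProxyLike)

    # 'networks' exists on the spec, so it can be stubbed and called as usual.
    specced.networks.return_value = []
    assert specced.networks() == []

    # A misspelled attribute is rejected at lookup time rather than quietly
    # returning a child mock.
    try:
        specced.netwroks()
    except AttributeError:
        pass
    else:
        raise AssertionError('expected the specced mock to reject the typo')
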
-# -import argparse import copy from random import choice from random import randint from unittest import mock import uuid +from openstack.network.v2 import _proxy from openstack.network.v2 import address_group as _address_group from openstack.network.v2 import address_scope as _address_scope from openstack.network.v2 import agent as network_agent from openstack.network.v2 import auto_allocated_topology as allocated_topology from openstack.network.v2 import availability_zone as _availability_zone +from openstack.network.v2 import extension as _extension from openstack.network.v2 import flavor as _flavor from openstack.network.v2 import local_ip as _local_ip from openstack.network.v2 import local_ip_association as _local_ip_association @@ -31,584 +31,110 @@ from openstack.network.v2 import network_ip_availability as _ip_availability from openstack.network.v2 import network_segment_range as _segment_range from openstack.network.v2 import port as _port +from openstack.network.v2 import ( + qos_bandwidth_limit_rule as _qos_bandwidth_limit_rule, +) +from openstack.network.v2 import ( + qos_dscp_marking_rule as _qos_dscp_marking_rule, +) +from openstack.network.v2 import ( + qos_minimum_bandwidth_rule as _qos_minimum_bandwidth_rule, +) +from openstack.network.v2 import ( + qos_minimum_packet_rate_rule as _qos_minimum_packet_rate_rule, +) +from openstack.network.v2 import qos_policy as _qos_policy +from openstack.network.v2 import qos_rule_type as _qos_rule_type from openstack.network.v2 import rbac_policy as network_rbac +from openstack.network.v2 import router as _router +from openstack.network.v2 import security_group as _security_group +from openstack.network.v2 import security_group_rule as _security_group_rule from openstack.network.v2 import segment as _segment -from openstack.network.v2 import service_profile as _flavor_profile +from openstack.network.v2 import service_profile as _service_profile from openstack.network.v2 import trunk as _trunk from openstackclient.tests.unit import fakes -from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3 +from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit import utils -QUOTA = { - "subnet": 10, - "network": 10, - "floatingip": 50, - "subnetpool": -1, - "security_group_rule": 100, - "security_group": 10, - "router": 10, - "rbac_policy": -1, - "port": 50, - "vip": 10, - "member": 10, - "healthmonitor": 10, - "l7policy": 5, -} - RULE_TYPE_BANDWIDTH_LIMIT = 'bandwidth-limit' RULE_TYPE_DSCP_MARKING = 'dscp-marking' RULE_TYPE_MINIMUM_BANDWIDTH = 'minimum-bandwidth' RULE_TYPE_MINIMUM_PACKET_RATE = 'minimum-packet-rate' -VALID_QOS_RULES = [RULE_TYPE_BANDWIDTH_LIMIT, - RULE_TYPE_DSCP_MARKING, - RULE_TYPE_MINIMUM_BANDWIDTH, - RULE_TYPE_MINIMUM_PACKET_RATE] -VALID_DSCP_MARKS = [0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, - 34, 36, 38, 40, 46, 48, 56] - - -class FakeNetworkV2Client(object): - - def __init__(self, **kwargs): - self.session = mock.Mock() - self.extensions = mock.Mock() - self.extensions.resource_class = fakes.FakeResource(None, {}) - - -class TestNetworkV2(utils.TestCommand): - +VALID_QOS_RULES = [ + RULE_TYPE_BANDWIDTH_LIMIT, + RULE_TYPE_DSCP_MARKING, + RULE_TYPE_MINIMUM_BANDWIDTH, + RULE_TYPE_MINIMUM_PACKET_RATE, +] +VALID_DSCP_MARKS = [ + 0, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 46, + 48, + 56, +] + + +class FakeClientMixin: def setUp(self): - super(TestNetworkV2, self).setUp() - - 
self.namespace = argparse.Namespace() - - self.app.client_manager.session = mock.Mock() - - self.app.client_manager.network = FakeNetworkV2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - - self.app.client_manager.sdk_connection = mock.Mock() - self.app.client_manager.sdk_connection.network = \ - self.app.client_manager.network - - self.app.client_manager.identity = ( - identity_fakes_v3.FakeIdentityv3Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - ) - - -class FakeExtension(object): - """Fake one or more extension.""" - - @staticmethod - def create_one_extension(attrs=None): - """Create a fake extension. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with name, namespace, etc. - """ - attrs = attrs or {} - - # Set default attributes. - extension_info = { - 'name': 'name-' + uuid.uuid4().hex, - 'namespace': 'http://docs.openstack.org/network/', - 'description': 'description-' + uuid.uuid4().hex, - 'updated': '2013-07-09T12:00:0-00:00', - 'alias': 'Dystopian', - 'links': '[{"href":''"https://github.com/os/network", "type"}]', - } - - # Overwrite default attributes. - extension_info.update(attrs) - - extension = fakes.FakeResource( - info=copy.deepcopy(extension_info), - loaded=True) - return extension - - -class FakeNetworkQosPolicy(object): - """Fake one or more QoS policies.""" - - @staticmethod - def create_one_qos_policy(attrs=None): - """Create a fake QoS policy. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with name, id, etc. - """ - attrs = attrs or {} - qos_id = attrs.get('id') or 'qos-policy-id-' + uuid.uuid4().hex - rule_attrs = {'qos_policy_id': qos_id} - rules = [FakeNetworkQosRule.create_one_qos_rule(rule_attrs)] - - # Set default attributes. - qos_policy_attrs = { - 'name': 'qos-policy-name-' + uuid.uuid4().hex, - 'id': qos_id, - 'is_default': False, - 'project_id': 'project-id-' + uuid.uuid4().hex, - 'shared': False, - 'description': 'qos-policy-description-' + uuid.uuid4().hex, - 'rules': rules, - 'location': 'MUNCHMUNCHMUNCH', - } - - # Overwrite default attributes. - qos_policy_attrs.update(attrs) - - qos_policy = fakes.FakeResource( - info=copy.deepcopy(qos_policy_attrs), - loaded=True) - - # Set attributes with special mapping in OpenStack SDK. - qos_policy.is_shared = qos_policy_attrs['shared'] - - return qos_policy - - @staticmethod - def create_qos_policies(attrs=None, count=2): - """Create multiple fake QoS policies. - - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of QoS policies to fake - :return: - A list of FakeResource objects faking the QoS policies - """ - qos_policies = [] - for i in range(0, count): - qos_policies.append( - FakeNetworkQosPolicy.create_one_qos_policy(attrs)) - - return qos_policies - - @staticmethod - def get_qos_policies(qos_policies=None, count=2): - """Get an iterable MagicMock object with a list of faked QoS policies. - - If qos policies list is provided, then initialize the Mock object - with the list. Otherwise create one. 
- - :param List qos_policies: - A list of FakeResource objects faking qos policies - :param int count: - The number of QoS policies to fake - :return: - An iterable Mock object with side_effect set to a list of faked - QoS policies - """ - if qos_policies is None: - qos_policies = FakeNetworkQosPolicy.create_qos_policies(count) - return mock.Mock(side_effect=qos_policies) - - -class FakeNetworkSecGroup(object): - """Fake one security group.""" - - @staticmethod - def create_one_security_group(attrs=None): - """Create a fake security group. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with name, id, etc. - """ - attrs = attrs or {} - sg_id = attrs.get('id') or 'security-group-id-' + uuid.uuid4().hex - - # Set default attributes. - security_group_attrs = { - 'name': 'security-group-name-' + uuid.uuid4().hex, - 'id': sg_id, - 'project_id': 'project-id-' + uuid.uuid4().hex, - 'description': 'security-group-description-' + uuid.uuid4().hex, - 'location': 'MUNCHMUNCHMUNCH', - } - - security_group = fakes.FakeResource( - info=copy.deepcopy(security_group_attrs), - loaded=True) - - return security_group - - -class FakeNetworkQosRule(object): - """Fake one or more Network QoS rules.""" - - @staticmethod - def create_one_qos_rule(attrs=None): - """Create a fake Network QoS rule. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with name, id, etc. - """ - attrs = attrs or {} - - # Set default attributes. - type = attrs.get('type') or choice(VALID_QOS_RULES) - qos_rule_attrs = { - 'id': 'qos-rule-id-' + uuid.uuid4().hex, - 'qos_policy_id': 'qos-policy-id-' + uuid.uuid4().hex, - 'project_id': 'project-id-' + uuid.uuid4().hex, - 'type': type, - 'location': 'MUNCHMUNCHMUNCH', - } - if type == RULE_TYPE_BANDWIDTH_LIMIT: - qos_rule_attrs['max_kbps'] = randint(1, 10000) - qos_rule_attrs['max_burst_kbits'] = randint(1, 10000) - qos_rule_attrs['direction'] = 'egress' - elif type == RULE_TYPE_DSCP_MARKING: - qos_rule_attrs['dscp_mark'] = choice(VALID_DSCP_MARKS) - elif type == RULE_TYPE_MINIMUM_BANDWIDTH: - qos_rule_attrs['min_kbps'] = randint(1, 10000) - qos_rule_attrs['direction'] = 'egress' - elif type == RULE_TYPE_MINIMUM_PACKET_RATE: - qos_rule_attrs['min_kpps'] = randint(1, 10000) - qos_rule_attrs['direction'] = 'egress' - - # Overwrite default attributes. - qos_rule_attrs.update(attrs) - - qos_rule = fakes.FakeResource(info=copy.deepcopy(qos_rule_attrs), - loaded=True) - - return qos_rule - - @staticmethod - def create_qos_rules(attrs=None, count=2): - """Create multiple fake Network QoS rules. - - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of Network QoS rule to fake - :return: - A list of FakeResource objects faking the Network QoS rules - """ - qos_rules = [] - for i in range(0, count): - qos_rules.append(FakeNetworkQosRule.create_one_qos_rule(attrs)) - return qos_rules - - @staticmethod - def get_qos_rules(qos_rules=None, count=2): - """Get a list of faked Network QoS rules. - - If Network QoS rules list is provided, then initialize the Mock - object with the list. Otherwise create one. 
- - :param List qos_rules: - A list of FakeResource objects faking Network QoS rules - :param int count: - The number of QoS minimum bandwidth rules to fake - :return: - An iterable Mock object with side_effect set to a list of faked - qos minimum bandwidth rules - """ - if qos_rules is None: - qos_rules = (FakeNetworkQosRule.create_qos_rules(count)) - return mock.Mock(side_effect=qos_rules) - - -class FakeNetworkQosRuleType(object): - """Fake one or more Network QoS rule types.""" - - @staticmethod - def create_one_qos_rule_type(attrs=None): - """Create a fake Network QoS rule type. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with name, id, etc. - """ - attrs = attrs or {} - - # Set default attributes. - qos_rule_type_attrs = { - 'type': 'rule-type-' + uuid.uuid4().hex, - 'location': 'MUNCHMUNCHMUNCH', - } - - # Overwrite default attributes. - qos_rule_type_attrs.update(attrs) - - return fakes.FakeResource( - info=copy.deepcopy(qos_rule_type_attrs), - loaded=True) - - @staticmethod - def create_qos_rule_types(attrs=None, count=2): - """Create multiple fake Network QoS rule types. - - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of QoS rule types to fake - :return: - A list of FakeResource objects faking the QoS rule types - """ - qos_rule_types = [] - for i in range(0, count): - qos_rule_types.append( - FakeNetworkQosRuleType.create_one_qos_rule_type(attrs)) - - return qos_rule_types - - -class FakeRouter(object): - """Fake one or more routers.""" - - @staticmethod - def create_one_router(attrs=None): - """Create a fake router. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id, name, admin_state_up, - status, project_id - """ - attrs = attrs or {} - - # Set default attributes. - router_attrs = { - 'id': 'router-id-' + uuid.uuid4().hex, - 'name': 'router-name-' + uuid.uuid4().hex, - 'status': 'ACTIVE', - 'admin_state_up': True, - 'description': 'router-description-' + uuid.uuid4().hex, - 'distributed': False, - 'ha': False, - 'project_id': 'project-id-' + uuid.uuid4().hex, - 'routes': [], - 'external_gateway_info': {}, - 'availability_zone_hints': [], - 'availability_zones': [], - 'tags': [], - 'location': 'MUNCHMUNCHMUNCH', - } - - # Overwrite default attributes. - router_attrs.update(attrs) - - router = fakes.FakeResource(info=copy.deepcopy(router_attrs), - loaded=True) - - # Set attributes with special mapping in OpenStack SDK. - router.is_admin_state_up = router_attrs['admin_state_up'] - router.is_distributed = router_attrs['distributed'] - router.is_ha = router_attrs['ha'] - - return router - - @staticmethod - def create_routers(attrs=None, count=2): - """Create multiple fake routers. - - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of routers to fake - :return: - A list of FakeResource objects faking the routers - """ - routers = [] - for i in range(0, count): - routers.append(FakeRouter.create_one_router(attrs)) - - return routers - - @staticmethod - def get_routers(routers=None, count=2): - """Get an iterable Mock object with a list of faked routers. - - If routers list is provided, then initialize the Mock object with the - list. Otherwise create one. 
- - :param List routers: - A list of FakeResource objects faking routers - :param int count: - The number of routers to fake - :return: - An iterable Mock object with side_effect set to a list of faked - routers - """ - if routers is None: - routers = FakeRouter.create_routers(count) - return mock.Mock(side_effect=routers) - - -class FakeSecurityGroup(object): - """Fake one or more security groups.""" - - @staticmethod - def create_one_security_group(attrs=None): - """Create a fake security group. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id, name, etc. - """ - attrs = attrs or {} - - # Set default attributes. - security_group_attrs = { - 'id': 'security-group-id-' + uuid.uuid4().hex, - 'name': 'security-group-name-' + uuid.uuid4().hex, - 'description': 'security-group-description-' + uuid.uuid4().hex, - 'stateful': True, - 'project_id': 'project-id-' + uuid.uuid4().hex, - 'security_group_rules': [], - 'tags': [], - 'location': 'MUNCHMUNCHMUNCH', - } - - # Overwrite default attributes. - security_group_attrs.update(attrs) - - security_group = fakes.FakeResource( - info=copy.deepcopy(security_group_attrs), - loaded=True) - - return security_group - - @staticmethod - def create_security_groups(attrs=None, count=2): - """Create multiple fake security groups. - - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of security groups to fake - :return: - A list of FakeResource objects faking the security groups - """ - security_groups = [] - for i in range(0, count): - security_groups.append( - FakeSecurityGroup.create_one_security_group(attrs)) - - return security_groups - - @staticmethod - def get_security_groups(security_groups=None, count=2): - """Get an iterable Mock object with a list of faked security groups. - - If security groups list is provided, then initialize the Mock object - with the list. Otherwise create one. - - :param List security_groups: - A list of FakeResource objects faking security groups - :param int count: - The number of security groups to fake - :return: - An iterable Mock object with side_effect set to a list of faked - security groups - """ - if security_groups is None: - security_groups = FakeSecurityGroup.create_security_groups(count) - return mock.Mock(side_effect=security_groups) + super().setUp() + self.app.client_manager.network = mock.Mock(spec=_proxy.Proxy) + self.network_client = self.app.client_manager.network -class FakeSecurityGroupRule(object): - """Fake one or more security group rules.""" - @staticmethod - def create_one_security_group_rule(attrs=None): - """Create a fake security group rule. +class TestNetworkV2( + identity_fakes.FakeClientMixin, + FakeClientMixin, + utils.TestCommand, +): ... - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object, with id, etc. - """ - attrs = attrs or {} - # Set default attributes. - security_group_rule_attrs = { - 'description': 'security-group-rule-description-' + - uuid.uuid4().hex, - 'direction': 'ingress', - 'ether_type': 'IPv4', - 'id': 'security-group-rule-id-' + uuid.uuid4().hex, - 'port_range_max': None, - 'port_range_min': None, - 'protocol': None, - 'remote_group_id': None, - 'remote_address_group_id': None, - 'remote_ip_prefix': '0.0.0.0/0', - 'security_group_id': 'security-group-id-' + uuid.uuid4().hex, - 'project_id': 'project-id-' + uuid.uuid4().hex, - 'location': 'MUNCHMUNCHMUNCH', - } +def create_one_extension(attrs=None): + """Create a fake extension. 
- # Overwrite default attributes. - security_group_rule_attrs.update(attrs) - - security_group_rule = fakes.FakeResource( - info=copy.deepcopy(security_group_rule_attrs), - loaded=True) - - return security_group_rule - - @staticmethod - def create_security_group_rules(attrs=None, count=2): - """Create multiple fake security group rules. - - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of security group rules to fake - :return: - A list of FakeResource objects faking the security group rules - """ - security_group_rules = [] - for i in range(0, count): - security_group_rules.append( - FakeSecurityGroupRule.create_one_security_group_rule(attrs)) - - return security_group_rules + :param Dictionary attrs: + A dictionary with all attributes + :return: + An Extension object with name, namespace, etc. + """ + attrs = attrs or {} - @staticmethod - def get_security_group_rules(security_group_rules=None, count=2): - """Get an iterable Mock with a list of faked security group rules. + # Set default attributes. + extension_info = { + 'name': 'name-' + uuid.uuid4().hex, + 'description': 'description-' + uuid.uuid4().hex, + 'alias': 'Dystopian', + 'links': [], + 'updated_at': '2013-07-09T12:00:0-00:00', + } - If security group rules list is provided, then initialize the Mock - object with the list. Otherwise create one. + # Overwrite default attributes. + extension_info.update(attrs) - :param List security_group_rules: - A list of FakeResource objects faking security group rules - :param int count: - The number of security group rules to fake - :return: - An iterable Mock object with side_effect set to a list of faked - security group rules - """ - if security_group_rules is None: - security_group_rules = ( - FakeSecurityGroupRule.create_security_group_rules(count)) - return mock.Mock(side_effect=security_group_rules) + extension = _extension.Extension(**extension_info) + return extension -class FakeSubnet(object): +class FakeSubnet: """Fake one or more subnets.""" @staticmethod @@ -649,8 +175,9 @@ def create_one_subnet(attrs=None): # Overwrite default attributes. subnet_attrs.update(attrs) - subnet = fakes.FakeResource(info=copy.deepcopy(subnet_attrs), - loaded=True) + subnet = fakes.FakeResource( + info=copy.deepcopy(subnet_attrs), loaded=True + ) # Set attributes with special mappings in OpenStack SDK. subnet.is_dhcp_enabled = subnet_attrs['enable_dhcp'] @@ -695,7 +222,7 @@ def get_subnets(subnets=None, count=2): return mock.Mock(side_effect=subnets) -class FakeFloatingIP(object): +class FakeFloatingIP: """Fake one or more floating ip.""" @staticmethod @@ -731,8 +258,7 @@ def create_one_floating_ip(attrs=None): floating_ip_attrs.update(attrs) floating_ip = fakes.FakeResource( - info=copy.deepcopy(floating_ip_attrs), - loaded=True + info=copy.deepcopy(floating_ip_attrs), loaded=True ) return floating_ip @@ -773,7 +299,7 @@ def get_floating_ips(floating_ips=None, count=2): return mock.Mock(side_effect=floating_ips) -class FakeNetworkMeter(object): +class FakeNetworkMeter: """Fake network meter""" @staticmethod @@ -793,8 +319,8 @@ def create_one_meter(attrs=None): meter_attrs.update(attrs) meter = fakes.FakeResource( - info=copy.deepcopy(meter_attrs), - loaded=True) + info=copy.deepcopy(meter_attrs), loaded=True + ) return meter @@ -804,20 +330,18 @@ def create_meter(attrs=None, count=2): meters = [] for i in range(0, count): - meters.append(FakeNetworkMeter. 
- create_one_meter(attrs)) + meters.append(FakeNetworkMeter.create_one_meter(attrs)) return meters @staticmethod def get_meter(meter=None, count=2): """Get a list of meters""" if meter is None: - meter = (FakeNetworkMeter. - create_meter(count)) + meter = FakeNetworkMeter.create_meter(count) return mock.Mock(side_effect=meter) -class FakeNetworkMeterRule(object): +class FakeNetworkMeterRule: """Fake metering rule""" @staticmethod @@ -840,8 +364,8 @@ def create_one_rule(attrs=None): meter_rule_attrs.update(attrs) meter_rule = fakes.FakeResource( - info=copy.deepcopy(meter_rule_attrs), - loaded=True) + info=copy.deepcopy(meter_rule_attrs), loaded=True + ) return meter_rule @@ -851,20 +375,18 @@ def create_meter_rule(attrs=None, count=2): meter_rules = [] for i in range(0, count): - meter_rules.append(FakeNetworkMeterRule. - create_one_rule(attrs)) + meter_rules.append(FakeNetworkMeterRule.create_one_rule(attrs)) return meter_rules @staticmethod def get_meter_rule(meter_rule=None, count=2): """Get a list of meter rules""" if meter_rule is None: - meter_rule = (FakeNetworkMeterRule. - create_meter_rule(count)) + meter_rule = FakeNetworkMeterRule.create_meter_rule(count) return mock.Mock(side_effect=meter_rule) -class FakeSubnetPool(object): +class FakeSubnetPool: """Fake one or more subnet pools.""" @staticmethod @@ -901,13 +423,13 @@ def create_one_subnet_pool(attrs=None): subnet_pool_attrs.update(attrs) subnet_pool = fakes.FakeResource( - info=copy.deepcopy(subnet_pool_attrs), - loaded=True + info=copy.deepcopy(subnet_pool_attrs), loaded=True ) # Set attributes with special mapping in OpenStack SDK. - subnet_pool.default_prefix_length = \ - subnet_pool_attrs['default_prefixlen'] + subnet_pool.default_prefix_length = subnet_pool_attrs[ + 'default_prefixlen' + ] subnet_pool.is_shared = subnet_pool_attrs['shared'] subnet_pool.maximum_prefix_length = subnet_pool_attrs['max_prefixlen'] subnet_pool.minimum_prefix_length = subnet_pool_attrs['min_prefixlen'] @@ -927,9 +449,7 @@ def create_subnet_pools(attrs=None, count=2): """ subnet_pools = [] for i in range(0, count): - subnet_pools.append( - FakeSubnetPool.create_one_subnet_pool(attrs) - ) + subnet_pools.append(FakeSubnetPool.create_one_subnet_pool(attrs)) return subnet_pools @@ -953,7 +473,7 @@ def get_subnet_pools(subnet_pools=None, count=2): return mock.Mock(side_effect=subnet_pools) -class FakeNetworkServiceProvider(object): +class FakeNetworkServiceProvider: """Fake Network Service Providers""" @staticmethod @@ -966,104 +486,33 @@ def create_one_network_service_provider(attrs=None): 'service_type': 'service-type-' + uuid.uuid4().hex, 'default': False, 'location': 'MUNCHMUNCHMUNCH', - } - - service_provider.update(attrs) - - provider = fakes.FakeResource( - info=copy.deepcopy(service_provider), - loaded=True) - provider.is_default = service_provider['default'] - - return provider - - @staticmethod - def create_network_service_providers(attrs=None, count=2): - """Create multiple service providers""" - - service_providers = [] - for i in range(0, count): - service_providers.append(FakeNetworkServiceProvider. 
- create_one_network_service_provider( - attrs)) - return service_providers - - -class FakeQuota(object): - """Fake quota""" - - @staticmethod - def create_one_net_quota(attrs=None): - """Create one quota""" - attrs = attrs or {} - - quota_attrs = { - 'floating_ips': 20, - 'networks': 25, - 'ports': 11, - 'rbac_policies': 15, - 'routers': 40, - 'security_groups': 10, - 'security_group_rules': 100, - 'subnets': 20, - 'subnet_pools': 30} - - quota_attrs.update(attrs) - - quota = fakes.FakeResource( - info=copy.deepcopy(quota_attrs), - loaded=True) - return quota - - @staticmethod - def create_one_default_net_quota(attrs=None): - """Create one quota""" - attrs = attrs or {} + } - quota_attrs = { - 'floatingip': 30, - 'network': 20, - 'port': 10, - 'rbac_policy': 25, - 'router': 30, - 'security_group': 30, - 'security_group_rule': 200, - 'subnet': 10, - 'subnetpool': 20} + service_provider.update(attrs) - quota_attrs.update(attrs) + provider = fakes.FakeResource( + info=copy.deepcopy(service_provider), loaded=True + ) + provider.is_default = service_provider['default'] - quota = fakes.FakeResource( - info=copy.deepcopy(quota_attrs), - loaded=True) - return quota + return provider @staticmethod - def create_one_net_detailed_quota(attrs=None): - """Create one quota""" - attrs = attrs or {} - - quota_attrs = { - 'floating_ips': {'used': 0, 'reserved': 0, 'limit': 20}, - 'networks': {'used': 0, 'reserved': 0, 'limit': 25}, - 'ports': {'used': 0, 'reserved': 0, 'limit': 11}, - 'rbac_policies': {'used': 0, 'reserved': 0, 'limit': 15}, - 'routers': {'used': 0, 'reserved': 0, 'limit': 40}, - 'security_groups': {'used': 0, 'reserved': 0, 'limit': 10}, - 'security_group_rules': {'used': 0, 'reserved': 0, 'limit': 100}, - 'subnets': {'used': 0, 'reserved': 0, 'limit': 20}, - 'subnet_pools': {'used': 0, 'reserved': 0, 'limit': 30}} - - quota_attrs.update(attrs) + def create_network_service_providers(attrs=None, count=2): + """Create multiple service providers""" - quota = fakes.FakeResource( - info=copy.deepcopy(quota_attrs), - loaded=True) - return quota + service_providers = [] + for i in range(0, count): + service_providers.append( + FakeNetworkServiceProvider.create_one_network_service_provider( + attrs + ) + ) + return service_providers -class FakeFloatingIPPortForwarding(object): - """"Fake one or more Port forwarding""" +class FakeFloatingIPPortForwarding: + """Fake one or more Port forwarding""" @staticmethod def create_one_port_forwarding(attrs=None, use_range=False): @@ -1113,8 +562,7 @@ def create_one_port_forwarding(attrs=None, use_range=False): port_forwarding_attrs.update(attrs) port_forwarding = fakes.FakeResource( - info=copy.deepcopy(port_forwarding_attrs), - loaded=True + info=copy.deepcopy(port_forwarding_attrs), loaded=True ) return port_forwarding @@ -1122,20 +570,21 @@ def create_one_port_forwarding(attrs=None, use_range=False): def create_port_forwardings(attrs=None, count=2, use_range=False): """Create multiple fake Port Forwarding. 
- :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of Port Forwarding rule to fake - :param Boolean use_range: - A boolean which defines if we will use ranges or not - :return: - A list of FakeResource objects faking the Port Forwardings - """ + :param Dictionary attrs: + A dictionary with all attributes + :param int count: + The number of Port Forwarding rule to fake + :param Boolean use_range: + A boolean which defines if we will use ranges or not + :return: + A list of FakeResource objects faking the Port Forwardings + """ port_forwardings = [] for i in range(0, count): port_forwardings.append( FakeFloatingIPPortForwarding.create_one_port_forwarding( - attrs, use_range=use_range) + attrs, use_range=use_range + ) ) return port_forwardings @@ -1159,14 +608,15 @@ def get_port_forwardings(port_forwardings=None, count=2, use_range=False): if port_forwardings is None: port_forwardings = ( FakeFloatingIPPortForwarding.create_port_forwardings( - count, use_range=use_range) + count, use_range=use_range + ) ) return mock.Mock(side_effect=port_forwardings) -class FakeL3ConntrackHelper(object): - """"Fake one or more L3 conntrack helper""" +class FakeL3ConntrackHelper: + """Fake one or more L3 conntrack helper""" @staticmethod def create_one_l3_conntrack_helper(attrs=None): @@ -1178,9 +628,7 @@ def create_one_l3_conntrack_helper(attrs=None): A FakeResource object with protocol, port, etc. """ attrs = attrs or {} - router_id = ( - attrs.get('router_id') or 'router-id-' + uuid.uuid4().hex - ) + router_id = attrs.get('router_id') or 'router-id-' + uuid.uuid4().hex # Set default attributes. ct_attrs = { 'id': uuid.uuid4().hex, @@ -1194,10 +642,7 @@ def create_one_l3_conntrack_helper(attrs=None): # Overwrite default attributes. ct_attrs.update(attrs) - ct = fakes.FakeResource( - info=copy.deepcopy(ct_attrs), - loaded=True - ) + ct = fakes.FakeResource(info=copy.deepcopy(ct_attrs), loaded=True) return ct @staticmethod @@ -1234,8 +679,8 @@ def get_l3_conntrack_helpers(ct_helpers=None, count=2): L3 conntrack helpers """ if ct_helpers is None: - ct_helpers = ( - FakeL3ConntrackHelper.create_l3_conntrack_helpers(count) + ct_helpers = FakeL3ConntrackHelper.create_l3_conntrack_helpers( + count ) return mock.Mock(side_effect=ct_helpers) @@ -1456,7 +901,8 @@ def create_one_ip_availability(attrs=None): network_ip_attrs.update(attrs) network_ip_availability = _ip_availability.NetworkIPAvailability( - **network_ip_attrs) + **network_ip_attrs + ) return network_ip_availability @@ -1510,6 +956,7 @@ def create_one_network(attrs=None): 'availability_zone_hints': [], 'is_default': False, 'is_vlan_transparent': True, + 'is_vlan_qinq': False, 'port_security_enabled': True, 'qos_policy_id': 'qos-policy-id-' + uuid.uuid4().hex, 'ipv4_address_scope': 'ipv4' + uuid.uuid4().hex, @@ -1684,8 +1131,10 @@ def create_one_network_segment_range(attrs=None): 'physical_network': 'physical-network-name-' + fake_uuid, 'minimum': 100, 'maximum': 106, - 'used': {104: '3312e4ba67864b2eb53f3f41432f8efc', - 106: '3312e4ba67864b2eb53f3f41432f8efc'}, + 'used': { + 104: '3312e4ba67864b2eb53f3f41432f8efc', + 106: '3312e4ba67864b2eb53f3f41432f8efc', + }, 'available': [100, 101, 102, 103, 105], 'location': 'MUNCHMUNCHMUNCH', } @@ -1693,8 +1142,9 @@ def create_one_network_segment_range(attrs=None): # Overwrite default attributes. 
network_segment_range_attrs.update(attrs) - network_segment_range = ( - _segment_range.NetworkSegmentRange(**network_segment_range_attrs)) + network_segment_range = _segment_range.NetworkSegmentRange( + **network_segment_range_attrs + ) return network_segment_range @@ -1744,8 +1194,14 @@ def create_one_port(attrs=None): 'dns_domain': 'dns-domain-' + uuid.uuid4().hex, 'dns_name': 'dns-name-' + uuid.uuid4().hex, 'extra_dhcp_opts': [{}], - 'fixed_ips': [{'ip_address': '10.0.0.3', - 'subnet_id': 'subnet-id-' + uuid.uuid4().hex}], + 'fixed_ips': [ + { + 'ip_address': '10.0.0.3', + 'subnet_id': 'subnet-id-' + uuid.uuid4().hex, + } + ], + 'hardware_offload_type': None, + 'hints': {}, 'id': 'port-id-' + uuid.uuid4().hex, 'mac_address': 'fa:16:3e:a9:4e:72', 'name': 'port-name-' + uuid.uuid4().hex, @@ -1758,8 +1214,10 @@ def create_one_port(attrs=None): 'qos_network_policy_id': 'qos-policy-id-' + uuid.uuid4().hex, 'qos_policy_id': 'qos-policy-id-' + uuid.uuid4().hex, 'tags': [], + 'trusted': None, 'propagate_uplink_status': False, 'location': 'MUNCHMUNCHMUNCH', + 'trunk_details': {}, } # Overwrite default attributes. @@ -1934,11 +1392,153 @@ def get_network_rbacs(rbac_policies=None, count=2): return mock.Mock(side_effect=rbac_policies) +def create_one_security_group(attrs=None): + """Create a fake security group. + + :param Dictionary attrs: + A dictionary with all attributes + :return: + A SecurityGroup object, with id, name, etc. + """ + attrs = attrs or {} + + # Set default attributes. + security_group_attrs = { + 'id': 'security-group-id-' + uuid.uuid4().hex, + 'name': 'security-group-name-' + uuid.uuid4().hex, + 'description': 'security-group-description-' + uuid.uuid4().hex, + 'stateful': True, + 'project_id': 'project-id-' + uuid.uuid4().hex, + 'security_group_rules': [], + 'tags': [], + 'location': 'MUNCHMUNCHMUNCH', + 'is_shared': False, + } + + # Overwrite default attributes. + security_group_attrs.update(attrs) + + security_group = _security_group.SecurityGroup(**security_group_attrs) + security_group.tenant_id = None # unset deprecated opts + + return security_group + + +def create_security_groups(attrs=None, count=2): + """Create multiple fake security groups. + + :param Dictionary attrs: + A dictionary with all attributes + :param int count: + The number of security groups to fake + :return: + A list of SecurityGroup objects faking the security groups + """ + security_groups = [] + for i in range(0, count): + security_groups.append(create_one_security_group(attrs)) + + return security_groups + + +def get_security_groups(security_groups=None, count=2): + """Get an iterable Mock object with a list of faked security groups. + + If security groups list is provided, then initialize the Mock object + with the list. Otherwise create one. + + :param List security_groups: + A list of SecurityGroup objects faking security groups + :param int count: + The number of security groups to fake + :return: + An iterable Mock object with side_effect set to a list of faked + security groups + """ + if security_groups is None: + security_groups = create_security_groups(count) + return mock.Mock(side_effect=security_groups) + + +def create_one_security_group_rule(attrs=None): + """Create a fake security group rule. + + :param Dictionary attrs: + A dictionary with all attributes + :return: + A FakeResource object, with id, etc. + """ + attrs = attrs or {} + + # Set default attributes. 
+ security_group_rule_attrs = { + 'description': 'security-group-rule-description-' + uuid.uuid4().hex, + 'direction': 'ingress', + 'ether_type': 'IPv4', + 'id': 'security-group-rule-id-' + uuid.uuid4().hex, + 'port_range_max': None, + 'port_range_min': None, + 'protocol': None, + 'remote_group_id': None, + 'remote_address_group_id': None, + 'remote_ip_prefix': '0.0.0.0/0', + 'security_group_id': 'security-group-id-' + uuid.uuid4().hex, + 'project_id': 'project-id-' + uuid.uuid4().hex, + 'location': 'MUNCHMUNCHMUNCH', + } + + # Overwrite default attributes. + security_group_rule_attrs.update(attrs) + + security_group_rule = _security_group_rule.SecurityGroupRule( + **security_group_rule_attrs + ) + security_group_rule.tenant_id = None # unset deprecated opts + + return security_group_rule + + +def create_security_group_rules(attrs=None, count=2): + """Create multiple fake security group rules. + + :param Dictionary attrs: + A dictionary with all attributes + :param int count: + The number of security group rules to fake + :return: + A list of SecurityGroupRule objects faking the security group rules + """ + security_group_rules = [] + for i in range(0, count): + security_group_rules.append(create_one_security_group_rule(attrs)) + + return security_group_rules + + +def get_security_group_rules(security_group_rules=None, count=2): + """Get an iterable Mock with a list of faked security group rules. + + If security group rules list is provided, then initialize the Mock + object with the list. Otherwise create one. + + :param List security_group_rules: + A list of SecurityGroupRule objects faking security group rules + :param int count: + The number of security group rules to fake + :return: + An iterable Mock object with side_effect set to a list of faked + security group rules + """ + if security_group_rules is None: + security_group_rules = create_security_group_rules(count) + return mock.Mock(side_effect=security_group_rules) + + def create_one_service_profile(attrs=None): - """Create flavor profile.""" + """Create service profile.""" attrs = attrs or {} - flavor_profile_attrs = { + service_profile_attrs = { 'id': 'flavor-profile-id' + uuid.uuid4().hex, 'description': 'flavor-profile-description-' + uuid.uuid4().hex, 'project_id': 'project-id-' + uuid.uuid4().hex, @@ -1948,20 +1548,20 @@ def create_one_service_profile(attrs=None): 'location': 'MUNCHMUNCHMUNCH', } - flavor_profile_attrs.update(attrs) + service_profile_attrs.update(attrs) - flavor_profile = _flavor_profile.ServiceProfile(**flavor_profile_attrs) + flavor_profile = _service_profile.ServiceProfile(**service_profile_attrs) return flavor_profile def create_service_profile(attrs=None, count=2): - """Create multiple flavor profiles.""" + """Create multiple service profiles.""" - flavor_profiles = [] + service_profiles = [] for i in range(0, count): - flavor_profiles.append(create_one_service_profile(attrs)) - return flavor_profiles + service_profiles.append(create_one_service_profile(attrs)) + return service_profiles def get_service_profile(flavor_profile=None, count=2): @@ -1972,6 +1572,281 @@ def get_service_profile(flavor_profile=None, count=2): return mock.Mock(side_effect=flavor_profile) +def create_one_qos_policy(attrs=None): + """Create a fake QoS policy. + + :param Dictionary attrs: + A dictionary with all attributes + :return: + A QoSPolicy object with name, id, etc. + """ + attrs = attrs or {} + qos_id = attrs.get('id') or 'qos-policy-id-' + uuid.uuid4().hex + rules = [] + + # Set default attributes. 
+ qos_policy_attrs = { + 'name': 'qos-policy-name-' + uuid.uuid4().hex, + 'id': qos_id, + 'is_default': False, + 'project_id': 'project-id-' + uuid.uuid4().hex, + 'shared': False, + 'description': 'qos-policy-description-' + uuid.uuid4().hex, + 'rules': rules, + 'location': 'MUNCHMUNCHMUNCH', + } + + # Overwrite default attributes. + qos_policy_attrs.update(attrs) + + qos_policy = _qos_policy.QoSPolicy(**qos_policy_attrs) + + return qos_policy + + +def create_qos_policies(attrs=None, count=2): + """Create multiple fake QoS policies. + + :param Dictionary attrs: + A dictionary with all attributes + :param int count: + The number of QoS policies to fake + :return: + A list of QoSPolicy objects faking the QoS policies + """ + qos_policies = [] + for i in range(0, count): + qos_policies.append(create_one_qos_policy(attrs)) + + return qos_policies + + +def get_qos_policies(qos_policies=None, count=2): + """Get an iterable MagicMock object with a list of faked QoS policies. + + If qos policies list is provided, then initialize the Mock object + with the list. Otherwise create one. + + :param List qos_policies: + A list of QoSPolicy objects faking qos policies + :param int count: + The number of QoS policies to fake + :return: + An iterable Mock object with side_effect set to a list of faked + QoS policies + """ + if qos_policies is None: + qos_policies = create_qos_policies(count) + return mock.Mock(side_effect=qos_policies) + + +def create_one_qos_rule(attrs=None): + """Create a fake Network QoS rule. + + :param Dictionary attrs: + A dictionary with all attributes + :return: + A QoSRule object with id, type, etc. + """ + attrs = attrs or {} + + # Set default attributes. + type = attrs.get('type') or choice(VALID_QOS_RULES) + qos_rule_attrs = { + 'id': 'qos-rule-id-' + uuid.uuid4().hex, + 'qos_policy_id': 'qos-policy-id-' + uuid.uuid4().hex, + 'project_id': 'project-id-' + uuid.uuid4().hex, + 'type': type, + 'location': 'MUNCHMUNCHMUNCH', + } + + if type == RULE_TYPE_BANDWIDTH_LIMIT: + qos_rule_attrs['max_kbps'] = randint(1, 10000) + qos_rule_attrs['max_burst_kbps'] = randint(1, 10000) + qos_rule_attrs['direction'] = 'egress' + + qos_rule_attrs.update(attrs) + qos_rule = _qos_bandwidth_limit_rule.QoSBandwidthLimitRule( + **qos_rule_attrs + ) + + elif type == RULE_TYPE_DSCP_MARKING: + qos_rule_attrs['dscp_mark'] = choice(VALID_DSCP_MARKS) + + qos_rule_attrs.update(attrs) + qos_rule = _qos_dscp_marking_rule.QoSDSCPMarkingRule(**qos_rule_attrs) + + elif type == RULE_TYPE_MINIMUM_BANDWIDTH: + qos_rule_attrs['min_kbps'] = randint(1, 10000) + qos_rule_attrs['direction'] = 'egress' + + qos_rule_attrs.update(attrs) + + qos_rule = _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule( + **qos_rule_attrs + ) + else: # type == RULE_TYPE_MINIMUM_PACKET_RATE: + qos_rule_attrs['min_kpps'] = randint(1, 10000) + qos_rule_attrs['direction'] = 'egress' + + qos_rule_attrs.update(attrs) + + qos_rule = _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule( + **qos_rule_attrs + ) + + return qos_rule + + +def create_qos_rules(attrs=None, count=2): + """Create multiple fake Network QoS rules. + + :param Dictionary attrs: + A dictionary with all attributes + :param int count: + The number of Network QoS rule to fake + :return: + A list of QoS Rules objects faking the Network QoS rules + """ + qos_rules = [] + for i in range(0, count): + qos_rules.append(create_one_qos_rule(attrs)) + return qos_rules + + +def get_qos_rules(qos_rules=None, count=2): + """Get a list of faked Network QoS rules. 
+ + If Network QoS rules list is provided, then initialize the Mock + object with the list. Otherwise create one. + + :param List qos_rules: + A list of FakeResource objects faking Network QoS rules + :param int count: + The number of QoS minimum bandwidth rules to fake + :return: + An iterable Mock object with side_effect set to a list of faked + qos minimum bandwidth rules + """ + if qos_rules is None: + qos_rules = create_qos_rules(count) + return mock.Mock(side_effect=qos_rules) + + +def create_one_qos_rule_type(attrs=None): + """Create a fake Network QoS rule type. + + :param Dictionary attrs: + A dictionary with all attributes + :return: + A QoSRuleType object with name, id, etc. + """ + attrs = attrs or {} + + # Set default attributes. + qos_rule_type_attrs = { + 'type': 'rule-type-' + uuid.uuid4().hex, + 'location': 'MUNCHMUNCHMUNCH', + } + + # Overwrite default attributes. + qos_rule_type_attrs.update(attrs) + qos_rule_type = _qos_rule_type.QoSRuleType(**qos_rule_type_attrs) + + return qos_rule_type + + +def create_qos_rule_types(attrs=None, count=2): + """Create multiple fake Network QoS rule types. + + :param Dictionary attrs: + A dictionary with all attributes + :param int count: + The number of QoS rule types to fake + :return: + A list of QoSRuleType objects faking the QoS rule types + """ + qos_rule_types = [] + for i in range(0, count): + qos_rule_types.append(create_one_qos_rule_type(attrs)) + + return qos_rule_types + + +def create_one_router(attrs=None): + """Create a fake router. + + :param Dictionary attrs: + A dictionary with all attributes + :return: + A Router object, with id, name, admin_state_up, + status, project_id + """ + attrs = attrs or {} + + # Set default attributes. + router_attrs = { + 'id': 'router-id-' + uuid.uuid4().hex, + 'name': 'router-name-' + uuid.uuid4().hex, + 'status': 'ACTIVE', + 'is_admin_state_up': True, + 'description': 'router-description-' + uuid.uuid4().hex, + 'distributed': False, + 'ha': False, + 'project_id': 'project-id-' + uuid.uuid4().hex, + 'routes': [], + 'external_gateway_info': {}, + 'availability_zone_hints': [], + 'availability_zones': [], + 'tags': [], + 'location': 'MUNCHMUNCHMUNCH', + } + + # Overwrite default attributes. + router_attrs.update(attrs) + + router = _router.Router(**router_attrs) + router.tenant_id = None # unset deprecated opts + + return router + + +def create_routers(attrs=None, count=2): + """Create multiple fake routers. + + :param Dictionary attrs: + A dictionary with all attributes + :param int count: + The number of routers to fake + :return: + A list of Router objects faking the routers + """ + routers = [] + for i in range(0, count): + routers.append(create_one_router(attrs)) + + return routers + + +def get_routers(routers=None, count=2): + """Get an iterable Mock object with a list of faked routers. + + If routers list is provided, then initialize the Mock object with the + list. Otherwise create one. + + :param List routers: + A list of Router objects faking routers + :param int count: + The number of routers to fake + :return: + An iterable Mock object with side_effect set to a list of faked + routers + """ + if routers is None: + routers = create_routers(count) + return mock.Mock(side_effect=routers) + + def create_one_local_ip(attrs=None): """Create a fake local ip. @@ -2063,9 +1938,9 @@ def create_one_local_ip_association(attrs=None): # Overwrite default attributes. 
local_ip_association_attrs.update(attrs) - local_ip_association = ( - _local_ip_association.LocalIPAssociation( - **local_ip_association_attrs)) + local_ip_association = _local_ip_association.LocalIPAssociation( + **local_ip_association_attrs + ) return local_ip_association @@ -2116,12 +1991,8 @@ def create_one_ndp_proxy(attrs=None): A FakeResource object with router_id, port_id, etc. """ attrs = attrs or {} - router_id = ( - attrs.get('router_id') or 'router-id-' + uuid.uuid4().hex - ) - port_id = ( - attrs.get('port_id') or 'port-id-' + uuid.uuid4().hex - ) + router_id = attrs.get('router_id') or 'router-id-' + uuid.uuid4().hex + port_id = attrs.get('port_id') or 'port-id-' + uuid.uuid4().hex # Set default attributes. np_attrs = { 'id': uuid.uuid4().hex, @@ -2152,9 +2023,7 @@ def create_ndp_proxies(attrs=None, count=2): """ ndp_proxies = [] for i in range(0, count): - ndp_proxies.append( - create_one_ndp_proxy(attrs) - ) + ndp_proxies.append(create_one_ndp_proxy(attrs)) return ndp_proxies @@ -2173,9 +2042,7 @@ def get_ndp_proxies(ndp_proxies=None, count=2): ndp proxy """ if ndp_proxies is None: - ndp_proxies = ( - create_ndp_proxies(count) - ) + ndp_proxies = create_ndp_proxies(count) return mock.Mock(side_effect=ndp_proxies) @@ -2198,10 +2065,13 @@ def create_one_trunk(attrs=None): 'admin_state_up': True, 'project_id': 'project-id-' + uuid.uuid4().hex, 'status': 'ACTIVE', - 'sub_ports': [{'port_id': 'subport-' + - uuid.uuid4().hex, - 'segmentation_type': 'vlan', - 'segmentation_id': 100}], + 'sub_ports': [ + { + 'port_id': 'subport-' + uuid.uuid4().hex, + 'segmentation_type': 'vlan', + 'segmentation_id': 100, + } + ], } # Overwrite default attributes. trunk_attrs.update(attrs) diff --git a/openstackclient/tests/unit/volume/v1/__init__.py b/openstackclient/tests/unit/network/v2/taas/__init__.py similarity index 100% rename from openstackclient/tests/unit/volume/v1/__init__.py rename to openstackclient/tests/unit/network/v2/taas/__init__.py diff --git a/openstackclient/tests/unit/network/v2/taas/test_osc_tap_flow.py b/openstackclient/tests/unit/network/v2/taas/test_osc_tap_flow.py new file mode 100644 index 0000000000..8e4f185c8d --- /dev/null +++ b/openstackclient/tests/unit/network/v2/taas/test_osc_tap_flow.py @@ -0,0 +1,276 @@ +# All Rights Reserved 2020 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy +import operator +import uuid + +from openstack.network.v2 import tap_flow as _tap_flow +from openstack.network.v2 import tap_service as _tap_service +from openstack.test import fakes as sdk_fakes +from osc_lib import utils as osc_utils +from osc_lib.utils import columns as column_util + +from openstackclient.network.v2.taas import tap_flow as osc_tap_flow +from openstackclient.network.v2.taas import tap_service as osc_tap_service +from openstackclient.tests.unit.network.v2 import fakes as network_fakes + + +columns_long = tuple( + col + for col, _, listing_mode in osc_tap_flow._attr_map + if listing_mode in (column_util.LIST_BOTH, column_util.LIST_LONG_ONLY) +) +headers_long = tuple( + head + for _, head, listing_mode in osc_tap_flow._attr_map + if listing_mode in (column_util.LIST_BOTH, column_util.LIST_LONG_ONLY) +) +sorted_attr_map = sorted(osc_tap_flow._attr_map, key=operator.itemgetter(1)) +sorted_columns = tuple(col for col, _, _ in sorted_attr_map) +sorted_headers = tuple(head for _, head, _ in sorted_attr_map) + + +def _get_data(attrs, columns=sorted_columns): + return osc_utils.get_dict_properties(attrs, columns) + + +class TestCreateTapFlow(network_fakes.TestNetworkV2): + columns = ( + 'description', + 'direction', + 'id', + 'name', + 'project_id', + 'source_port', + 'status', + 'tap_service_id', + ) + + def setUp(self): + super().setUp() + self.cmd = osc_tap_flow.CreateTapFlow(self.app, None) + + def test_create_tap_flow(self): + """Test Create Tap Flow.""" + fake_tap_service = sdk_fakes.generate_fake_resource( + _tap_service.TapService + ) + port_id = str(uuid.uuid4()) + fake_port = network_fakes.create_one_port(attrs={'id': port_id}) + fake_tap_flow = sdk_fakes.generate_fake_resource( + _tap_flow.TapFlow, + **{ + 'source_port': port_id, + 'tap_service_id': fake_tap_service['id'], + 'direction': 'BOTH', + }, + ) + self.app.client_manager.network.create_tap_flow.return_value = ( + fake_tap_flow + ) + self.app.client_manager.network.find_port.return_value = fake_port + self.app.client_manager.network.find_tap_service.return_value = ( + fake_tap_service + ) + arg_list = [ + '--name', + fake_tap_flow['name'], + '--port', + fake_tap_flow['source_port'], + '--tap-service', + fake_tap_flow['tap_service_id'], + '--direction', + fake_tap_flow['direction'], + ] + + verify_list = [ + ('name', fake_tap_flow['name']), + ('port', fake_tap_flow['source_port']), + ('tap_service', fake_tap_flow['tap_service_id']), + ] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + columns, data = self.cmd.take_action(parsed_args) + mock_create_t_f = self.app.client_manager.network.create_tap_flow + mock_create_t_f.assert_called_once_with( + **{ + 'name': fake_tap_flow['name'], + 'source_port': fake_tap_flow['source_port'], + 'tap_service_id': fake_tap_flow['tap_service_id'], + 'direction': fake_tap_flow['direction'], + } + ) + self.assertEqual(self.columns, columns) + fake_data = _get_data( + fake_tap_flow, osc_tap_service._get_columns(fake_tap_flow)[1] + ) + self.assertEqual(fake_data, data) + + +class TestListTapFlow(network_fakes.TestNetworkV2): + def setUp(self): + super().setUp() + self.cmd = osc_tap_flow.ListTapFlow(self.app, None) + + def test_list_tap_flows(self): + """Test List Tap Flow.""" + fake_tap_flows = list( + sdk_fakes.generate_fake_resources(_tap_flow.TapFlow, count=2) + ) + self.app.client_manager.network.tap_flows.return_value = fake_tap_flows + arg_list = [] + verify_list = [] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + + 
headers, data = self.cmd.take_action(parsed_args) + + self.app.client_manager.network.tap_flows.assert_called_once() + self.assertEqual(headers, list(headers_long)) + self.assertCountEqual( + list(data), + [ + _get_data(fake_tap_flow, columns_long) + for fake_tap_flow in fake_tap_flows + ], + ) + + +class TestDeleteTapFlow(network_fakes.TestNetworkV2): + def setUp(self): + super().setUp() + self.app.client_manager.network.find_tap_flow.side_effect = ( + lambda name_or_id, ignore_missing: _tap_flow.TapFlow(id=name_or_id) + ) + self.cmd = osc_tap_flow.DeleteTapFlow(self.app, None) + + def test_delete_tap_flow(self): + """Test Delete tap flow.""" + + fake_tap_flow = sdk_fakes.generate_fake_resource(_tap_flow.TapFlow) + arg_list = [ + fake_tap_flow['id'], + ] + verify_list = [ + (osc_tap_flow.TAP_FLOW, [fake_tap_flow['id']]), + ] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + + result = self.cmd.take_action(parsed_args) + + mock_delete_tap_flow = self.app.client_manager.network.delete_tap_flow + mock_delete_tap_flow.assert_called_once_with(fake_tap_flow['id']) + self.assertIsNone(result) + + +class TestShowTapFlow(network_fakes.TestNetworkV2): + columns = ( + 'description', + 'direction', + 'id', + 'name', + 'project_id', + 'source_port', + 'status', + 'tap_service_id', + ) + + def setUp(self): + super().setUp() + self.app.client_manager.network.find_tap_flow.side_effect = ( + lambda name_or_id, ignore_missing: _tap_flow.TapFlow(id=name_or_id) + ) + self.cmd = osc_tap_flow.ShowTapFlow(self.app, None) + + def test_show_tap_flow(self): + """Test Show tap flow.""" + fake_tap_flow = sdk_fakes.generate_fake_resource(_tap_flow.TapFlow) + self.app.client_manager.network.get_tap_flow.return_value = ( + fake_tap_flow + ) + arg_list = [ + fake_tap_flow['id'], + ] + verify_list = [ + (osc_tap_flow.TAP_FLOW, fake_tap_flow['id']), + ] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + + headers, data = self.cmd.take_action(parsed_args) + + self.app.client_manager.network.get_tap_flow.assert_called_once_with( + fake_tap_flow['id'] + ) + self.assertEqual(self.columns, headers) + fake_data = _get_data( + fake_tap_flow, osc_tap_service._get_columns(fake_tap_flow)[1] + ) + self.assertEqual(fake_data, data) + + +class TestUpdateTapFlow(network_fakes.TestNetworkV2): + _new_name = 'new_name' + + # NOTE(mtomaska): The Resource class from which TapFlow inherits from + # returns duplicate `ID and `Name` keys. 
+ columns = ( + 'Direction', + 'ID', + 'ID', + 'Name', + 'Name', + 'Status', + 'Tenant', + 'description', + 'location', + 'project_id', + 'source_port', + 'tap_service_id', + ) + + def setUp(self): + super().setUp() + self.cmd = osc_tap_flow.UpdateTapFlow(self.app, None) + self.app.client_manager.network.find_tap_flow.side_effect = ( + lambda name_or_id, ignore_missing: _tap_flow.TapFlow(id=name_or_id) + ) + + def test_update_tap_flow(self): + """Test update tap service""" + fake_tap_flow = sdk_fakes.generate_fake_resource(_tap_flow.TapFlow) + new_tap_flow = copy.deepcopy(fake_tap_flow) + new_tap_flow['name'] = self._new_name + + self.app.client_manager.network.update_tap_flow.return_value = ( + new_tap_flow + ) + + arg_list = [ + fake_tap_flow['id'], + '--name', + self._new_name, + ] + verify_list = [('name', self._new_name)] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + columns, data = self.cmd.take_action(parsed_args) + attrs = {'name': self._new_name} + + mock_update_t_f = self.app.client_manager.network.update_tap_flow + mock_update_t_f.assert_called_once_with(new_tap_flow['id'], **attrs) + self.assertEqual(self.columns, columns) + self.assertEqual(_get_data(new_tap_flow, self.columns), data) diff --git a/openstackclient/tests/unit/network/v2/taas/test_osc_tap_mirror.py b/openstackclient/tests/unit/network/v2/taas/test_osc_tap_mirror.py new file mode 100644 index 0000000000..10f3251c36 --- /dev/null +++ b/openstackclient/tests/unit/network/v2/taas/test_osc_tap_mirror.py @@ -0,0 +1,288 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
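As in the tap-flow tests above, the delete/show/update tests below stub the client's find_* lookup so that name resolution stays offline yet still hands back an object carrying the requested ID. A small isolated sketch of that stub, for illustration only and not part of the patch:

    from unittest import mock

    from openstack.network.v2 import tap_mirror

    network = mock.Mock()
    # find_tap_mirror normally resolves a name or ID via the API; here it just
    # wraps whatever it was given in a bare TapMirror so later assertions can
    # check the ID that was passed through.
    network.find_tap_mirror.side_effect = (
        lambda name_or_id, ignore_missing: tap_mirror.TapMirror(id=name_or_id)
    )

    resolved = network.find_tap_mirror('mirror-0', ignore_missing=False)
    assert resolved.id == 'mirror-0'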
+ +import copy +import operator +import uuid + +from openstack.network.v2 import tap_mirror +from openstack.test import fakes as sdk_fakes +from osc_lib import utils as osc_utils +from osc_lib.utils import columns as column_util + +from openstackclient.network.v2.taas import tap_mirror as osc_tap_mirror +from openstackclient.tests.unit.network.v2 import fakes as network_fakes + + +columns_long = tuple( + col + for col, _, listing_mode in osc_tap_mirror._attr_map + if listing_mode in (column_util.LIST_BOTH, column_util.LIST_LONG_ONLY) +) +headers_long = tuple( + head + for _, head, listing_mode in osc_tap_mirror._attr_map + if listing_mode in (column_util.LIST_BOTH, column_util.LIST_LONG_ONLY) +) +sorted_attr_map = sorted(osc_tap_mirror._attr_map, key=operator.itemgetter(1)) +sorted_columns = tuple(col for col, _, _ in sorted_attr_map) +sorted_headers = tuple(head for _, head, _ in sorted_attr_map) + + +def _get_data(attrs, columns=sorted_columns): + return osc_utils.get_dict_properties(attrs, columns) + + +class TestCreateTapMirror(network_fakes.TestNetworkV2): + columns = ( + 'description', + 'directions', + 'id', + 'mirror_type', + 'name', + 'port_id', + 'project_id', + 'remote_ip', + ) + + def setUp(self): + super().setUp() + self.cmd = osc_tap_mirror.CreateTapMirror(self.app, None) + + def test_create_tap_mirror(self): + port_id = str(uuid.uuid4()) + fake_port = network_fakes.create_one_port(attrs={'id': port_id}) + fake_tap_mirror = sdk_fakes.generate_fake_resource( + tap_mirror.TapMirror, **{'port_id': port_id, 'directions': 'IN=99'} + ) + self.app.client_manager.network.create_tap_mirror.return_value = ( + fake_tap_mirror + ) + self.app.client_manager.network.find_port.return_value = fake_port + self.app.client_manager.network.find_tap_mirror.side_effect = ( + lambda _, name_or_id: {'id': name_or_id} + ) + arg_list = [ + '--name', + fake_tap_mirror['name'], + '--port', + fake_tap_mirror['port_id'], + '--directions', + fake_tap_mirror['directions'], + '--remote-ip', + fake_tap_mirror['remote_ip'], + '--mirror-type', + fake_tap_mirror['mirror_type'], + ] + + verify_directions = fake_tap_mirror['directions'].split('=') + verify_directions_dict = {verify_directions[0]: verify_directions[1]} + + verify_list = [ + ('name', fake_tap_mirror['name']), + ('port_id', fake_tap_mirror['port_id']), + ('directions', verify_directions_dict), + ('remote_ip', fake_tap_mirror['remote_ip']), + ('mirror_type', fake_tap_mirror['mirror_type']), + ] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + self.app.client_manager.network.find_tap_mirror.return_value = ( + fake_tap_mirror + ) + + columns, data = self.cmd.take_action(parsed_args) + create_tap_m_mock = self.app.client_manager.network.create_tap_mirror + create_tap_m_mock.assert_called_once_with( + **{ + 'name': fake_tap_mirror['name'], + 'port_id': fake_tap_mirror['port_id'], + 'directions': verify_directions_dict, + 'remote_ip': fake_tap_mirror['remote_ip'], + 'mirror_type': fake_tap_mirror['mirror_type'], + } + ) + self.assertEqual(self.columns, columns) + fake_data = _get_data( + fake_tap_mirror, osc_tap_mirror._get_columns(fake_tap_mirror)[1] + ) + self.assertEqual(fake_data, data) + + +class TestListTapMirror(network_fakes.TestNetworkV2): + def setUp(self): + super().setUp() + self.cmd = osc_tap_mirror.ListTapMirror(self.app, None) + + def test_list_tap_mirror(self): + """Test List Tap Mirror.""" + fake_tap_mirrors = list( + sdk_fakes.generate_fake_resources(tap_mirror.TapMirror, count=4) + ) + 
self.app.client_manager.network.tap_mirrors.return_value = ( + fake_tap_mirrors + ) + + arg_list = [] + verify_list = [] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + + headers, data = self.cmd.take_action(parsed_args) + + self.app.client_manager.network.tap_mirrors.assert_called_once() + self.assertEqual(headers, list(headers_long)) + self.assertCountEqual( + list(data), + [ + _get_data(fake_tap_mirror, columns_long) + for fake_tap_mirror in fake_tap_mirrors + ], + ) + + +class TestDeleteTapMirror(network_fakes.TestNetworkV2): + def setUp(self): + super().setUp() + self.app.client_manager.network.find_tap_mirror.side_effect = ( + lambda name_or_id, ignore_missing: tap_mirror.TapMirror( + id=name_or_id + ) + ) + self.cmd = osc_tap_mirror.DeleteTapMirror(self.app, None) + + def test_delete_tap_mirror(self): + """Test Delete Tap Mirror.""" + + fake_tap_mirror = sdk_fakes.generate_fake_resource( + tap_mirror.TapMirror + ) + + arg_list = [ + fake_tap_mirror['id'], + ] + verify_list = [ + (osc_tap_mirror.TAP_MIRROR, [fake_tap_mirror['id']]), + ] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + result = self.cmd.take_action(parsed_args) + + mock_delete_tap_m = self.app.client_manager.network.delete_tap_mirror + mock_delete_tap_m.assert_called_once_with(fake_tap_mirror['id']) + self.assertIsNone(result) + + +class TestShowTapMirror(network_fakes.TestNetworkV2): + columns = ( + 'description', + 'directions', + 'id', + 'mirror_type', + 'name', + 'port_id', + 'project_id', + 'remote_ip', + ) + + def setUp(self): + super().setUp() + self.app.client_manager.network.find_tap_mirror.side_effect = ( + lambda name_or_id, ignore_missing: tap_mirror.TapMirror( + id=name_or_id + ) + ) + self.cmd = osc_tap_mirror.ShowTapMirror(self.app, None) + + def test_show_tap_mirror(self): + """Test Show Tap Mirror.""" + + fake_tap_mirror = sdk_fakes.generate_fake_resource( + tap_mirror.TapMirror + ) + self.app.client_manager.network.get_tap_mirror.return_value = ( + fake_tap_mirror + ) + arg_list = [ + fake_tap_mirror['id'], + ] + verify_list = [ + (osc_tap_mirror.TAP_MIRROR, fake_tap_mirror['id']), + ] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + + headers, data = self.cmd.take_action(parsed_args) + + mock_get_tap_m = self.app.client_manager.network.get_tap_mirror + mock_get_tap_m.assert_called_once_with(fake_tap_mirror['id']) + self.assertEqual(self.columns, headers) + fake_data = _get_data( + fake_tap_mirror, osc_tap_mirror._get_columns(fake_tap_mirror)[1] + ) + self.assertEqual(fake_data, data) + + +class TestUpdateTapMirror(network_fakes.TestNetworkV2): + _new_name = 'new_name' + columns = ( + 'description', + 'directions', + 'id', + 'mirror_type', + 'name', + 'port_id', + 'project_id', + 'remote_ip', + ) + + def setUp(self): + super().setUp() + self.cmd = osc_tap_mirror.UpdateTapMirror(self.app, None) + self.app.client_manager.network.find_tap_mirror.side_effect = ( + lambda name_or_id, ignore_missing: tap_mirror.TapMirror( + id=name_or_id + ) + ) + + def test_update_tap_mirror(self): + """Test update Tap Mirror""" + fake_tap_mirror = sdk_fakes.generate_fake_resource( + tap_mirror.TapMirror + ) + new_tap_mirror = copy.deepcopy(fake_tap_mirror) + new_tap_mirror['name'] = self._new_name + + self.app.client_manager.network.update_tap_mirror.return_value = ( + new_tap_mirror + ) + + arg_list = [ + fake_tap_mirror['id'], + '--name', + self._new_name, + ] + verify_list = [('name', self._new_name)] + + parsed_args = self.check_parser(self.cmd, 
arg_list, verify_list) + columns, data = self.cmd.take_action(parsed_args) + attrs = {'name': self._new_name} + + mock_update_tap_m = self.app.client_manager.network.update_tap_mirror + mock_update_tap_m.assert_called_once_with( + fake_tap_mirror['id'], **attrs + ) + self.assertEqual(self.columns, columns) + fake_data = _get_data( + new_tap_mirror, osc_tap_mirror._get_columns(new_tap_mirror)[1] + ) + self.assertEqual(fake_data, data) diff --git a/openstackclient/tests/unit/network/v2/taas/test_osc_tap_service.py b/openstackclient/tests/unit/network/v2/taas/test_osc_tap_service.py new file mode 100644 index 0000000000..fa766891ee --- /dev/null +++ b/openstackclient/tests/unit/network/v2/taas/test_osc_tap_service.py @@ -0,0 +1,271 @@ +# All Rights Reserved 2020 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import operator +import uuid + +from openstack.network.v2 import tap_service +from openstack.test import fakes as sdk_fakes +from osc_lib import utils as osc_utils +from osc_lib.utils import columns as column_util + +from openstackclient.network.v2.taas import tap_service as osc_tap_service +from openstackclient.tests.unit.network.v2 import fakes as network_fakes + + +columns_long = tuple( + col + for col, _, listing_mode in osc_tap_service._attr_map + if listing_mode in (column_util.LIST_BOTH, column_util.LIST_LONG_ONLY) +) +headers_long = tuple( + head + for _, head, listing_mode in osc_tap_service._attr_map + if listing_mode in (column_util.LIST_BOTH, column_util.LIST_LONG_ONLY) +) +sorted_attr_map = sorted(osc_tap_service._attr_map, key=operator.itemgetter(1)) +sorted_columns = tuple(col for col, _, _ in sorted_attr_map) +sorted_headers = tuple(head for _, head, _ in sorted_attr_map) + + +def _get_data(attrs, columns=sorted_columns): + return osc_utils.get_dict_properties(attrs, columns) + + +class TestCreateTapService(network_fakes.TestNetworkV2): + columns = ( + 'description', + 'id', + 'name', + 'port_id', + 'project_id', + 'status', + ) + + def setUp(self): + super().setUp() + self.cmd = osc_tap_service.CreateTapService(self.app, None) + + def test_create_tap_service(self): + """Test Create Tap Service.""" + port_id = str(uuid.uuid4()) + fake_port = network_fakes.create_one_port(attrs={'id': port_id}) + fake_tap_service = sdk_fakes.generate_fake_resource( + tap_service.TapService, **{'port_id': port_id} + ) + self.app.client_manager.network.create_tap_service.return_value = ( + fake_tap_service + ) + self.app.client_manager.network.find_port.return_value = fake_port + self.app.client_manager.network.find_tap_service.side_effect = ( + lambda _, name_or_id: {'id': name_or_id} + ) + arg_list = [ + '--name', + fake_tap_service['name'], + '--port', + fake_tap_service['port_id'], + ] + + verify_list = [ + ('name', fake_tap_service['name']), + ('port_id', fake_tap_service['port_id']), + ] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + self.app.client_manager.network.find_tap_service.return_value = ( + fake_tap_service + ) + + columns, data = 
self.cmd.take_action(parsed_args) + create_tap_s_mock = self.app.client_manager.network.create_tap_service + create_tap_s_mock.assert_called_once_with( + **{ + 'name': fake_tap_service['name'], + 'port_id': fake_tap_service['port_id'], + } + ) + self.assertEqual(self.columns, columns) + fake_data = _get_data( + fake_tap_service, osc_tap_service._get_columns(fake_tap_service)[1] + ) + self.assertEqual(fake_data, data) + + +class TestListTapService(network_fakes.TestNetworkV2): + def setUp(self): + super().setUp() + self.cmd = osc_tap_service.ListTapService(self.app, None) + + def test_list_tap_service(self): + """Test List Tap Service.""" + fake_tap_services = list( + sdk_fakes.generate_fake_resources(tap_service.TapService, count=4) + ) + self.app.client_manager.network.tap_services.return_value = ( + fake_tap_services + ) + + arg_list = [] + verify_list = [] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + + headers, data = self.cmd.take_action(parsed_args) + + self.app.client_manager.network.tap_services.assert_called_once() + self.assertEqual(headers, list(headers_long)) + self.assertCountEqual( + list(data), + [ + _get_data(fake_tap_service, columns_long) + for fake_tap_service in fake_tap_services + ], + ) + + +class TestDeleteTapService(network_fakes.TestNetworkV2): + def setUp(self): + super().setUp() + self.app.client_manager.network.find_tap_service.side_effect = ( + lambda name_or_id, ignore_missing: tap_service.TapService( + id=name_or_id + ) + ) + self.cmd = osc_tap_service.DeleteTapService(self.app, None) + + def test_delete_tap_service(self): + """Test Delete tap service.""" + + fake_tap_service = sdk_fakes.generate_fake_resource( + tap_service.TapService + ) + + arg_list = [ + fake_tap_service['id'], + ] + verify_list = [ + (osc_tap_service.TAP_SERVICE, [fake_tap_service['id']]), + ] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + result = self.cmd.take_action(parsed_args) + + mock_delete_tap_s = self.app.client_manager.network.delete_tap_service + mock_delete_tap_s.assert_called_once_with(fake_tap_service['id']) + self.assertIsNone(result) + + +class TestShowTapService(network_fakes.TestNetworkV2): + columns = ( + 'description', + 'id', + 'name', + 'port_id', + 'project_id', + 'status', + ) + + def setUp(self): + super().setUp() + self.app.client_manager.network.find_tap_service.side_effect = ( + lambda name_or_id, ignore_missing: tap_service.TapService( + id=name_or_id + ) + ) + self.cmd = osc_tap_service.ShowTapService(self.app, None) + + def test_show_tap_service(self): + """Test Show tap service.""" + + fake_tap_service = sdk_fakes.generate_fake_resource( + tap_service.TapService + ) + self.app.client_manager.network.get_tap_service.return_value = ( + fake_tap_service + ) + arg_list = [ + fake_tap_service['id'], + ] + verify_list = [ + (osc_tap_service.TAP_SERVICE, fake_tap_service['id']), + ] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + + headers, data = self.cmd.take_action(parsed_args) + + mock_get_tap_s = self.app.client_manager.network.get_tap_service + mock_get_tap_s.assert_called_once_with(fake_tap_service['id']) + self.assertEqual(self.columns, headers) + fake_data = _get_data( + fake_tap_service, osc_tap_service._get_columns(fake_tap_service)[1] + ) + self.assertEqual(fake_data, data) + + +class TestUpdateTapService(network_fakes.TestNetworkV2): + _new_name = 'new_name' + + columns = ( + 'description', + 'id', + 'name', + 'port_id', + 'project_id', + 'status', + ) + + def setUp(self): + 
super().setUp() + self.cmd = osc_tap_service.UpdateTapService(self.app, None) + self.app.client_manager.network.find_tap_service.side_effect = ( + lambda name_or_id, ignore_missing: tap_service.TapService( + id=name_or_id + ) + ) + + def test_update_tap_service(self): + """Test update tap service""" + fake_tap_service = sdk_fakes.generate_fake_resource( + tap_service.TapService + ) + new_tap_service = copy.deepcopy(fake_tap_service) + new_tap_service['name'] = self._new_name + + self.app.client_manager.network.update_tap_service.return_value = ( + new_tap_service + ) + + arg_list = [ + fake_tap_service['id'], + '--name', + self._new_name, + ] + verify_list = [('name', self._new_name)] + + parsed_args = self.check_parser(self.cmd, arg_list, verify_list) + columns, data = self.cmd.take_action(parsed_args) + attrs = {'name': self._new_name} + + mock_update_tap_s = self.app.client_manager.network.update_tap_service + mock_update_tap_s.assert_called_once_with( + fake_tap_service['id'], **attrs + ) + self.assertEqual(self.columns, columns) + fake_data = _get_data( + new_tap_service, osc_tap_service._get_columns(new_tap_service)[1] + ) + self.assertEqual(fake_data, data) diff --git a/openstackclient/tests/unit/network/v2/test_address_group.py b/openstackclient/tests/unit/network/v2/test_address_group.py index f550db692b..48f706631f 100644 --- a/openstackclient/tests/unit/network/v2/test_address_group.py +++ b/openstackclient/tests/unit/network/v2/test_address_group.py @@ -11,7 +11,6 @@ # under the License. # -from unittest import mock from unittest.mock import call from osc_lib import exceptions @@ -23,29 +22,24 @@ class TestAddressGroup(network_fakes.TestNetworkV2): - def setUp(self): - super(TestAddressGroup, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateAddressGroup(TestAddressGroup): - project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() # The new address group created. 
- new_address_group = ( - network_fakes.create_one_address_group( - attrs={ - 'project_id': project.id, - } - )) + new_address_group = network_fakes.create_one_address_group( + attrs={ + 'project_id': project.id, + } + ) columns = ( 'addresses', 'description', @@ -62,12 +56,13 @@ class TestCreateAddressGroup(TestAddressGroup): ) def setUp(self): - super(TestCreateAddressGroup, self).setUp() - self.network.create_address_group = mock.Mock( - return_value=self.new_address_group) + super().setUp() + self.network_client.create_address_group.return_value = ( + self.new_address_group + ) # Get the command object to test - self.cmd = address_group.CreateAddressGroup(self.app, self.namespace) + self.cmd = address_group.CreateAddressGroup(self.app, None) self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain @@ -77,8 +72,13 @@ def test_create_no_options(self): verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ @@ -92,21 +92,27 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_address_group.assert_called_once_with(**{ - 'name': self.new_address_group.name, - 'addresses': [], - }) + self.network_client.create_address_group.assert_called_once_with( + **{ + 'name': self.new_address_group.name, + 'addresses': [], + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_all_options(self): arglist = [ - '--project', self.project.name, - '--project-domain', self.domain.name, - '--address', '10.0.0.1', - '--description', self.new_address_group.description, + '--project', + self.project.name, + '--project-domain', + self.domain.name, + '--address', + '10.0.0.1', + '--description', + self.new_address_group.description, self.new_address_group.name, ] verifylist = [ @@ -118,31 +124,35 @@ def test_create_all_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) - - self.network.create_address_group.assert_called_once_with(**{ - 'addresses': ['10.0.0.1/32'], - 'project_id': self.project.id, - 'name': self.new_address_group.name, - 'description': self.new_address_group.description, - }) + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_address_group.assert_called_once_with( + **{ + 'addresses': ['10.0.0.1/32'], + 'project_id': self.project.id, + 'name': self.new_address_group.name, + 'description': self.new_address_group.description, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) class TestDeleteAddressGroup(TestAddressGroup): - # The address group to delete. 
_address_groups = network_fakes.create_address_groups(count=2) def setUp(self): - super(TestDeleteAddressGroup, self).setUp() - self.network.delete_address_group = mock.Mock(return_value=None) - self.network.find_address_group = network_fakes.get_address_groups( - address_groups=self._address_groups) + super().setUp() + self.network_client.delete_address_group.return_value = None + self.network_client.find_address_group = ( + network_fakes.get_address_groups( + address_groups=self._address_groups + ) + ) # Get the command object to test - self.cmd = address_group.DeleteAddressGroup(self.app, self.namespace) + self.cmd = address_group.DeleteAddressGroup(self.app, None) def test_address_group_delete(self): arglist = [ @@ -155,10 +165,12 @@ def test_address_group_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_address_group.assert_called_once_with( - self._address_groups[0].name, ignore_missing=False) - self.network.delete_address_group.assert_called_once_with( - self._address_groups[0]) + self.network_client.find_address_group.assert_called_once_with( + self._address_groups[0].name, ignore_missing=False + ) + self.network_client.delete_address_group.assert_called_once_with( + self._address_groups[0] + ) self.assertIsNone(result) def test_multi_address_groups_delete(self): @@ -176,7 +188,7 @@ def test_multi_address_groups_delete(self): calls = [] for a in self._address_groups: calls.append(call(a)) - self.network.delete_address_group.assert_has_calls(calls) + self.network_client.delete_address_group.assert_has_calls(calls) self.assertIsNone(result) def test_multi_address_groups_delete_with_exception(self): @@ -185,15 +197,15 @@ def test_multi_address_groups_delete_with_exception(self): 'unexist_address_group', ] verifylist = [ - ('address_group', - [self._address_groups[0].name, 'unexist_address_group']), + ( + 'address_group', + [self._address_groups[0].name, 'unexist_address_group'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._address_groups[0], exceptions.CommandError] - self.network.find_address_group = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_address_group.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -201,17 +213,18 @@ def test_multi_address_groups_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 address groups failed to delete.', str(e)) - self.network.find_address_group.assert_any_call( - self._address_groups[0].name, ignore_missing=False) - self.network.find_address_group.assert_any_call( - 'unexist_address_group', ignore_missing=False) - self.network.delete_address_group.assert_called_once_with( + self.network_client.find_address_group.assert_any_call( + self._address_groups[0].name, ignore_missing=False + ) + self.network_client.find_address_group.assert_any_call( + 'unexist_address_group', ignore_missing=False + ) + self.network_client.delete_address_group.assert_called_once_with( self._address_groups[0] ) class TestListAddressGroup(TestAddressGroup): - # The address groups to list up. 
address_groups = network_fakes.create_address_groups(count=3) columns = ( @@ -223,21 +236,22 @@ class TestListAddressGroup(TestAddressGroup): ) data = [] for group in address_groups: - data.append(( - group.id, - group.name, - group.description, - group.project_id, - group.addresses, - )) + data.append( + ( + group.id, + group.name, + group.description, + group.project_id, + group.addresses, + ) + ) def setUp(self): - super(TestListAddressGroup, self).setUp() - self.network.address_groups = mock.Mock( - return_value=self.address_groups) + super().setUp() + self.network_client.address_groups.return_value = self.address_groups # Get the command object to test - self.cmd = address_group.ListAddressGroup(self.app, self.namespace) + self.cmd = address_group.ListAddressGroup(self.app, None) def test_address_group_list(self): arglist = [] @@ -246,13 +260,14 @@ def test_address_group_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_groups.assert_called_once_with(**{}) + self.network_client.address_groups.assert_called_once_with(**{}) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_address_group_list_name(self): arglist = [ - '--name', self.address_groups[0].name, + '--name', + self.address_groups[0].name, ] verifylist = [ ('name', self.address_groups[0].name), @@ -260,8 +275,9 @@ def test_address_group_list_name(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_groups.assert_called_once_with( - **{'name': self.address_groups[0].name}) + self.network_client.address_groups.assert_called_once_with( + **{'name': self.address_groups[0].name} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -269,7 +285,8 @@ def test_address_group_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -277,8 +294,9 @@ def test_address_group_list_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_groups.assert_called_once_with( - project_id=project.id) + self.network_client.address_groups.assert_called_once_with( + project_id=project.id + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -286,8 +304,10 @@ def test_address_group_project_domain(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -295,29 +315,35 @@ def test_address_group_project_domain(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_groups.assert_called_once_with( - project_id=project.id) + self.network_client.address_groups.assert_called_once_with( + project_id=project.id + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) class TestSetAddressGroup(TestAddressGroup): - # The address group to set. 
_address_group = network_fakes.create_one_address_group() def setUp(self): - super(TestSetAddressGroup, self).setUp() - self.network.update_address_group = mock.Mock(return_value=None) - self.network.find_address_group = mock.Mock( - return_value=self._address_group) - self.network.add_addresses_to_address_group = mock.Mock( - return_value=self._address_group) + super().setUp() + self.network_client.update_address_group.return_value = None + self.network_client.find_address_group.return_value = ( + self._address_group + ) + + self.network_client.add_addresses_to_address_group.return_value = ( + self._address_group + ) + # Get the command object to test - self.cmd = address_group.SetAddressGroup(self.app, self.namespace) + self.cmd = address_group.SetAddressGroup(self.app, None) def test_set_nothing(self): - arglist = [self._address_group.name, ] + arglist = [ + self._address_group.name, + ] verifylist = [ ('address_group', self._address_group.name), ] @@ -325,14 +351,16 @@ def test_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_address_group.assert_not_called() - self.network.add_addresses_to_address_group.assert_not_called() + self.network_client.update_address_group.assert_not_called() + self.network_client.add_addresses_to_address_group.assert_not_called() self.assertIsNone(result) def test_set_name_and_description(self): arglist = [ - '--name', 'new_address_group_name', - '--description', 'new_address_group_description', + '--name', + 'new_address_group_name', + '--description', + 'new_address_group_description', self._address_group.name, ] verifylist = [ @@ -347,14 +375,16 @@ def test_set_name_and_description(self): 'name': "new_address_group_name", 'description': 'new_address_group_description', } - self.network.update_address_group.assert_called_with( - self._address_group, **attrs) + self.network_client.update_address_group.assert_called_with( + self._address_group, **attrs + ) self.assertIsNone(result) def test_set_one_address(self): arglist = [ self._address_group.name, - '--address', '10.0.0.2', + '--address', + '10.0.0.2', ] verifylist = [ ('address_group', self._address_group.name), @@ -363,15 +393,18 @@ def test_set_one_address(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.add_addresses_to_address_group.assert_called_once_with( - self._address_group, ['10.0.0.2/32']) + self.network_client.add_addresses_to_address_group.assert_called_once_with( + self._address_group, ['10.0.0.2/32'] + ) self.assertIsNone(result) def test_set_multiple_addresses(self): arglist = [ self._address_group.name, - '--address', '10.0.0.2', - '--address', '2001::/16', + '--address', + '10.0.0.2', + '--address', + '2001::/16', ] verifylist = [ ('address_group', self._address_group.name), @@ -380,13 +413,13 @@ def test_set_multiple_addresses(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.add_addresses_to_address_group.assert_called_once_with( - self._address_group, ['10.0.0.2/32', '2001::/16']) + self.network_client.add_addresses_to_address_group.assert_called_once_with( + self._address_group, ['10.0.0.2/32', '2001::/16'] + ) self.assertIsNone(result) class TestShowAddressGroup(TestAddressGroup): - # The address group to show. 
_address_group = network_fakes.create_one_address_group() columns = ( @@ -405,20 +438,26 @@ class TestShowAddressGroup(TestAddressGroup): ) def setUp(self): - super(TestShowAddressGroup, self).setUp() - self.network.find_address_group = mock.Mock( - return_value=self._address_group) + super().setUp() + self.network_client.find_address_group.return_value = ( + self._address_group + ) # Get the command object to test - self.cmd = address_group.ShowAddressGroup(self.app, self.namespace) + self.cmd = address_group.ShowAddressGroup(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -431,28 +470,32 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_address_group.assert_called_once_with( - self._address_group.name, ignore_missing=False) + self.network_client.find_address_group.assert_called_once_with( + self._address_group.name, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) class TestUnsetAddressGroup(TestAddressGroup): - # The address group to unset. _address_group = network_fakes.create_one_address_group() def setUp(self): - super(TestUnsetAddressGroup, self).setUp() - self.network.find_address_group = mock.Mock( - return_value=self._address_group) - self.network.remove_addresses_from_address_group = mock.Mock( - return_value=self._address_group) + super().setUp() + self.network_client.find_address_group.return_value = ( + self._address_group + ) + + self.network_client.remove_addresses_from_address_group.return_value = self._address_group + # Get the command object to test - self.cmd = address_group.UnsetAddressGroup(self.app, self.namespace) + self.cmd = address_group.UnsetAddressGroup(self.app, None) def test_unset_nothing(self): - arglist = [self._address_group.name, ] + arglist = [ + self._address_group.name, + ] verifylist = [ ('address_group', self._address_group.name), ] @@ -460,13 +503,14 @@ def test_unset_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.remove_addresses_from_address_group.assert_not_called() + self.network_client.remove_addresses_from_address_group.assert_not_called() self.assertIsNone(result) def test_unset_one_address(self): arglist = [ self._address_group.name, - '--address', '10.0.0.2', + '--address', + '10.0.0.2', ] verifylist = [ ('address_group', self._address_group.name), @@ -475,15 +519,18 @@ def test_unset_one_address(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.remove_addresses_from_address_group.\ - assert_called_once_with(self._address_group, ['10.0.0.2/32']) + self.network_client.remove_addresses_from_address_group.assert_called_once_with( # noqa: E501 + self._address_group, ['10.0.0.2/32'] + ) self.assertIsNone(result) def test_unset_multiple_addresses(self): arglist = [ self._address_group.name, - '--address', '10.0.0.2', - '--address', '2001::/16', + '--address', + '10.0.0.2', + '--address', + '2001::/16', ] verifylist = [ ('address_group', self._address_group.name), @@ -492,7 
+539,7 @@ def test_unset_multiple_addresses(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.remove_addresses_from_address_group.\ - assert_called_once_with(self._address_group, - ['10.0.0.2/32', '2001::/16']) + self.network_client.remove_addresses_from_address_group.assert_called_once_with( # noqa: E501 + self._address_group, ['10.0.0.2/32', '2001::/16'] + ) self.assertIsNone(result) diff --git a/openstackclient/tests/unit/network/v2/test_address_scope.py b/openstackclient/tests/unit/network/v2/test_address_scope.py index d4c8352813..6e2c05ed9c 100644 --- a/openstackclient/tests/unit/network/v2/test_address_scope.py +++ b/openstackclient/tests/unit/network/v2/test_address_scope.py @@ -11,7 +11,6 @@ # under the License. # -from unittest import mock from unittest.mock import call from osc_lib import exceptions @@ -23,34 +22,25 @@ class TestAddressScope(network_fakes.TestNetworkV2): - def setUp(self): - super(TestAddressScope, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateAddressScope(TestAddressScope): - project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() # The new address scope created. new_address_scope = network_fakes.create_one_address_scope( attrs={ 'project_id': project.id, - }) - columns = ( - 'id', - 'ip_version', - 'name', - 'project_id', - 'shared' + } ) + columns = ('id', 'ip_version', 'name', 'project_id', 'shared') data = ( new_address_scope.id, new_address_scope.ip_version, @@ -60,12 +50,13 @@ class TestCreateAddressScope(TestAddressScope): ) def setUp(self): - super(TestCreateAddressScope, self).setUp() - self.network.create_address_scope = mock.Mock( - return_value=self.new_address_scope) + super().setUp() + self.network_client.create_address_scope.return_value = ( + self.new_address_scope + ) # Get the command object to test - self.cmd = address_scope.CreateAddressScope(self.app, self.namespace) + self.cmd = address_scope.CreateAddressScope(self.app, None) self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain @@ -75,8 +66,13 @@ def test_create_no_options(self): verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ @@ -89,21 +85,26 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_address_scope.assert_called_once_with(**{ - 'ip_version': self.new_address_scope.ip_version, - 'name': self.new_address_scope.name, - }) + self.network_client.create_address_scope.assert_called_once_with( + **{ + 'ip_version': self.new_address_scope.ip_version, + 'name': self.new_address_scope.name, + } + ) self.assertEqual(self.columns, columns) 
self.assertEqual(self.data, data) def test_create_all_options(self): arglist = [ - '--ip-version', str(self.new_address_scope.ip_version), + '--ip-version', + str(self.new_address_scope.ip_version), '--share', - '--project', self.project.name, - '--project-domain', self.domain.name, + '--project', + self.project.name, + '--project-domain', + self.domain.name, self.new_address_scope.name, ] verifylist = [ @@ -115,14 +116,16 @@ def test_create_all_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_address_scope.assert_called_once_with(**{ - 'ip_version': self.new_address_scope.ip_version, - 'shared': True, - 'project_id': self.project.id, - 'name': self.new_address_scope.name, - }) + self.network_client.create_address_scope.assert_called_once_with( + **{ + 'ip_version': self.new_address_scope.ip_version, + 'shared': True, + 'project_id': self.project.id, + 'name': self.new_address_scope.name, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -139,28 +142,32 @@ def test_create_no_share(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_address_scope.assert_called_once_with(**{ - 'ip_version': self.new_address_scope.ip_version, - 'shared': False, - 'name': self.new_address_scope.name, - }) + self.network_client.create_address_scope.assert_called_once_with( + **{ + 'ip_version': self.new_address_scope.ip_version, + 'shared': False, + 'name': self.new_address_scope.name, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestDeleteAddressScope(TestAddressScope): - # The address scope to delete. 
_address_scopes = network_fakes.create_address_scopes(count=2) def setUp(self): - super(TestDeleteAddressScope, self).setUp() - self.network.delete_address_scope = mock.Mock(return_value=None) - self.network.find_address_scope = network_fakes.get_address_scopes( - address_scopes=self._address_scopes) + super().setUp() + self.network_client.delete_address_scope.return_value = None + self.network_client.find_address_scope = ( + network_fakes.get_address_scopes( + address_scopes=self._address_scopes + ) + ) # Get the command object to test - self.cmd = address_scope.DeleteAddressScope(self.app, self.namespace) + self.cmd = address_scope.DeleteAddressScope(self.app, None) def test_address_scope_delete(self): arglist = [ @@ -173,10 +180,12 @@ def test_address_scope_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_address_scope.assert_called_once_with( - self._address_scopes[0].name, ignore_missing=False) - self.network.delete_address_scope.assert_called_once_with( - self._address_scopes[0]) + self.network_client.find_address_scope.assert_called_once_with( + self._address_scopes[0].name, ignore_missing=False + ) + self.network_client.delete_address_scope.assert_called_once_with( + self._address_scopes[0] + ) self.assertIsNone(result) def test_multi_address_scopes_delete(self): @@ -195,7 +204,7 @@ def test_multi_address_scopes_delete(self): calls = [] for a in self._address_scopes: calls.append(call(a)) - self.network.delete_address_scope.assert_has_calls(calls) + self.network_client.delete_address_scope.assert_has_calls(calls) self.assertIsNone(result) def test_multi_address_scopes_delete_with_exception(self): @@ -204,15 +213,15 @@ def test_multi_address_scopes_delete_with_exception(self): 'unexist_address_scope', ] verifylist = [ - ('address_scope', - [self._address_scopes[0].name, 'unexist_address_scope']), + ( + 'address_scope', + [self._address_scopes[0].name, 'unexist_address_scope'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._address_scopes[0], exceptions.CommandError] - self.network.find_address_scope = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_address_scope.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -220,17 +229,18 @@ def test_multi_address_scopes_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 address scopes failed to delete.', str(e)) - self.network.find_address_scope.assert_any_call( - self._address_scopes[0].name, ignore_missing=False) - self.network.find_address_scope.assert_any_call( - 'unexist_address_scope', ignore_missing=False) - self.network.delete_address_scope.assert_called_once_with( + self.network_client.find_address_scope.assert_any_call( + self._address_scopes[0].name, ignore_missing=False + ) + self.network_client.find_address_scope.assert_any_call( + 'unexist_address_scope', ignore_missing=False + ) + self.network_client.delete_address_scope.assert_called_once_with( self._address_scopes[0] ) class TestListAddressScope(TestAddressScope): - # The address scopes to list up. 
address_scopes = network_fakes.create_address_scopes(count=3) columns = ( @@ -242,21 +252,22 @@ class TestListAddressScope(TestAddressScope): ) data = [] for scope in address_scopes: - data.append(( - scope.id, - scope.name, - scope.ip_version, - scope.is_shared, - scope.project_id, - )) + data.append( + ( + scope.id, + scope.name, + scope.ip_version, + scope.is_shared, + scope.project_id, + ) + ) def setUp(self): - super(TestListAddressScope, self).setUp() - self.network.address_scopes = mock.Mock( - return_value=self.address_scopes) + super().setUp() + self.network_client.address_scopes.return_value = self.address_scopes # Get the command object to test - self.cmd = address_scope.ListAddressScope(self.app, self.namespace) + self.cmd = address_scope.ListAddressScope(self.app, None) def test_address_scope_list(self): arglist = [] @@ -265,13 +276,14 @@ def test_address_scope_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_scopes.assert_called_once_with(**{}) + self.network_client.address_scopes.assert_called_once_with(**{}) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_address_scope_list_name(self): arglist = [ - '--name', self.address_scopes[0].name, + '--name', + self.address_scopes[0].name, ] verifylist = [ ('name', self.address_scopes[0].name), @@ -279,14 +291,16 @@ def test_address_scope_list_name(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_scopes.assert_called_once_with( - **{'name': self.address_scopes[0].name}) + self.network_client.address_scopes.assert_called_once_with( + **{'name': self.address_scopes[0].name} + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_address_scope_list_ip_version(self): arglist = [ - '--ip-version', str(4), + '--ip-version', + str(4), ] verifylist = [ ('ip_version', 4), @@ -294,8 +308,9 @@ def test_address_scope_list_ip_version(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_scopes.assert_called_once_with( - **{'ip_version': 4}) + self.network_client.address_scopes.assert_called_once_with( + **{'ip_version': 4} + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -303,7 +318,8 @@ def test_address_scope_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -311,8 +327,9 @@ def test_address_scope_list_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_scopes.assert_called_once_with( - **{'project_id': project.id}) + self.network_client.address_scopes.assert_called_once_with( + **{'project_id': project.id} + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -320,8 +337,10 @@ def test_address_scope_project_domain(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -331,7 +350,7 @@ 
def test_address_scope_project_domain(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.address_scopes.assert_called_once_with(**filters) + self.network_client.address_scopes.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -345,7 +364,7 @@ def test_address_scope_list_share(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_scopes.assert_called_once_with( + self.network_client.address_scopes.assert_called_once_with( **{'is_shared': True} ) self.assertEqual(self.columns, columns) @@ -361,7 +380,7 @@ def test_address_scope_list_no_share(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.address_scopes.assert_called_once_with( + self.network_client.address_scopes.assert_called_once_with( **{'is_shared': False} ) self.assertEqual(self.columns, columns) @@ -369,21 +388,23 @@ def test_address_scope_list_no_share(self): class TestSetAddressScope(TestAddressScope): - # The address scope to set. _address_scope = network_fakes.create_one_address_scope() def setUp(self): - super(TestSetAddressScope, self).setUp() - self.network.update_address_scope = mock.Mock(return_value=None) - self.network.find_address_scope = mock.Mock( - return_value=self._address_scope) + super().setUp() + self.network_client.update_address_scope.return_value = None + self.network_client.find_address_scope.return_value = ( + self._address_scope + ) # Get the command object to test - self.cmd = address_scope.SetAddressScope(self.app, self.namespace) + self.cmd = address_scope.SetAddressScope(self.app, None) def test_set_nothing(self): - arglist = [self._address_scope.name, ] + arglist = [ + self._address_scope.name, + ] verifylist = [ ('address_scope', self._address_scope.name), ] @@ -392,13 +413,15 @@ def test_set_nothing(self): result = self.cmd.take_action(parsed_args) attrs = {} - self.network.update_address_scope.assert_called_with( - self._address_scope, **attrs) + self.network_client.update_address_scope.assert_called_with( + self._address_scope, **attrs + ) self.assertIsNone(result) def test_set_name_and_share(self): arglist = [ - '--name', 'new_address_scope', + '--name', + 'new_address_scope', '--share', self._address_scope.name, ] @@ -414,8 +437,9 @@ def test_set_name_and_share(self): 'name': "new_address_scope", 'shared': True, } - self.network.update_address_scope.assert_called_with( - self._address_scope, **attrs) + self.network_client.update_address_scope.assert_called_with( + self._address_scope, **attrs + ) self.assertIsNone(result) def test_set_no_share(self): @@ -433,16 +457,15 @@ def test_set_no_share(self): attrs = { 'shared': False, } - self.network.update_address_scope.assert_called_with( - self._address_scope, **attrs) + self.network_client.update_address_scope.assert_called_with( + self._address_scope, **attrs + ) self.assertIsNone(result) class TestShowAddressScope(TestAddressScope): - # The address scope to show. 
- _address_scope = ( - network_fakes.create_one_address_scope()) + _address_scope = network_fakes.create_one_address_scope() columns = ( 'id', 'ip_version', @@ -459,20 +482,26 @@ class TestShowAddressScope(TestAddressScope): ) def setUp(self): - super(TestShowAddressScope, self).setUp() - self.network.find_address_scope = mock.Mock( - return_value=self._address_scope) + super().setUp() + self.network_client.find_address_scope.return_value = ( + self._address_scope + ) # Get the command object to test - self.cmd = address_scope.ShowAddressScope(self.app, self.namespace) + self.cmd = address_scope.ShowAddressScope(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -485,7 +514,8 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_address_scope.assert_called_once_with( - self._address_scope.name, ignore_missing=False) + self.network_client.find_address_scope.assert_called_once_with( + self._address_scope.name, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertEqual(list(self.data), list(data)) diff --git a/openstackclient/tests/unit/network/v2/test_default_security_group_rule.py b/openstackclient/tests/unit/network/v2/test_default_security_group_rule.py new file mode 100644 index 0000000000..c44e553c76 --- /dev/null +++ b/openstackclient/tests/unit/network/v2/test_default_security_group_rule.py @@ -0,0 +1,1144 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from unittest.mock import call +import uuid + +from openstack.network.v2 import ( + default_security_group_rule as _default_security_group_rule, +) +from openstack.test import fakes as sdk_fakes +from osc_lib import exceptions + +from openstackclient.network import utils as network_utils +from openstackclient.network.v2 import default_security_group_rule +from openstackclient.tests.unit.network.v2 import fakes as network_fakes +from openstackclient.tests.unit import utils as tests_utils + + +class TestCreateDefaultSecurityGroupRule(network_fakes.TestNetworkV2): + expected_columns = ( + 'description', + 'direction', + 'ether_type', + 'id', + 'port_range_max', + 'port_range_min', + 'protocol', + 'remote_address_group_id', + 'remote_group_id', + 'remote_ip_prefix', + 'used_in_default_sg', + 'used_in_non_default_sg', + ) + + expected_data = None + + def _setup_default_security_group_rule(self, attrs=None): + default_security_group_rule_attrs = { + 'description': 'default-security-group-rule-description-' + + uuid.uuid4().hex, + 'direction': 'ingress', + 'ether_type': 'IPv4', + 'id': 'default-security-group-rule-id-' + uuid.uuid4().hex, + 'port_range_max': None, + 'port_range_min': None, + 'protocol': None, + 'remote_group_id': None, + 'remote_address_group_id': None, + 'remote_ip_prefix': '0.0.0.0/0', + 'location': 'MUNCHMUNCHMUNCH', + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + attrs = attrs or {} + # Overwrite default attributes. + default_security_group_rule_attrs.update(attrs) + self._default_sg_rule = sdk_fakes.generate_fake_resource( + _default_security_group_rule.DefaultSecurityGroupRule, + **default_security_group_rule_attrs, + ) + + self.network_client.create_default_security_group_rule.return_value = ( + self._default_sg_rule + ) + self.expected_data = ( + self._default_sg_rule.description, + self._default_sg_rule.direction, + self._default_sg_rule.ether_type, + self._default_sg_rule.id, + self._default_sg_rule.port_range_max, + self._default_sg_rule.port_range_min, + self._default_sg_rule.protocol, + self._default_sg_rule.remote_address_group_id, + self._default_sg_rule.remote_group_id, + self._default_sg_rule.remote_ip_prefix, + self._default_sg_rule.used_in_default_sg, + self._default_sg_rule.used_in_non_default_sg, + ) + + def setUp(self): + super().setUp() + + # Get the command object to test + self.cmd = default_security_group_rule.CreateDefaultSecurityGroupRule( + self.app, None + ) + + def test_create_all_remote_options(self): + arglist = [ + '--remote-ip', + '10.10.0.0/24', + '--remote-group', + 'test-remote-group-id', + '--remote-address-group', + 'test-remote-address-group-id', + ] + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) + + def test_create_bad_ethertype(self): + arglist = [ + '--ethertype', + 'foo', + ] + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) + + def test_lowercase_ethertype(self): + arglist = [ + '--ethertype', + 'ipv4', + ] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.assertEqual('IPv4', parsed_args.ethertype) + + def test_lowercase_v6_ethertype(self): + arglist = [ + '--ethertype', + 'ipv6', + ] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.assertEqual('IPv6', parsed_args.ethertype) + + def test_proper_case_ethertype(self): + arglist = [ + '--ethertype', + 'IPv6', + ] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.assertEqual('IPv6', 
parsed_args.ethertype) + + def test_create_all_port_range_options(self): + arglist = [ + '--dst-port', + '80:80', + '--icmp-type', + '3', + '--icmp-code', + '1', + ] + verifylist = [ + ('dst_port', (80, 80)), + ('icmp_type', 3), + ('icmp_code', 1), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + def test_create_default_rule(self): + self._setup_default_security_group_rule( + { + 'protocol': 'tcp', + 'port_range_max': 443, + 'port_range_min': 443, + } + ) + arglist = [ + '--protocol', + 'tcp', + '--dst-port', + str(self._default_sg_rule.port_range_min), + ] + verifylist = [ + ( + 'dst_port', + ( + self._default_sg_rule.port_range_min, + self._default_sg_rule.port_range_max, + ), + ), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'port_range_max': self._default_sg_rule.port_range_max, + 'port_range_min': self._default_sg_rule.port_range_min, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def _test_create_protocol_any_helper( + self, for_default_sg=False, for_custom_sg=False + ): + self._setup_default_security_group_rule( + { + 'protocol': None, + 'remote_ip_prefix': '10.0.2.0/24', + } + ) + arglist = [ + '--protocol', + 'any', + '--remote-ip', + self._default_sg_rule.remote_ip_prefix, + ] + if for_default_sg: + arglist.append('--for-default-sg') + if for_custom_sg: + arglist.append('--for-custom-sg') + verifylist = [ + ('protocol', 'any'), + ('remote_ip', self._default_sg_rule.remote_ip_prefix), + ('for_default_sg', for_default_sg), + ('for_custom_sg', for_custom_sg), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': for_default_sg, + 'used_in_non_default_sg': for_custom_sg, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_protocol_any_not_for_default_sg(self): + self._test_create_protocol_any_helper() + + def test_create_protocol_any_for_default_sg(self): + self._test_create_protocol_any_helper(for_default_sg=True) + + def test_create_protocol_any_for_custom_sg(self): + self._test_create_protocol_any_helper(for_custom_sg=True) + + def test_create_protocol_any_for_default_and_custom_sg(self): + self._test_create_protocol_any_helper( + for_default_sg=True, for_custom_sg=True + ) + + def test_create_remote_address_group(self): + self._setup_default_security_group_rule( + { + 'protocol': 'icmp', + 'remote_address_group_id': 'remote-address-group-id', + } + ) + arglist = [ + '--protocol', + 'icmp', + '--remote-address-group', + self._default_sg_rule.remote_address_group_id, + ] + verifylist = [ + ( + 'remote_address_group', + 
self._default_sg_rule.remote_address_group_id, + ), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'protocol': self._default_sg_rule.protocol, + 'remote_address_group_id': self._default_sg_rule.remote_address_group_id, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_remote_group(self): + self._setup_default_security_group_rule( + { + 'protocol': 'tcp', + 'port_range_max': 22, + 'port_range_min': 22, + } + ) + arglist = [ + '--protocol', + 'tcp', + '--dst-port', + str(self._default_sg_rule.port_range_min), + '--ingress', + '--remote-group', + 'remote-group-id', + ] + verifylist = [ + ( + 'dst_port', + ( + self._default_sg_rule.port_range_min, + self._default_sg_rule.port_range_max, + ), + ), + ('ingress', True), + ('remote_group', 'remote-group-id'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'port_range_max': self._default_sg_rule.port_range_max, + 'port_range_min': self._default_sg_rule.port_range_min, + 'protocol': self._default_sg_rule.protocol, + 'remote_group_id': 'remote-group-id', + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_source_group(self): + self._setup_default_security_group_rule( + { + 'remote_group_id': 'remote-group-id', + } + ) + arglist = [ + '--ingress', + '--remote-group', + 'remote-group-id', + ] + verifylist = [ + ('ingress', True), + ('remote_group', 'remote-group-id'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'protocol': self._default_sg_rule.protocol, + 'remote_group_id': 'remote-group-id', + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_source_ip(self): + self._setup_default_security_group_rule( + { + 'protocol': 'icmp', + 'remote_ip_prefix': '10.0.2.0/24', + } + ) + arglist = [ + '--protocol', + self._default_sg_rule.protocol, + '--remote-ip', + self._default_sg_rule.remote_ip_prefix, + ] + verifylist = [ + ('protocol', self._default_sg_rule.protocol), + ('remote_ip', self._default_sg_rule.remote_ip_prefix), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 
'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_remote_ip(self): + self._setup_default_security_group_rule( + { + 'protocol': 'icmp', + 'remote_ip_prefix': '10.0.2.0/24', + } + ) + arglist = [ + '--protocol', + self._default_sg_rule.protocol, + '--remote-ip', + self._default_sg_rule.remote_ip_prefix, + ] + verifylist = [ + ('protocol', self._default_sg_rule.protocol), + ('remote_ip', self._default_sg_rule.remote_ip_prefix), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_tcp_with_icmp_type(self): + arglist = [ + '--protocol', + 'tcp', + '--icmp-type', + '15', + ] + verifylist = [ + ('protocol', 'tcp'), + ('icmp_type', 15), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + def test_create_icmp_code(self): + arglist = [ + '--protocol', + '1', + '--icmp-code', + '1', + ] + verifylist = [ + ('protocol', '1'), + ('icmp_code', 1), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + def test_create_icmp_code_zero(self): + self._setup_default_security_group_rule( + { + 'port_range_min': 15, + 'port_range_max': 0, + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) + arglist = [ + '--protocol', + self._default_sg_rule.protocol, + '--icmp-type', + str(self._default_sg_rule.port_range_min), + '--icmp-code', + str(self._default_sg_rule.port_range_max), + ] + verifylist = [ + ('protocol', self._default_sg_rule.protocol), + ('icmp_code', self._default_sg_rule.port_range_max), + ('icmp_type', self._default_sg_rule.port_range_min), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_icmp_code_greater_than_zero(self): + self._setup_default_security_group_rule( + { + 'port_range_min': 15, + 'port_range_max': 18, + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) + arglist = [ + '--protocol', + self._default_sg_rule.protocol, + '--icmp-type', + str(self._default_sg_rule.port_range_min), + '--icmp-code', + str(self._default_sg_rule.port_range_max), + ] + verifylist = [ + ('protocol', self._default_sg_rule.protocol), + ('icmp_type', self._default_sg_rule.port_range_min), + ('icmp_code', self._default_sg_rule.port_range_max), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_icmp_code_negative_value(self): + self._setup_default_security_group_rule( + { + 'port_range_min': 15, + 'port_range_max': None, + 'protocol': 'icmp', + 'remote_ip_prefix': 
'0.0.0.0/0', + } + ) + arglist = [ + '--protocol', + self._default_sg_rule.protocol, + '--icmp-type', + str(self._default_sg_rule.port_range_min), + '--icmp-code', + '-2', + ] + verifylist = [ + ('protocol', self._default_sg_rule.protocol), + ('icmp_type', self._default_sg_rule.port_range_min), + ('icmp_code', -2), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_icmp_type(self): + self._setup_default_security_group_rule( + { + 'port_range_min': 15, + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) + arglist = [ + '--icmp-type', + str(self._default_sg_rule.port_range_min), + '--protocol', + self._default_sg_rule.protocol, + ] + verifylist = [ + ('dst_port', None), + ('icmp_type', self._default_sg_rule.port_range_min), + ('icmp_code', None), + ('protocol', self._default_sg_rule.protocol), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'port_range_min': self._default_sg_rule.port_range_min, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_icmp_type_zero(self): + self._setup_default_security_group_rule( + { + 'port_range_min': 0, + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) + arglist = [ + '--icmp-type', + str(self._default_sg_rule.port_range_min), + '--protocol', + self._default_sg_rule.protocol, + ] + verifylist = [ + ('dst_port', None), + ('icmp_type', self._default_sg_rule.port_range_min), + ('icmp_code', None), + ('protocol', self._default_sg_rule.protocol), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'port_range_min': self._default_sg_rule.port_range_min, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_icmp_type_greater_than_zero(self): + self._setup_default_security_group_rule( + { + 'port_range_min': 13, # timestamp + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) + arglist = [ + '--icmp-type', + str(self._default_sg_rule.port_range_min), + '--protocol', + self._default_sg_rule.protocol, + ] + verifylist = [ + ('dst_port', None), + ('icmp_type', self._default_sg_rule.port_range_min), + ('icmp_code', None), + ('protocol', self._default_sg_rule.protocol), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': 
self._default_sg_rule.ether_type, + 'port_range_min': self._default_sg_rule.port_range_min, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_icmp_type_negative_value(self): + self._setup_default_security_group_rule( + { + 'port_range_min': None, # timestamp + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) + arglist = [ + '--icmp-type', + '-13', + '--protocol', + self._default_sg_rule.protocol, + ] + verifylist = [ + ('dst_port', None), + ('icmp_type', -13), + ('icmp_code', None), + ('protocol', self._default_sg_rule.protocol), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_ipv6_icmp_type_code(self): + self._setup_default_security_group_rule( + { + 'ether_type': 'IPv6', + 'port_range_min': 139, + 'port_range_max': 2, + 'protocol': 'ipv6-icmp', + 'remote_ip_prefix': '::/0', + } + ) + arglist = [ + '--icmp-type', + str(self._default_sg_rule.port_range_min), + '--icmp-code', + str(self._default_sg_rule.port_range_max), + '--protocol', + self._default_sg_rule.protocol, + ] + verifylist = [ + ('dst_port', None), + ('icmp_type', self._default_sg_rule.port_range_min), + ('icmp_code', self._default_sg_rule.port_range_max), + ('protocol', self._default_sg_rule.protocol), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'port_range_min': self._default_sg_rule.port_range_min, + 'port_range_max': self._default_sg_rule.port_range_max, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_icmpv6_type(self): + self._setup_default_security_group_rule( + { + 'ether_type': 'IPv6', + 'port_range_min': 139, + 'protocol': 'icmpv6', + 'remote_ip_prefix': '::/0', + } + ) + arglist = [ + '--icmp-type', + str(self._default_sg_rule.port_range_min), + '--protocol', + self._default_sg_rule.protocol, + ] + verifylist = [ + ('dst_port', None), + ('icmp_type', self._default_sg_rule.port_range_min), + ('icmp_code', None), + ('protocol', self._default_sg_rule.protocol), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'port_range_min': 
self._default_sg_rule.port_range_min, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + def test_create_with_description(self): + self._setup_default_security_group_rule( + { + 'description': 'Setting SGR', + } + ) + arglist = [ + '--description', + self._default_sg_rule.description, + ] + verifylist = [ + ('description', self._default_sg_rule.description), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_default_security_group_rule.assert_called_once_with( + **{ + 'description': self._default_sg_rule.description, + 'direction': self._default_sg_rule.direction, + 'ethertype': self._default_sg_rule.ether_type, + 'protocol': self._default_sg_rule.protocol, + 'remote_ip_prefix': self._default_sg_rule.remote_ip_prefix, + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, data) + + +class TestDeleteDefaultSecurityGroupRule(network_fakes.TestNetworkV2): + # The default security group rules to be deleted. + default_security_group_rule_attrs = { + 'direction': 'ingress', + 'ether_type': 'IPv4', + 'port_range_max': None, + 'port_range_min': None, + 'protocol': None, + 'remote_group_id': None, + 'remote_address_group_id': None, + 'remote_ip_prefix': '0.0.0.0/0', + 'location': 'MUNCHMUNCHMUNCH', + 'used_in_default_sg': False, + 'used_in_non_default_sg': False, + } + _default_sg_rules = list( + sdk_fakes.generate_fake_resources( + _default_security_group_rule.DefaultSecurityGroupRule, + count=2, + attrs=default_security_group_rule_attrs, + ) + ) + + def setUp(self): + super().setUp() + + self.network_client.delete_default_security_group_rule.return_value = ( + None + ) + + # Get the command object to test + self.cmd = default_security_group_rule.DeleteDefaultSecurityGroupRule( + self.app, None + ) + + def test_default_security_group_rule_delete(self): + arglist = [ + self._default_sg_rules[0].id, + ] + verifylist = [ + ('rule', [self._default_sg_rules[0].id]), + ] + self.network_client.find_default_security_group_rule.return_value = ( + self._default_sg_rules[0] + ) + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.network_client.delete_default_security_group_rule.assert_called_once_with( + self._default_sg_rules[0] + ) + self.assertIsNone(result) + + def test_multi_default_security_group_rules_delete(self): + arglist = [] + verifylist = [] + + for s in self._default_sg_rules: + arglist.append(s.id) + verifylist = [ + ('rule', arglist), + ] + self.network_client.find_default_security_group_rule.side_effect = ( + self._default_sg_rules + ) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + calls = [] + for s in self._default_sg_rules: + calls.append(call(s)) + self.network_client.delete_default_security_group_rule.assert_has_calls( + calls + ) + self.assertIsNone(result) + + def test_multi_default_security_group_rules_delete_with_exception(self): + arglist = [ + self._default_sg_rules[0].id, + 'unexist_rule', + ] + verifylist = [ + ('rule', [self._default_sg_rules[0].id, 'unexist_rule']), + ] + parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) + + find_mock_result = [ + self._default_sg_rules[0], + exceptions.CommandError, + ] + self.network_client.find_default_security_group_rule.side_effect = ( + find_mock_result + ) + + try: + self.cmd.take_action(parsed_args) + self.fail('CommandError should be raised.') + except exceptions.CommandError as e: + self.assertEqual('1 of 2 default rules failed to delete.', str(e)) + + self.network_client.find_default_security_group_rule.assert_any_call( + self._default_sg_rules[0].id, ignore_missing=False + ) + self.network_client.find_default_security_group_rule.assert_any_call( + 'unexist_rule', ignore_missing=False + ) + self.network_client.delete_default_security_group_rule.assert_called_once_with( + self._default_sg_rules[0] + ) + + +class TestListDefaultSecurityGroupRule(network_fakes.TestNetworkV2): + # The security group rule to be listed. + _default_sg_rule_tcp = sdk_fakes.generate_fake_resource( + _default_security_group_rule.DefaultSecurityGroupRule, + **{'protocol': 'tcp', 'port_range_max': 80, 'port_range_min': 80}, + ) + _default_sg_rule_icmp = sdk_fakes.generate_fake_resource( + _default_security_group_rule.DefaultSecurityGroupRule, + **{'protocol': 'icmp', 'remote_ip_prefix': '10.0.2.0/24'}, + ) + _default_sg_rules = [ + _default_sg_rule_tcp, + _default_sg_rule_icmp, + ] + + expected_columns = ( + 'ID', + 'IP Protocol', + 'Ethertype', + 'IP Range', + 'Port Range', + 'Direction', + 'Remote Security Group', + 'Remote Address Group', + 'Used in default Security Group', + 'Used in custom Security Group', + ) + + expected_data = [] + expected_data_no_group = [] + for _default_sg_rule in _default_sg_rules: + expected_data.append( + ( + _default_sg_rule.id, + _default_sg_rule.protocol, + _default_sg_rule.ether_type, + _default_sg_rule.remote_ip_prefix, + network_utils.format_network_port_range(_default_sg_rule), + _default_sg_rule.direction, + _default_sg_rule.remote_group_id, + _default_sg_rule.remote_address_group_id, + _default_sg_rule.used_in_default_sg, + _default_sg_rule.used_in_non_default_sg, + ) + ) + + def setUp(self): + super().setUp() + + self.network_client.default_security_group_rules.return_value = ( + self._default_sg_rules + ) + + # Get the command object to test + self.cmd = default_security_group_rule.ListDefaultSecurityGroupRule( + self.app, None + ) + + def test_list_default(self): + self._default_sg_rule_tcp.port_range_min = 80 + parsed_args = self.check_parser(self.cmd, [], []) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.default_security_group_rules.assert_called_once_with( + **{} + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, list(data)) + + def test_list_with_protocol(self): + self._default_sg_rule_tcp.port_range_min = 80 + arglist = [ + '--protocol', + 'tcp', + ] + verifylist = [ + ('protocol', 'tcp'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.default_security_group_rules.assert_called_once_with( + **{ + 'protocol': 'tcp', + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, list(data)) + + def test_list_with_ingress(self): + self._default_sg_rule_tcp.port_range_min = 80 + arglist = [ + '--ingress', + ] + verifylist = [ + ('ingress', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + 
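The multi-delete failure test above feeds find_default_security_group_rule a side_effect list so the first lookup returns a rule and the second raises, letting the command count partial failures. A minimal, self-contained sketch of that mock pattern, using hypothetical names (FakeCommandError, find_rule) rather than the osc-lib test helpers:

from unittest import mock
import unittest


class FakeCommandError(Exception):
    """Stand-in for osc_lib.exceptions.CommandError (hypothetical)."""


class TestSequentialSideEffect(unittest.TestCase):
    def test_partial_failure(self):
        client = mock.Mock()
        # Each call to find_rule() consumes the next item in the list; an
        # exception class (or instance) is raised instead of being returned.
        client.find_rule.side_effect = ['rule-1', FakeCommandError]

        self.assertEqual('rule-1', client.find_rule('rule-1'))
        self.assertRaises(FakeCommandError, client.find_rule, 'missing')

        # assert_any_call only checks that the call happened at some point,
        # regardless of ordering or other calls in between.
        client.find_rule.assert_any_call('rule-1')
        client.find_rule.assert_any_call('missing')


if __name__ == '__main__':
    unittest.main()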
self.network_client.default_security_group_rules.assert_called_once_with( + **{ + 'direction': 'ingress', + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, list(data)) + + def test_list_with_wrong_egress(self): + self._default_sg_rule_tcp.port_range_min = 80 + arglist = [ + '--egress', + ] + verifylist = [ + ('egress', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.default_security_group_rules.assert_called_once_with( + **{ + 'direction': 'egress', + } + ) + self.assertEqual(self.expected_columns, columns) + self.assertEqual(self.expected_data, list(data)) + + +class TestShowDefaultSecurityGroupRule(network_fakes.TestNetworkV2): + # The default security group rule to be shown. + _default_sg_rule = sdk_fakes.generate_fake_resource( + _default_security_group_rule.DefaultSecurityGroupRule + ) + + columns = ( + 'description', + 'direction', + 'ether_type', + 'id', + 'port_range_max', + 'port_range_min', + 'protocol', + 'remote_address_group_id', + 'remote_group_id', + 'remote_ip_prefix', + 'used_in_default_sg', + 'used_in_non_default_sg', + ) + + data = ( + _default_sg_rule.description, + _default_sg_rule.direction, + _default_sg_rule.ether_type, + _default_sg_rule.id, + _default_sg_rule.port_range_max, + _default_sg_rule.port_range_min, + _default_sg_rule.protocol, + _default_sg_rule.remote_address_group_id, + _default_sg_rule.remote_group_id, + _default_sg_rule.remote_ip_prefix, + _default_sg_rule.used_in_default_sg, + _default_sg_rule.used_in_non_default_sg, + ) + + def setUp(self): + super().setUp() + + self.network_client.find_default_security_group_rule.return_value = ( + self._default_sg_rule + ) + + # Get the command object to test + self.cmd = default_security_group_rule.ShowDefaultSecurityGroupRule( + self.app, None + ) + + def test_show_no_options(self): + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) + + def test_show_all_options(self): + arglist = [ + self._default_sg_rule.id, + ] + verifylist = [ + ('rule', self._default_sg_rule.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.find_default_security_group_rule.assert_called_once_with( + self._default_sg_rule.id, ignore_missing=False + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/network/v2/test_floating_ip_compute.py b/openstackclient/tests/unit/network/v2/test_floating_ip_compute.py index 18212cf707..89137d6e98 100644 --- a/openstackclient/tests/unit/network/v2/test_floating_ip_compute.py +++ b/openstackclient/tests/unit/network/v2/test_floating_ip_compute.py @@ -12,33 +12,18 @@ # from unittest import mock -from unittest.mock import call from osc_lib import exceptions +from openstackclient.api import compute_v2 from openstackclient.network.v2 import floating_ip as fip from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes from openstackclient.tests.unit import utils as tests_utils -# Tests for Nova network - -class TestFloatingIPCompute(compute_fakes.TestComputev2): - - def setUp(self): - super(TestFloatingIPCompute, self).setUp() - - # Get a shortcut to the compute client - self.compute = self.app.client_manager.compute - - -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.floating_ip_create' -) -class 
TestCreateFloatingIPCompute(TestFloatingIPCompute): - - # The floating ip to be deleted. - _floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip() +@mock.patch.object(compute_v2, 'create_floating_ip') +class TestCreateFloatingIPCompute(compute_fakes.TestComputev2): + _floating_ip = compute_fakes.create_one_floating_ip() columns = ( 'fixed_ip', @@ -57,21 +42,23 @@ class TestCreateFloatingIPCompute(TestFloatingIPCompute): ) def setUp(self): - super(TestCreateFloatingIPCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - # self.compute.floating_ips.create.return_value = self.floating_ip - - # Get the command object to test self.cmd = fip.CreateFloatingIP(self.app, None) def test_floating_ip_create_no_arg(self, fip_mock): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_floating_ip_create_default(self, fip_mock): fip_mock.return_value = self._floating_ip @@ -85,25 +72,22 @@ def test_floating_ip_create_default(self, fip_mock): columns, data = self.cmd.take_action(parsed_args) - fip_mock.assert_called_once_with(self._floating_ip['pool']) + fip_mock.assert_called_once_with( + self.compute_client, self._floating_ip['pool'] + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.floating_ip_delete' -) -class TestDeleteFloatingIPCompute(TestFloatingIPCompute): - - # The floating ips to be deleted. - _floating_ips = compute_fakes.FakeFloatingIP.create_floating_ips(count=2) +@mock.patch.object(compute_v2, 'delete_floating_ip') +class TestDeleteFloatingIPCompute(compute_fakes.TestComputev2): + _floating_ips = compute_fakes.create_floating_ips(count=2) def setUp(self): - super(TestDeleteFloatingIPCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - # Get the command object to test self.cmd = fip.DeleteFloatingIP(self.app, None) def test_floating_ip_delete(self, fip_mock): @@ -119,44 +103,47 @@ def test_floating_ip_delete(self, fip_mock): result = self.cmd.take_action(parsed_args) fip_mock.assert_called_once_with( - self._floating_ips[0]['id'] + self.compute_client, self._floating_ips[0]['id'] ) self.assertIsNone(result) def test_floating_ip_delete_multi(self, fip_mock): fip_mock.return_value = mock.Mock(return_value=None) - arglist = [] - verifylist = [] - - for f in self._floating_ips: - arglist.append(f['id']) + arglist = [ + self._floating_ips[0]['id'], + self._floating_ips[1]['id'], + ] verifylist = [ ('floating_ip', arglist), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - calls = [] - for f in self._floating_ips: - calls.append(call(f['id'])) - fip_mock.assert_has_calls(calls) + fip_mock.assert_has_calls( + [ + mock.call(self.compute_client, self._floating_ips[0]['id']), + mock.call(self.compute_client, self._floating_ips[1]['id']), + ] + ) self.assertIsNone(result) def test_floating_ip_delete_multi_exception(self, fip_mock): fip_mock.return_value = mock.Mock(return_value=None) - fip_mock.side_effect = ([ + fip_mock.side_effect = [ mock.Mock(return_value=None), exceptions.CommandError, - ]) + ] arglist = [ self._floating_ips[0]['id'], 'unexist_floating_ip', ] - verifylist = [( - 
'floating_ip', - [self._floating_ips[0]['id'], 'unexist_floating_ip'], - )] + verifylist = [ + ( + 'floating_ip', + [self._floating_ips[0]['id'], 'unexist_floating_ip'], + ) + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) try: @@ -165,17 +152,15 @@ def test_floating_ip_delete_multi_exception(self, fip_mock): except exceptions.CommandError as e: self.assertEqual('1 of 2 floating_ips failed to delete.', str(e)) - fip_mock.assert_any_call(self._floating_ips[0]['id']) - fip_mock.assert_any_call('unexist_floating_ip') - + fip_mock.assert_any_call( + self.compute_client, self._floating_ips[0]['id'] + ) + fip_mock.assert_any_call(self.compute_client, 'unexist_floating_ip') -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.floating_ip_list' -) -class TestListFloatingIPCompute(TestFloatingIPCompute): - # The floating ips to be list up - _floating_ips = compute_fakes.FakeFloatingIP.create_floating_ips(count=3) +@mock.patch.object(compute_v2, 'list_floating_ips') +class TestListFloatingIPCompute(compute_fakes.TestComputev2): + _floating_ips = compute_fakes.create_floating_ips(count=3) columns = ( 'ID', @@ -187,20 +172,21 @@ class TestListFloatingIPCompute(TestFloatingIPCompute): data = [] for ip in _floating_ips: - data.append(( - ip['id'], - ip['ip'], - ip['fixed_ip'], - ip['instance_id'], - ip['pool'], - )) + data.append( + ( + ip['id'], + ip['ip'], + ip['fixed_ip'], + ip['instance_id'], + ip['pool'], + ) + ) def setUp(self): - super(TestListFloatingIPCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - # Get the command object to test self.cmd = fip.ListFloatingIP(self.app, None) def test_floating_ip_list(self, fip_mock): @@ -211,18 +197,14 @@ def test_floating_ip_list(self, fip_mock): columns, data = self.cmd.take_action(parsed_args) - fip_mock.assert_called_once_with() + fip_mock.assert_called_once_with(self.compute_client) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.floating_ip_find' -) -class TestShowFloatingIPCompute(TestFloatingIPCompute): - - # The floating ip to display. - _floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip() +@mock.patch.object(compute_v2, 'get_floating_ip') +class TestShowFloatingIPCompute(compute_fakes.TestComputev2): + _floating_ip = compute_fakes.create_one_floating_ip() columns = ( 'fixed_ip', @@ -241,11 +223,10 @@ class TestShowFloatingIPCompute(TestFloatingIPCompute): ) def setUp(self): - super(TestShowFloatingIPCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - # Get the command object to test self.cmd = fip.ShowFloatingIP(self.app, None) def test_floating_ip_show(self, fip_mock): @@ -260,6 +241,8 @@ def test_floating_ip_show(self, fip_mock): columns, data = self.cmd.take_action(parsed_args) - fip_mock.assert_called_once_with(self._floating_ip['id']) + fip_mock.assert_called_once_with( + self.compute_client, self._floating_ip['id'] + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/network/v2/test_floating_ip_network.py b/openstackclient/tests/unit/network/v2/test_floating_ip_network.py index f76dcc792a..ab0ec176a0 100644 --- a/openstackclient/tests/unit/network/v2/test_floating_ip_network.py +++ b/openstackclient/tests/unit/network/v2/test_floating_ip_network.py @@ -11,9 +11,11 @@ # under the License. 
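The rewritten compute floating-IP tests patch with mock.patch.object at class level instead of patching a dotted string path, so every test method receives the active mock as an extra argument. A rough standalone illustration of that decorator-injection pattern, with a hypothetical compute_api namespace standing in for the real module:

import types
import unittest
from unittest import mock

# Hypothetical stand-in for the module whose function is patched.
compute_api = types.SimpleNamespace(
    create_floating_ip=lambda client, pool: {'pool': pool}
)


@mock.patch.object(compute_api, 'create_floating_ip')
class TestCreate(unittest.TestCase):
    # A class-level patch decorator wraps every test_* method, so each one
    # receives the mock as its last positional argument.
    def test_create_default(self, create_mock):
        create_mock.return_value = {'id': 'fip-1', 'pool': 'public'}

        result = compute_api.create_floating_ip('client', 'public')

        create_mock.assert_called_once_with('client', 'public')
        self.assertEqual('fip-1', result['id'])


if __name__ == '__main__':
    unittest.main()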
# -from unittest import mock from unittest.mock import call +from openstack.network.v2 import floating_ip as _floating_ip +from openstack.test import fakes as sdk_fakes +from osc_lib.cli import format_columns from osc_lib import exceptions from openstackclient.network.v2 import floating_ip as fip @@ -22,23 +24,17 @@ from openstackclient.tests.unit import utils as tests_utils -# Tests for Neutron network - class TestFloatingIPNetwork(network_fakes.TestNetworkV2): - def setUp(self): - super(TestFloatingIPNetwork, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateFloatingIPNetwork(TestFloatingIPNetwork): - # Fake data for option tests. floating_network = network_fakes.create_one_network() subnet = network_fakes.FakeSubnet.create_one_subnet() @@ -87,25 +83,31 @@ class TestCreateFloatingIPNetwork(TestFloatingIPNetwork): ) def setUp(self): - super(TestCreateFloatingIPNetwork, self).setUp() + super().setUp() - self.network.create_ip = mock.Mock(return_value=self.floating_ip) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.create_ip.return_value = self.floating_ip - self.network.find_network = mock.Mock( - return_value=self.floating_network) - self.network.find_subnet = mock.Mock(return_value=self.subnet) - self.network.find_port = mock.Mock(return_value=self.port) + self.network_client.set_tags.return_value = None + + self.network_client.find_network.return_value = self.floating_network + + self.network_client.find_subnet.return_value = self.subnet + self.network_client.find_port.return_value = self.port # Get the command object to test - self.cmd = fip.CreateFloatingIP(self.app, self.namespace) + self.cmd = fip.CreateFloatingIP(self.app, None) def test_create_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ @@ -118,21 +120,30 @@ def test_create_default_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_ip.assert_called_once_with(**{ - 'floating_network_id': self.floating_ip.floating_network_id, - }) + self.network_client.create_ip.assert_called_once_with( + **{ + 'floating_network_id': self.floating_ip.floating_network_id, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_all_options(self): arglist = [ - '--subnet', self.subnet.id, - '--port', self.floating_ip.port_id, - '--floating-ip-address', self.floating_ip.floating_ip_address, - '--fixed-ip-address', self.floating_ip.fixed_ip_address, - '--description', self.floating_ip.description, - '--dns-domain', self.floating_ip.dns_domain, - '--dns-name', self.floating_ip.dns_name, + '--subnet', + self.subnet.id, + '--port', + self.floating_ip.port_id, + '--floating-ip-address', + self.floating_ip.floating_ip_address, + '--fixed-ip-address', + self.floating_ip.fixed_ip_address, + '--description', + self.floating_ip.description, + '--dns-domain', + self.floating_ip.dns_domain, + 
'--dns-name', + self.floating_ip.dns_name, self.floating_ip.floating_network_id, ] verifylist = [ @@ -149,16 +160,18 @@ def test_create_all_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_ip.assert_called_once_with(**{ - 'subnet_id': self.subnet.id, - 'port_id': self.floating_ip.port_id, - 'floating_ip_address': self.floating_ip.floating_ip_address, - 'fixed_ip_address': self.floating_ip.fixed_ip_address, - 'floating_network_id': self.floating_ip.floating_network_id, - 'description': self.floating_ip.description, - 'dns_domain': self.floating_ip.dns_domain, - 'dns_name': self.floating_ip.dns_name, - }) + self.network_client.create_ip.assert_called_once_with( + **{ + 'subnet_id': self.subnet.id, + 'port_id': self.floating_ip.port_id, + 'floating_ip_address': self.floating_ip.floating_ip_address, + 'fixed_ip_address': self.floating_ip.fixed_ip_address, + 'floating_network_id': self.floating_ip.floating_network_id, + 'description': self.floating_ip.description, + 'dns_domain': self.floating_ip.dns_domain, + 'dns_name': self.floating_ip.dns_name, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -166,7 +179,8 @@ def test_floating_ip_create_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, self.floating_ip.floating_network_id, ] verifylist = [ @@ -177,10 +191,12 @@ def test_floating_ip_create_project(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_ip.assert_called_once_with(**{ - 'floating_network_id': self.floating_ip.floating_network_id, - 'project_id': project.id, - }) + self.network_client.create_ip.assert_called_once_with( + **{ + 'floating_network_id': self.floating_ip.floating_network_id, + 'project_id': project.id, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -189,8 +205,10 @@ def test_floating_ip_create_project_domain(self): domain = identity_fakes_v3.FakeDomain.create_one_domain() self.projects_mock.get.return_value = project arglist = [ - "--project", project.name, - "--project-domain", domain.name, + "--project", + project.name, + "--project-domain", + domain.name, self.floating_ip.floating_network_id, ] verifylist = [ @@ -203,18 +221,21 @@ def test_floating_ip_create_project_domain(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_ip.assert_called_once_with(**{ - 'floating_network_id': self.floating_ip.floating_network_id, - 'project_id': project.id, - }) + self.network_client.create_ip.assert_called_once_with( + **{ + 'floating_network_id': self.floating_ip.floating_network_id, + 'project_id': project.id, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_floating_ip_with_qos(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy arglist = [ - '--qos-policy', qos_policy.id, + '--qos-policy', + qos_policy.id, self.floating_ip.floating_network_id, ] verifylist = [ @@ -225,10 +246,12 @@ def test_create_floating_ip_with_qos(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_ip.assert_called_once_with(**{ - 'floating_network_id': self.floating_ip.floating_network_id, - 'qos_policy_id': 
qos_policy.id, - }) + self.network_client.create_ip.assert_called_once_with( + **{ + 'floating_network_id': self.floating_ip.floating_network_id, + 'qos_policy_id': qos_policy.id, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -248,17 +271,19 @@ def _test_create_with_tag(self, add_tags=True): verifylist.append(('no_tag', True)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_ip.assert_called_once_with(**{ - 'floating_network_id': self.floating_ip.floating_network_id, - }) + self.network_client.create_ip.assert_called_once_with( + **{ + 'floating_network_id': self.floating_ip.floating_network_id, + } + ) if add_tags: - self.network.set_tags.assert_called_once_with( - self.floating_ip, - tests_utils.CompareBySet(['red', 'blue'])) + self.network_client.set_tags.assert_called_once_with( + self.floating_ip, tests_utils.CompareBySet(['red', 'blue']) + ) else: - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -270,21 +295,19 @@ def test_create_with_no_tag(self): class TestDeleteFloatingIPNetwork(TestFloatingIPNetwork): - # The floating ips to be deleted. floating_ips = network_fakes.FakeFloatingIP.create_floating_ips(count=2) def setUp(self): - super(TestDeleteFloatingIPNetwork, self).setUp() + super().setUp() - self.network.find_ip = mock.Mock() - self.network.delete_ip = mock.Mock(return_value=None) + self.network_client.delete_ip.return_value = None # Get the command object to test - self.cmd = fip.DeleteFloatingIP(self.app, self.namespace) + self.cmd = fip.DeleteFloatingIP(self.app, None) def test_floating_ip_delete(self): - self.network.find_ip.side_effect = [ + self.network_client.find_ip.side_effect = [ self.floating_ips[0], self.floating_ips[1], ] @@ -298,15 +321,17 @@ def test_floating_ip_delete(self): result = self.cmd.take_action(parsed_args) - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ips[0].id, ignore_missing=False, ) - self.network.delete_ip.assert_called_once_with(self.floating_ips[0]) + self.network_client.delete_ip.assert_called_once_with( + self.floating_ips[0] + ) self.assertIsNone(result) def test_floating_ip_delete_multi(self): - self.network.find_ip.side_effect = [ + self.network_client.find_ip.side_effect = [ self.floating_ips[0], self.floating_ips[1], ] @@ -332,16 +357,16 @@ def test_floating_ip_delete_multi(self): ignore_missing=False, ), ] - self.network.find_ip.assert_has_calls(calls) + self.network_client.find_ip.assert_has_calls(calls) calls = [] for f in self.floating_ips: calls.append(call(f)) - self.network.delete_ip.assert_has_calls(calls) + self.network_client.delete_ip.assert_has_calls(calls) self.assertIsNone(result) def test_floating_ip_delete_multi_exception(self): - self.network.find_ip.side_effect = [ + self.network_client.find_ip.side_effect = [ self.floating_ips[0], exceptions.CommandError, ] @@ -350,8 +375,7 @@ def test_floating_ip_delete_multi_exception(self): 'unexist_floating_ip', ] verifylist = [ - ('floating_ip', - [self.floating_ips[0].id, 'unexist_floating_ip']), + ('floating_ip', [self.floating_ips[0].id, 'unexist_floating_ip']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -361,32 +385,37 @@ def test_floating_ip_delete_multi_exception(self): 
except exceptions.CommandError as e: self.assertEqual('1 of 2 floating_ips failed to delete.', str(e)) - self.network.find_ip.assert_any_call( + self.network_client.find_ip.assert_any_call( self.floating_ips[0].id, ignore_missing=False, ) - self.network.find_ip.assert_any_call( + self.network_client.find_ip.assert_any_call( 'unexist_floating_ip', ignore_missing=False, ) - self.network.delete_ip.assert_called_once_with( + self.network_client.delete_ip.assert_called_once_with( self.floating_ips[0] ) class TestListFloatingIPNetwork(TestFloatingIPNetwork): - # The floating ips to list up floating_ips = network_fakes.FakeFloatingIP.create_floating_ips(count=3) - fake_network = network_fakes.create_one_network({ - 'id': 'fake_network_id', - }) - fake_port = network_fakes.create_one_port({ - 'id': 'fake_port_id', - }) - fake_router = network_fakes.FakeRouter.create_one_router({ - 'id': 'fake_router_id', - }) + fake_network = network_fakes.create_one_network( + { + 'id': 'fake_network_id', + } + ) + fake_port = network_fakes.create_one_port( + { + 'id': 'fake_port_id', + } + ) + fake_router = network_fakes.create_one_router( + { + 'id': 'fake_router_id', + } + ) columns = ( 'ID', @@ -408,39 +437,44 @@ class TestListFloatingIPNetwork(TestFloatingIPNetwork): data = [] data_long = [] for ip in floating_ips: - data.append(( - ip.id, - ip.floating_ip_address, - ip.fixed_ip_address, - ip.port_id, - ip.floating_network_id, - ip.project_id, - )) - data_long.append(( - ip.id, - ip.floating_ip_address, - ip.fixed_ip_address, - ip.port_id, - ip.floating_network_id, - ip.project_id, - ip.router_id, - ip.status, - ip.description, - ip.tags, - ip.dns_domain, - ip.dns_name, - )) + data.append( + ( + ip.id, + ip.floating_ip_address, + ip.fixed_ip_address, + ip.port_id, + ip.floating_network_id, + ip.project_id, + ) + ) + data_long.append( + ( + ip.id, + ip.floating_ip_address, + ip.fixed_ip_address, + ip.port_id, + ip.floating_network_id, + ip.project_id, + ip.router_id, + ip.status, + ip.description, + ip.tags, + ip.dns_domain, + ip.dns_name, + ) + ) def setUp(self): - super(TestListFloatingIPNetwork, self).setUp() + super().setUp() + + self.network_client.ips.return_value = self.floating_ips + self.network_client.find_network.return_value = self.fake_network - self.network.ips = mock.Mock(return_value=self.floating_ips) - self.network.find_network = mock.Mock(return_value=self.fake_network) - self.network.find_port = mock.Mock(return_value=self.fake_port) - self.network.find_router = mock.Mock(return_value=self.fake_router) + self.network_client.find_port.return_value = self.fake_port + self.network_client.find_router.return_value = self.fake_router # Get the command object to test - self.cmd = fip.ListFloatingIP(self.app, self.namespace) + self.cmd = fip.ListFloatingIP(self.app, None) def test_floating_ip_list(self): arglist = [] @@ -449,47 +483,54 @@ def test_floating_ip_list(self): columns, data = self.cmd.take_action(parsed_args) - self.network.ips.assert_called_once_with() + self.network_client.ips.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_floating_ip_list_network(self): arglist = [ - '--network', 'fake_network_id', + '--network', + 'fake_network_id', ] verifylist = [ - ('network', 'fake_network_id'), + ('networks', ['fake_network_id']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ips.assert_called_once_with(**{ - 'floating_network_id': 
'fake_network_id', - }) + self.network_client.ips.assert_called_once_with( + **{ + 'floating_network_id': ['fake_network_id'], + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_floating_ip_list_port(self): arglist = [ - '--port', 'fake_port_id', + '--port', + 'fake_port_id', ] verifylist = [ - ('port', 'fake_port_id'), + ('ports', ['fake_port_id']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ips.assert_called_once_with(**{ - 'port_id': 'fake_port_id', - }) + self.network_client.ips.assert_called_once_with( + **{ + 'port_id': ['fake_port_id'], + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_floating_ip_list_fixed_ip_address(self): arglist = [ - '--fixed-ip-address', self.floating_ips[0].fixed_ip_address, + '--fixed-ip-address', + self.floating_ips[0].fixed_ip_address, ] verifylist = [ ('fixed_ip_address', self.floating_ips[0].fixed_ip_address), @@ -498,15 +539,18 @@ def test_floating_ip_list_fixed_ip_address(self): columns, data = self.cmd.take_action(parsed_args) - self.network.ips.assert_called_once_with(**{ - 'fixed_ip_address': self.floating_ips[0].fixed_ip_address, - }) + self.network_client.ips.assert_called_once_with( + **{ + 'fixed_ip_address': self.floating_ips[0].fixed_ip_address, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_floating_ip_list_floating_ip_address(self): arglist = [ - '--floating-ip-address', self.floating_ips[0].floating_ip_address, + '--floating-ip-address', + self.floating_ips[0].floating_ip_address, ] verifylist = [ ('floating_ip_address', self.floating_ips[0].floating_ip_address), @@ -515,27 +559,36 @@ def test_floating_ip_list_floating_ip_address(self): columns, data = self.cmd.take_action(parsed_args) - self.network.ips.assert_called_once_with(**{ - 'floating_ip_address': self.floating_ips[0].floating_ip_address, - }) + self.network_client.ips.assert_called_once_with( + **{ + 'floating_ip_address': self.floating_ips[ + 0 + ].floating_ip_address, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_floating_ip_list_long(self): - arglist = ['--long', ] - verifylist = [('long', True), ] + arglist = [ + '--long', + ] + verifylist = [ + ('long', True), + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ips.assert_called_once_with() + self.network_client.ips.assert_called_once_with() self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, list(data)) def test_floating_ip_list_status(self): arglist = [ - '--status', 'ACTIVE', + '--status', + 'ACTIVE', '--long', ] verifylist = [ @@ -545,9 +598,11 @@ def test_floating_ip_list_status(self): columns, data = self.cmd.take_action(parsed_args) - self.network.ips.assert_called_once_with(**{ - 'status': 'ACTIVE', - }) + self.network_client.ips.assert_called_once_with( + **{ + 'status': 'ACTIVE', + } + ) self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, list(data)) @@ -555,7 +610,8 @@ def test_floating_ip_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -565,7 +621,7 @@ def test_floating_ip_list_project(self): columns, data = 
self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.ips.assert_called_once_with(**filters) + self.network_client.ips.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -574,8 +630,10 @@ def test_floating_ip_list_project_domain(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -585,34 +643,41 @@ def test_floating_ip_list_project_domain(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.ips.assert_called_once_with(**filters) + self.network_client.ips.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_floating_ip_list_router(self): arglist = [ - '--router', 'fake_router_id', + '--router', + 'fake_router_id', '--long', ] verifylist = [ - ('router', 'fake_router_id'), + ('routers', ['fake_router_id']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ips.assert_called_once_with(**{ - 'router_id': 'fake_router_id', - }) + self.network_client.ips.assert_called_once_with( + **{ + 'router_id': ['fake_router_id'], + } + ) self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, list(data)) def test_list_with_tag_options(self): arglist = [ - '--tags', 'red,blue', - '--any-tags', 'red,green', - '--not-tags', 'orange,yellow', - '--not-any-tags', 'black,white', + '--tags', + 'red,blue', + '--any-tags', + 'red,green', + '--not-tags', + 'orange,yellow', + '--not-any-tags', + 'black,white', ] verifylist = [ ('tags', ['red', 'blue']), @@ -623,60 +688,72 @@ def test_list_with_tag_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ips.assert_called_once_with( - **{'tags': 'red,blue', - 'any_tags': 'red,green', - 'not_tags': 'orange,yellow', - 'not_any_tags': 'black,white'} + self.network_client.ips.assert_called_once_with( + **{ + 'tags': 'red,blue', + 'any_tags': 'red,green', + 'not_tags': 'orange,yellow', + 'not_any_tags': 'black,white', + } ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestShowFloatingIPNetwork(TestFloatingIPNetwork): - - # The floating ip to display. 
- floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip() - - columns = ( - 'description', - 'dns_domain', - 'dns_name', - 'fixed_ip_address', - 'floating_ip_address', - 'floating_network_id', - 'id', - 'port_id', - 'project_id', - 'qos_policy_id', - 'router_id', - 'status', - 'tags', - ) - - data = ( - floating_ip.description, - floating_ip.dns_domain, - floating_ip.dns_name, - floating_ip.fixed_ip_address, - floating_ip.floating_ip_address, - floating_ip.floating_network_id, - floating_ip.id, - floating_ip.port_id, - floating_ip.project_id, - floating_ip.qos_policy_id, - floating_ip.router_id, - floating_ip.status, - floating_ip.tags, - ) - def setUp(self): - super(TestShowFloatingIPNetwork, self).setUp() + super().setUp() - self.network.find_ip = mock.Mock(return_value=self.floating_ip) + self.floating_ip = sdk_fakes.generate_fake_resource( + _floating_ip.FloatingIP + ) + self.network_client.find_ip.return_value = self.floating_ip + + self.columns = ( + 'created_at', + 'description', + 'dns_domain', + 'dns_name', + 'fixed_ip_address', + 'floating_ip_address', + 'floating_network_id', + 'id', + 'name', + 'port_details', + 'port_id', + 'project_id', + 'qos_policy_id', + 'revision_number', + 'router_id', + 'status', + 'subnet_id', + 'tags', + 'updated_at', + ) + self.data = ( + self.floating_ip.created_at, + self.floating_ip.description, + self.floating_ip.dns_domain, + self.floating_ip.dns_name, + self.floating_ip.fixed_ip_address, + self.floating_ip.floating_ip_address, + self.floating_ip.floating_network_id, + self.floating_ip.id, + self.floating_ip.name, + format_columns.DictColumn(self.floating_ip.port_details), + self.floating_ip.port_id, + self.floating_ip.project_id, + self.floating_ip.qos_policy_id, + self.floating_ip.revision_number, + self.floating_ip.router_id, + self.floating_ip.status, + self.floating_ip.subnet_id, + self.floating_ip.tags, + self.floating_ip.updated_at, + ) # Get the command object to test - self.cmd = fip.ShowFloatingIP(self.app, self.namespace) + self.cmd = fip.ShowFloatingIP(self.app, None) def test_floating_ip_show(self): arglist = [ @@ -689,7 +766,7 @@ def test_floating_ip_show(self): columns, data = self.cmd.take_action(parsed_args) - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) @@ -698,7 +775,6 @@ def test_floating_ip_show(self): class TestSetFloatingIP(TestFloatingIPNetwork): - # Fake data for option tests. 
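Several of the rewritten setUp methods build their fixtures with openstacksdk's fake-resource helpers instead of hand-written Fake* factories. A short sketch of that usage, assuming openstacksdk is installed and mirroring the calls these tests make; explicit keyword attributes override the generated values:

from openstack.network.v2 import floating_ip as _floating_ip
from openstack.test import fakes as sdk_fakes

# Generate a single fake FloatingIP resource; unspecified fields get
# generated values, while keyword arguments pin specific attributes.
fip = sdk_fakes.generate_fake_resource(
    _floating_ip.FloatingIP,
    status='ACTIVE',
)
print(fip.id, fip.status)

# Generate several fakes at once (returns a generator).
fips = list(
    sdk_fakes.generate_fake_resources(_floating_ip.FloatingIP, count=3)
)
print(len(fips))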
floating_network = network_fakes.create_one_network() subnet = network_fakes.FakeSubnet.create_one_subnet() @@ -714,19 +790,20 @@ class TestSetFloatingIP(TestFloatingIPNetwork): ) def setUp(self): - super(TestSetFloatingIP, self).setUp() - self.network.find_ip = mock.Mock(return_value=self.floating_ip) - self.network.find_port = mock.Mock(return_value=self.port) - self.network.update_ip = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + super().setUp() + self.network_client.find_ip.return_value = self.floating_ip + self.network_client.find_port.return_value = self.port + self.network_client.update_ip.return_value = None + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = fip.SetFloatingIP(self.app, self.namespace) + self.cmd = fip.SetFloatingIP(self.app, None) def test_port_option(self): arglist = [ self.floating_ip.id, - '--port', self.floating_ip.port_id, + '--port', + self.floating_ip.port_id, ] verifylist = [ ('floating_ip', self.floating_ip.id), @@ -740,19 +817,22 @@ def test_port_option(self): 'port_id': self.floating_ip.port_id, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) - self.network.update_ip.assert_called_once_with( - self.floating_ip, **attrs) + self.network_client.update_ip.assert_called_once_with( + self.floating_ip, **attrs + ) def test_fixed_ip_option(self): arglist = [ self.floating_ip.id, - '--port', self.floating_ip.port_id, - "--fixed-ip-address", self.floating_ip.fixed_ip_address, + '--port', + self.floating_ip.port_id, + "--fixed-ip-address", + self.floating_ip.fixed_ip_address, ] verifylist = [ ('floating_ip', self.floating_ip.id), @@ -767,18 +847,21 @@ def test_fixed_ip_option(self): 'port_id': self.floating_ip.port_id, 'fixed_ip_address': self.floating_ip.fixed_ip_address, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) - self.network.update_ip.assert_called_once_with( - self.floating_ip, **attrs) + self.network_client.update_ip.assert_called_once_with( + self.floating_ip, **attrs + ) def test_description_option(self): arglist = [ self.floating_ip.id, - '--port', self.floating_ip.port_id, - '--description', self.floating_ip.description, + '--port', + self.floating_ip.port_id, + '--description', + self.floating_ip.description, ] verifylist = [ ('floating_ip', self.floating_ip.id), @@ -793,18 +876,20 @@ def test_description_option(self): 'port_id': self.floating_ip.port_id, 'description': self.floating_ip.description, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) - self.network.update_ip.assert_called_once_with( - self.floating_ip, **attrs) + self.network_client.update_ip.assert_called_once_with( + self.floating_ip, **attrs + ) def test_qos_policy_option(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy arglist = [ - "--qos-policy", qos_policy.id, + "--qos-policy", + qos_policy.id, self.floating_ip.id, ] verifylist = [ @@ -818,19 +903,22 @@ def test_qos_policy_option(self): attrs = { 'qos_policy_id': qos_policy.id, } - self.network.find_ip.assert_called_once_with( + 
self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) - self.network.update_ip.assert_called_once_with( - self.floating_ip, **attrs) + self.network_client.update_ip.assert_called_once_with( + self.floating_ip, **attrs + ) def test_port_and_qos_policy_option(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy arglist = [ - "--qos-policy", qos_policy.id, - '--port', self.floating_ip.port_id, + "--qos-policy", + qos_policy.id, + '--port', + self.floating_ip.port_id, self.floating_ip.id, ] verifylist = [ @@ -846,12 +934,13 @@ def test_port_and_qos_policy_option(self): 'qos_policy_id': qos_policy.id, 'port_id': self.floating_ip.port_id, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) - self.network.update_ip.assert_called_once_with( - self.floating_ip, **attrs) + self.network_client.update_ip.assert_called_once_with( + self.floating_ip, **attrs + ) def test_no_qos_policy_option(self): arglist = [ @@ -869,17 +958,19 @@ def test_no_qos_policy_option(self): attrs = { 'qos_policy_id': None, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) - self.network.update_ip.assert_called_once_with( - self.floating_ip, **attrs) + self.network_client.update_ip.assert_called_once_with( + self.floating_ip, **attrs + ) def test_port_and_no_qos_policy_option(self): arglist = [ "--no-qos-policy", - '--port', self.floating_ip.port_id, + '--port', + self.floating_ip.port_id, self.floating_ip.id, ] verifylist = [ @@ -895,12 +986,13 @@ def test_port_and_no_qos_policy_option(self): 'qos_policy_id': None, 'port_id': self.floating_ip.port_id, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) - self.network.update_ip.assert_called_once_with( - self.floating_ip, **attrs) + self.network_client.update_ip.assert_called_once_with( + self.floating_ip, **attrs + ) def _test_set_tags(self, with_tags=True): if with_tags: @@ -917,10 +1009,10 @@ def _test_set_tags(self, with_tags=True): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_ip.called) - self.network.set_tags.assert_called_once_with( - self.floating_ip, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_ip.called) + self.network_client.set_tags.assert_called_once_with( + self.floating_ip, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_set_with_tags(self): @@ -931,7 +1023,6 @@ def test_set_with_no_tag(self): class TestUnsetFloatingIP(TestFloatingIPNetwork): - floating_network = network_fakes.create_one_network() subnet = network_fakes.FakeSubnet.create_one_subnet() port = network_fakes.create_one_port() @@ -946,13 +1037,13 @@ class TestUnsetFloatingIP(TestFloatingIPNetwork): ) def setUp(self): - super(TestUnsetFloatingIP, self).setUp() - self.network.find_ip = mock.Mock(return_value=self.floating_ip) - self.network.update_ip = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + super().setUp() + 
self.network_client.find_ip.return_value = self.floating_ip + self.network_client.update_ip.return_value = None + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = fip.UnsetFloatingIP(self.app, self.namespace) + self.cmd = fip.UnsetFloatingIP(self.app, None) def test_floating_ip_unset_port(self): arglist = [ @@ -970,12 +1061,13 @@ def test_floating_ip_unset_port(self): attrs = { 'port_id': None, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) - self.network.update_ip.assert_called_once_with( - self.floating_ip, **attrs) + self.network_client.update_ip.assert_called_once_with( + self.floating_ip, **attrs + ) self.assertIsNone(result) @@ -995,12 +1087,13 @@ def test_floating_ip_unset_qos_policy(self): attrs = { 'qos_policy_id': None, } - self.network.find_ip.assert_called_once_with( + self.network_client.find_ip.assert_called_once_with( self.floating_ip.id, ignore_missing=False, ) - self.network.update_ip.assert_called_once_with( - self.floating_ip, **attrs) + self.network_client.update_ip.assert_called_once_with( + self.floating_ip, **attrs + ) self.assertIsNone(result) @@ -1014,16 +1107,15 @@ def _test_unset_tags(self, with_tags=True): verifylist = [('all_tag', True)] expected_args = [] arglist.append(self.floating_ip.id) - verifylist.append( - ('floating_ip', self.floating_ip.id)) + verifylist.append(('floating_ip', self.floating_ip.id)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_ip.called) - self.network.set_tags.assert_called_once_with( - self.floating_ip, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_ip.called) + self.network_client.set_tags.assert_called_once_with( + self.floating_ip, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_unset_with_tags(self): diff --git a/openstackclient/tests/unit/network/v2/test_floating_ip_pool_compute.py b/openstackclient/tests/unit/network/v2/test_floating_ip_pool_compute.py index 3dd99362c1..90ded06280 100644 --- a/openstackclient/tests/unit/network/v2/test_floating_ip_pool_compute.py +++ b/openstackclient/tests/unit/network/v2/test_floating_ip_pool_compute.py @@ -13,46 +13,27 @@ from unittest import mock +from openstackclient.api import compute_v2 from openstackclient.network.v2 import floating_ip_pool from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes -# Tests for Compute network - -class TestFloatingIPPoolCompute(compute_fakes.TestComputev2): - - def setUp(self): - super(TestFloatingIPPoolCompute, self).setUp() - - # Get a shortcut to the compute client - self.compute = self.app.client_manager.compute - - -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.floating_ip_pool_list' -) -class TestListFloatingIPPoolCompute(TestFloatingIPPoolCompute): - +@mock.patch.object(compute_v2, 'list_floating_ip_pools') +class TestListFloatingIPPoolCompute(compute_fakes.TestComputev2): # The floating ip pools to list up - _floating_ip_pools = \ - compute_fakes.FakeFloatingIPPool.create_floating_ip_pools(count=3) + _floating_ip_pools = compute_fakes.create_floating_ip_pools(count=3) - columns = ( - 'Name', - ) + columns = ('Name',) data = [] for pool in _floating_ip_pools: - data.append(( - pool['name'], - )) + data.append((pool['name'],)) def setUp(self): - super(TestListFloatingIPPoolCompute, self).setUp() + 
super().setUp() self.app.client_manager.network_endpoint_enabled = False - # Get the command object to test self.cmd = floating_ip_pool.ListFloatingIPPool(self.app, None) def test_floating_ip_list(self, fipp_mock): @@ -63,6 +44,6 @@ def test_floating_ip_list(self, fipp_mock): columns, data = self.cmd.take_action(parsed_args) - fipp_mock.assert_called_once_with() + fipp_mock.assert_called_once_with(self.compute_client) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) diff --git a/openstackclient/tests/unit/network/v2/test_floating_ip_pool_network.py b/openstackclient/tests/unit/network/v2/test_floating_ip_pool_network.py index 95ff5549b0..fee8f43907 100644 --- a/openstackclient/tests/unit/network/v2/test_floating_ip_pool_network.py +++ b/openstackclient/tests/unit/network/v2/test_floating_ip_pool_network.py @@ -17,30 +17,23 @@ from openstackclient.tests.unit.network.v2 import fakes as network_fakes -# Tests for Network API v2 - class TestFloatingIPPoolNetwork(network_fakes.TestNetworkV2): - def setUp(self): - super(TestFloatingIPPoolNetwork, self).setUp() - - # Get a shortcut to the network client - self.network = self.app.client_manager.network + super().setUp() class TestListFloatingIPPoolNetwork(TestFloatingIPPoolNetwork): - def setUp(self): - super(TestListFloatingIPPoolNetwork, self).setUp() + super().setUp() # Get the command object to test - self.cmd = floating_ip_pool.ListFloatingIPPool(self.app, - self.namespace) + self.cmd = floating_ip_pool.ListFloatingIPPool(self.app, None) def test_floating_ip_list(self): arglist = [] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) diff --git a/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py b/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py index d0f5af8caf..33b9011c65 100644 --- a/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py +++ b/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py @@ -25,52 +25,45 @@ class TestFloatingIPPortForwarding(network_fakes.TestNetworkV2): - def setUp(self): - super(TestFloatingIPPortForwarding, self).setUp() - self.network = self.app.client_manager.network - self.floating_ip = (network_fakes.FakeFloatingIP. - create_one_floating_ip()) + super().setUp() + + self.floating_ip = ( + network_fakes.FakeFloatingIP.create_one_floating_ip() + ) self.port = network_fakes.create_one_port() self.project = identity_fakes_v2.FakeProject.create_one_project() - self.network.find_port = mock.Mock(return_value=self.port) + self.network_client.find_port.return_value = self.port class TestCreateFloatingIPPortForwarding(TestFloatingIPPortForwarding): - def setUp(self): - super(TestCreateFloatingIPPortForwarding, self).setUp() - self.new_port_forwarding = ( - network_fakes.FakeFloatingIPPortForwarding. - create_one_port_forwarding( - attrs={ - 'internal_port_id': self.port.id, - 'floatingip_id': self.floating_ip.id, - } - ) + super().setUp() + self.new_port_forwarding = network_fakes.FakeFloatingIPPortForwarding.create_one_port_forwarding( # noqa: E501 + attrs={ + 'internal_port_id': self.port.id, + 'floatingip_id': self.floating_ip.id, + } ) - self.new_port_forwarding_with_ranges = ( - network_fakes.FakeFloatingIPPortForwarding. 
- create_one_port_forwarding( - use_range=True, - attrs={ - 'internal_port_id': self.port.id, - 'floatingip_id': self.floating_ip.id, - } - ) + self.new_port_forwarding_with_ranges = network_fakes.FakeFloatingIPPortForwarding.create_one_port_forwarding( # noqa: E501 + use_range=True, + attrs={ + 'internal_port_id': self.port.id, + 'floatingip_id': self.floating_ip.id, + }, ) - self.network.create_floating_ip_port_forwarding = mock.Mock( - return_value=self.new_port_forwarding) - - self.network.find_ip = mock.Mock( - return_value=self.floating_ip + self.network_client.create_floating_ip_port_forwarding.return_value = ( + self.new_port_forwarding ) + self.network_client.find_ip.return_value = self.floating_ip + # Get the command object to test self.cmd = floating_ip_port_forwarding.CreateFloatingIPPortForwarding( - self.app, self.namespace) + self.app, None + ) self.columns = ( 'description', @@ -82,7 +75,7 @@ def setUp(self): 'internal_port', 'internal_port_id', 'internal_port_range', - 'protocol' + 'protocol', ) self.data = ( @@ -103,17 +96,24 @@ def test_create_no_options(self): verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_all_options_with_range(self): arglist = [ - '--port', self.new_port_forwarding_with_ranges.internal_port_id, + '--port', + self.new_port_forwarding_with_ranges.internal_port_id, '--internal-protocol-port', self.new_port_forwarding_with_ranges.internal_port_range, '--external-protocol-port', self.new_port_forwarding_with_ranges.external_port_range, - '--protocol', self.new_port_forwarding_with_ranges.protocol, + '--protocol', + self.new_port_forwarding_with_ranges.protocol, self.new_port_forwarding_with_ranges.floatingip_id, '--internal-ip-address', self.new_port_forwarding_with_ranges.internal_ip_address, @@ -122,49 +122,53 @@ def test_create_all_options_with_range(self): ] verifylist = [ ('port', self.new_port_forwarding_with_ranges.internal_port_id), - ('internal_protocol_port', - self.new_port_forwarding_with_ranges.internal_port_range), - ('external_protocol_port', - self.new_port_forwarding_with_ranges.external_port_range), + ( + 'internal_protocol_port', + self.new_port_forwarding_with_ranges.internal_port_range, + ), + ( + 'external_protocol_port', + self.new_port_forwarding_with_ranges.external_port_range, + ), ('protocol', self.new_port_forwarding_with_ranges.protocol), - ('floating_ip', - self.new_port_forwarding_with_ranges.floatingip_id), - ('internal_ip_address', self.new_port_forwarding_with_ranges. - internal_ip_address), + ( + 'floating_ip', + self.new_port_forwarding_with_ranges.floatingip_id, + ), + ( + 'internal_ip_address', + self.new_port_forwarding_with_ranges.internal_ip_address, + ), ('description', self.new_port_forwarding_with_ranges.description), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_floating_ip_port_forwarding.\ - assert_called_once_with( - self.new_port_forwarding.floatingip_id, - **{ - 'external_port_range': - self.new_port_forwarding_with_ranges. - external_port_range, - 'internal_ip_address': - self.new_port_forwarding_with_ranges. - internal_ip_address, - 'internal_port_range': - self.new_port_forwarding_with_ranges. 
- internal_port_range, - 'internal_port_id': - self.new_port_forwarding_with_ranges.internal_port_id, - 'protocol': self.new_port_forwarding_with_ranges.protocol, - 'description': - self.new_port_forwarding_with_ranges.description, - }) + self.network_client.create_floating_ip_port_forwarding.assert_called_once_with( # noqa: E501 + self.new_port_forwarding.floatingip_id, + **{ + 'external_port_range': self.new_port_forwarding_with_ranges.external_port_range, # noqa: E501 + 'internal_ip_address': self.new_port_forwarding_with_ranges.internal_ip_address, # noqa: E501 + 'internal_port_range': self.new_port_forwarding_with_ranges.internal_port_range, # noqa: E501 + 'internal_port_id': self.new_port_forwarding_with_ranges.internal_port_id, # noqa: E501 + 'protocol': self.new_port_forwarding_with_ranges.protocol, + 'description': self.new_port_forwarding_with_ranges.description, # noqa: E501 + }, + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_all_options_with_range_invalid_port_exception(self): invalid_port_range = '999999:999999' arglist = [ - '--port', self.new_port_forwarding_with_ranges.internal_port_id, - '--internal-protocol-port', invalid_port_range, - '--external-protocol-port', invalid_port_range, - '--protocol', self.new_port_forwarding_with_ranges.protocol, + '--port', + self.new_port_forwarding_with_ranges.internal_port_id, + '--internal-protocol-port', + invalid_port_range, + '--external-protocol-port', + invalid_port_range, + '--protocol', + self.new_port_forwarding_with_ranges.protocol, self.new_port_forwarding_with_ranges.floatingip_id, '--internal-ip-address', self.new_port_forwarding_with_ranges.internal_ip_address, @@ -176,10 +180,14 @@ def test_create_all_options_with_range_invalid_port_exception(self): ('internal_protocol_port', invalid_port_range), ('external_protocol_port', invalid_port_range), ('protocol', self.new_port_forwarding_with_ranges.protocol), - ('floating_ip', - self.new_port_forwarding_with_ranges.floatingip_id), - ('internal_ip_address', self.new_port_forwarding_with_ranges. 
- internal_ip_address), + ( + 'floating_ip', + self.new_port_forwarding_with_ranges.floatingip_id, + ), + ( + 'internal_ip_address', + self.new_port_forwarding_with_ranges.internal_ip_address, + ), ('description', self.new_port_forwarding_with_ranges.description), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -189,15 +197,19 @@ def test_create_all_options_with_range_invalid_port_exception(self): self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual(msg, str(e)) - self.network.create_floating_ip_port_forwarding.assert_not_called() + self.network_client.create_floating_ip_port_forwarding.assert_not_called() def test_create_all_options_with_invalid_range_exception(self): invalid_port_range = '80:70' arglist = [ - '--port', self.new_port_forwarding_with_ranges.internal_port_id, - '--internal-protocol-port', invalid_port_range, - '--external-protocol-port', invalid_port_range, - '--protocol', self.new_port_forwarding_with_ranges.protocol, + '--port', + self.new_port_forwarding_with_ranges.internal_port_id, + '--internal-protocol-port', + invalid_port_range, + '--external-protocol-port', + invalid_port_range, + '--protocol', + self.new_port_forwarding_with_ranges.protocol, self.new_port_forwarding_with_ranges.floatingip_id, '--internal-ip-address', self.new_port_forwarding_with_ranges.internal_ip_address, @@ -209,30 +221,40 @@ def test_create_all_options_with_invalid_range_exception(self): ('internal_protocol_port', invalid_port_range), ('external_protocol_port', invalid_port_range), ('protocol', self.new_port_forwarding_with_ranges.protocol), - ('floating_ip', - self.new_port_forwarding_with_ranges.floatingip_id), - ('internal_ip_address', self.new_port_forwarding_with_ranges. - internal_ip_address), + ( + 'floating_ip', + self.new_port_forwarding_with_ranges.floatingip_id, + ), + ( + 'internal_ip_address', + self.new_port_forwarding_with_ranges.internal_ip_address, + ), ('description', self.new_port_forwarding_with_ranges.description), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - msg = 'The last number in port range must be greater or equal to ' \ - 'the first' + msg = ( + 'The last number in port range must be greater or equal to ' + 'the first' + ) try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual(msg, str(e)) - self.network.create_floating_ip_port_forwarding.assert_not_called() + self.network_client.create_floating_ip_port_forwarding.assert_not_called() def test_create_all_options_with_unmatch_ranges_exception(self): internal_range = '80:90' external_range = '8080:8100' arglist = [ - '--port', self.new_port_forwarding_with_ranges.internal_port_id, - '--internal-protocol-port', internal_range, - '--external-protocol-port', external_range, - '--protocol', self.new_port_forwarding_with_ranges.protocol, + '--port', + self.new_port_forwarding_with_ranges.internal_port_id, + '--internal-protocol-port', + internal_range, + '--external-protocol-port', + external_range, + '--protocol', + self.new_port_forwarding_with_ranges.protocol, self.new_port_forwarding_with_ranges.floatingip_id, '--internal-ip-address', self.new_port_forwarding_with_ranges.internal_ip_address, @@ -244,30 +266,38 @@ def test_create_all_options_with_unmatch_ranges_exception(self): ('internal_protocol_port', internal_range), ('external_protocol_port', external_range), ('protocol', self.new_port_forwarding_with_ranges.protocol), - ('floating_ip', - 
self.new_port_forwarding_with_ranges.floatingip_id), - ('internal_ip_address', self.new_port_forwarding_with_ranges. - internal_ip_address), + ( + 'floating_ip', + self.new_port_forwarding_with_ranges.floatingip_id, + ), + ( + 'internal_ip_address', + self.new_port_forwarding_with_ranges.internal_ip_address, + ), ('description', self.new_port_forwarding_with_ranges.description), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - msg = "The relation between internal and external ports does not " \ - "match the pattern 1:N and N:N" + msg = ( + "The relation between internal and external ports does not " + "match the pattern 1:N and N:N" + ) try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual(msg, str(e)) - self.network.create_floating_ip_port_forwarding.assert_not_called() + self.network_client.create_floating_ip_port_forwarding.assert_not_called() def test_create_all_options(self): arglist = [ - '--port', self.new_port_forwarding.internal_port_id, + '--port', + self.new_port_forwarding.internal_port_id, '--internal-protocol-port', str(self.new_port_forwarding.internal_port), '--external-protocol-port', str(self.new_port_forwarding.external_port), - '--protocol', self.new_port_forwarding.protocol, + '--protocol', + self.new_port_forwarding.protocol, self.new_port_forwarding.floatingip_id, '--internal-ip-address', self.new_port_forwarding.internal_ip_address, @@ -276,57 +306,60 @@ def test_create_all_options(self): ] verifylist = [ ('port', self.new_port_forwarding.internal_port_id), - ('internal_protocol_port', - str(self.new_port_forwarding.internal_port)), - ('external_protocol_port', - str(self.new_port_forwarding.external_port)), + ( + 'internal_protocol_port', + str(self.new_port_forwarding.internal_port), + ), + ( + 'external_protocol_port', + str(self.new_port_forwarding.external_port), + ), ('protocol', self.new_port_forwarding.protocol), ('floating_ip', self.new_port_forwarding.floatingip_id), - ('internal_ip_address', self.new_port_forwarding. - internal_ip_address), + ( + 'internal_ip_address', + self.new_port_forwarding.internal_ip_address, + ), ('description', self.new_port_forwarding.description), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_floating_ip_port_forwarding.\ - assert_called_once_with( - self.new_port_forwarding.floatingip_id, - **{ - 'external_port': self.new_port_forwarding.external_port, - 'internal_ip_address': self.new_port_forwarding. - internal_ip_address, - 'internal_port': self.new_port_forwarding.internal_port, - 'internal_port_id': self.new_port_forwarding. 
- internal_port_id, - 'protocol': self.new_port_forwarding.protocol, - 'description': self.new_port_forwarding.description, - }) + self.network_client.create_floating_ip_port_forwarding.assert_called_once_with( # noqa: E501 + self.new_port_forwarding.floatingip_id, + **{ + 'external_port': self.new_port_forwarding.external_port, + 'internal_ip_address': self.new_port_forwarding.internal_ip_address, # noqa: E501 + 'internal_port': self.new_port_forwarding.internal_port, + 'internal_port_id': self.new_port_forwarding.internal_port_id, + 'protocol': self.new_port_forwarding.protocol, + 'description': self.new_port_forwarding.description, + }, + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestDeleteFloatingIPPortForwarding(TestFloatingIPPortForwarding): - def setUp(self): - super(TestDeleteFloatingIPPortForwarding, self).setUp() + super().setUp() self._port_forwarding = ( network_fakes.FakeFloatingIPPortForwarding.create_port_forwardings( - count=2, attrs={ + count=2, + attrs={ 'floatingip_id': self.floating_ip.id, - } + }, ) ) - self.network.delete_floating_ip_port_forwarding = mock.Mock( - return_value=None + self.network_client.delete_floating_ip_port_forwarding.return_value = ( + None ) - self.network.find_ip = mock.Mock( - return_value=self.floating_ip - ) + self.network_client.find_ip.return_value = self.floating_ip # Get the command object to test self.cmd = floating_ip_port_forwarding.DeleteFloatingIPPortForwarding( - self.app, self.namespace) + self.app, None + ) def test_port_forwarding_delete(self): arglist = [ @@ -342,12 +375,11 @@ def test_port_forwarding_delete(self): result = self.cmd.take_action(parsed_args) - self.network.delete_floating_ip_port_forwarding.\ - assert_called_once_with( - self.floating_ip.id, - self._port_forwarding[0].id, - ignore_missing=False - ) + self.network_client.delete_floating_ip_port_forwarding.assert_called_once_with( # noqa: E501 + self.floating_ip.id, + self._port_forwarding[0].id, + ignore_missing=False, + ) self.assertIsNone(result) @@ -373,7 +405,9 @@ def test_multi_port_forwardings_delete(self): for a in self._port_forwarding: calls.append(call(a.floatingip_id, a.id, ignore_missing=False)) - self.network.delete_floating_ip_port_forwarding.assert_has_calls(calls) + self.network_client.delete_floating_ip_port_forwarding.assert_has_calls( + calls + ) self.assertIsNone(result) def test_multi_port_forwarding_delete_with_exception(self): @@ -384,14 +418,16 @@ def test_multi_port_forwarding_delete_with_exception(self): ] verifylist = [ ('floating_ip', self.floating_ip.id), - ('port_forwarding_id', - [self._port_forwarding[0].id, 'unexist_port_forwarding_id']), + ( + 'port_forwarding_id', + [self._port_forwarding[0].id, 'unexist_port_forwarding_id'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) delete_mock_result = [None, exceptions.CommandError] - self.network.delete_floating_ip_port_forwarding = ( + self.network_client.delete_floating_ip_port_forwarding = ( mock.MagicMock(side_effect=delete_mock_result) ) @@ -400,26 +436,22 @@ def test_multi_port_forwarding_delete_with_exception(self): self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual( - '1 of 2 Port forwarding failed to delete.', - str(e) + '1 of 2 Port forwarding failed to delete.', str(e) ) - self.network.delete_floating_ip_port_forwarding.\ - assert_any_call( - self.floating_ip.id, - 'unexist_port_forwarding_id', - ignore_missing=False - ) - 
self.network.delete_floating_ip_port_forwarding.\ - assert_any_call( - self.floating_ip.id, - self._port_forwarding[0].id, - ignore_missing=False - ) + self.network_client.delete_floating_ip_port_forwarding.assert_any_call( + self.floating_ip.id, + 'unexist_port_forwarding_id', + ignore_missing=False, + ) + self.network_client.delete_floating_ip_port_forwarding.assert_any_call( + self.floating_ip.id, + self._port_forwarding[0].id, + ignore_missing=False, + ) class TestListFloatingIPPortForwarding(TestFloatingIPPortForwarding): - columns = ( 'ID', 'Internal Port ID', @@ -433,71 +465,71 @@ class TestListFloatingIPPortForwarding(TestFloatingIPPortForwarding): ) def setUp(self): - super(TestListFloatingIPPortForwarding, self).setUp() + super().setUp() self.port_forwardings = ( network_fakes.FakeFloatingIPPortForwarding.create_port_forwardings( - count=3, attrs={ + count=3, + attrs={ 'internal_port_id': self.port.id, 'floatingip_id': self.floating_ip.id, - } + }, ) ) self.data = [] for port_forwarding in self.port_forwardings: - self.data.append(( - port_forwarding.id, - port_forwarding.internal_port_id, - port_forwarding.internal_ip_address, - port_forwarding.internal_port, - port_forwarding.internal_port_range, - port_forwarding.external_port, - port_forwarding.external_port_range, - port_forwarding.protocol, - port_forwarding.description, - )) - self.network.floating_ip_port_forwardings = mock.Mock( - return_value=self.port_forwardings - ) - self.network.find_ip = mock.Mock( - return_value=self.floating_ip + self.data.append( + ( + port_forwarding.id, + port_forwarding.internal_port_id, + port_forwarding.internal_ip_address, + port_forwarding.internal_port, + port_forwarding.internal_port_range, + port_forwarding.external_port, + port_forwarding.external_port_range, + port_forwarding.protocol, + port_forwarding.description, + ) + ) + self.network_client.floating_ip_port_forwardings.return_value = ( + self.port_forwardings ) + + self.network_client.find_ip.return_value = self.floating_ip # Get the command object to test self.cmd = floating_ip_port_forwarding.ListFloatingIPPortForwarding( - self.app, - self.namespace + self.app, None ) def test_port_forwarding_list(self): - arglist = [ - self.floating_ip.id - ] - verifylist = [ - ('floating_ip', self.floating_ip.id) - ] + arglist = [self.floating_ip.id] + verifylist = [('floating_ip', self.floating_ip.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.floating_ip_port_forwardings.assert_called_once_with( - self.floating_ip, - **{} + self.network_client.floating_ip_port_forwardings.assert_called_once_with( + self.floating_ip, **{} ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_port_forwarding_list_all_options(self): arglist = [ - '--port', self.port_forwardings[0].internal_port_id, + '--port', + self.port_forwardings[0].internal_port_id, '--external-protocol-port', str(self.port_forwardings[0].external_port), - '--protocol', self.port_forwardings[0].protocol, + '--protocol', + self.port_forwardings[0].protocol, self.port_forwardings[0].floatingip_id, ] verifylist = [ ('port', self.port_forwardings[0].internal_port_id), - ('external_protocol_port', - str(self.port_forwardings[0].external_port)), + ( + 'external_protocol_port', + str(self.port_forwardings[0].external_port), + ), ('protocol', self.port_forwardings[0].protocol), ('floating_ip', self.port_forwardings[0].floatingip_id), ] @@ -510,40 +542,34 @@ def 
test_port_forwarding_list_all_options(self): 'protocol': self.port_forwardings[0].protocol, } - self.network.floating_ip_port_forwardings.assert_called_once_with( - self.floating_ip, - **query + self.network_client.floating_ip_port_forwardings.assert_called_once_with( + self.floating_ip, **query ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestSetFloatingIPPortForwarding(TestFloatingIPPortForwarding): - # The Port Forwarding to set. def setUp(self): - super(TestSetFloatingIPPortForwarding, self).setUp() - self._port_forwarding = ( - network_fakes.FakeFloatingIPPortForwarding. - create_one_port_forwarding( - attrs={ - 'floatingip_id': self.floating_ip.id, - } - ) + super().setUp() + self._port_forwarding = network_fakes.FakeFloatingIPPortForwarding.create_one_port_forwarding( # noqa: E501 + attrs={ + 'floatingip_id': self.floating_ip.id, + } ) - self.network.update_floating_ip_port_forwarding = mock.Mock( - return_value=None + self.network_client.update_floating_ip_port_forwarding.return_value = ( + None ) - self.network.find_floating_ip_port_forwarding = mock.Mock( - return_value=self._port_forwarding) - self.network.find_ip = mock.Mock( - return_value=self.floating_ip + self.network_client.find_floating_ip_port_forwarding.return_value = ( + self._port_forwarding ) + + self.network_client.find_ip.return_value = self.floating_ip # Get the command object to test self.cmd = floating_ip_port_forwarding.SetFloatingIPPortForwarding( - self.app, - self.namespace + self.app, None ) def test_set_nothing(self): @@ -560,21 +586,27 @@ def test_set_nothing(self): result = self.cmd.take_action(parsed_args) attrs = {} - self.network.update_floating_ip_port_forwarding.assert_called_with( + self.network_client.update_floating_ip_port_forwarding.assert_called_with( self._port_forwarding.floatingip_id, self._port_forwarding.id, - **attrs + **attrs, ) self.assertIsNone(result) def test_set_all_thing(self): arglist_single = [ - '--port', self.port.id, - '--internal-ip-address', 'new_internal_ip_address', - '--internal-protocol-port', '100', - '--external-protocol-port', '200', - '--protocol', 'tcp', - '--description', 'some description', + '--port', + self.port.id, + '--internal-ip-address', + 'new_internal_ip_address', + '--internal-protocol-port', + '100', + '--external-protocol-port', + '200', + '--protocol', + 'tcp', + '--description', + 'some description', self._port_forwarding.floatingip_id, self._port_forwarding.id, ] @@ -602,8 +634,11 @@ def test_set_all_thing(self): 'protocol': 'tcp', 'description': 'some description', } - attrs_range = dict(attrs_single, internal_port_range='100:110', - external_port_range='200:210') + attrs_range = dict( + attrs_single, + internal_port_range='100:110', + external_port_range='200:210', + ) attrs_range.pop('internal_port') attrs_range.pop('external_port') @@ -612,10 +647,10 @@ def run_and_validate(arglist, verifylist, attrs): result = self.cmd.take_action(parsed_args) - self.network.update_floating_ip_port_forwarding.assert_called_with( + self.network_client.update_floating_ip_port_forwarding.assert_called_with( self._port_forwarding.floatingip_id, self._port_forwarding.id, - **attrs + **attrs, ) self.assertIsNone(result) @@ -624,7 +659,6 @@ def run_and_validate(arglist, verifylist, attrs): class TestShowFloatingIPPortForwarding(TestFloatingIPPortForwarding): - # The port forwarding to show. 
columns = ( 'description', @@ -640,14 +674,11 @@ class TestShowFloatingIPPortForwarding(TestFloatingIPPortForwarding): ) def setUp(self): - super(TestShowFloatingIPPortForwarding, self).setUp() - self._port_forwarding = ( - network_fakes.FakeFloatingIPPortForwarding. - create_one_port_forwarding( - attrs={ - 'floatingip_id': self.floating_ip.id, - } - ) + super().setUp() + self._port_forwarding = network_fakes.FakeFloatingIPPortForwarding.create_one_port_forwarding( # noqa: E501 + attrs={ + 'floatingip_id': self.floating_ip.id, + } ) self.data = ( self._port_forwarding.description, @@ -661,16 +692,14 @@ def setUp(self): self._port_forwarding.internal_port_range, self._port_forwarding.protocol, ) - self.network.find_floating_ip_port_forwarding = mock.Mock( - return_value=self._port_forwarding - ) - self.network.find_ip = mock.Mock( - return_value=self.floating_ip + self.network_client.find_floating_ip_port_forwarding.return_value = ( + self._port_forwarding ) + + self.network_client.find_ip.return_value = self.floating_ip # Get the command object to test self.cmd = floating_ip_port_forwarding.ShowFloatingIPPortForwarding( - self.app, - self.namespace + self.app, None ) def test_show_no_options(self): @@ -678,8 +707,13 @@ def test_show_no_options(self): verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_default_options(self): arglist = [ @@ -694,10 +728,8 @@ def test_show_default_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_floating_ip_port_forwarding.assert_called_once_with( - self.floating_ip, - self._port_forwarding.id, - ignore_missing=False + self.network_client.find_floating_ip_port_forwarding.assert_called_once_with( + self.floating_ip, self._port_forwarding.id, ignore_missing=False ) self.assertEqual(self.columns, columns) diff --git a/openstackclient/tests/unit/network/v2/test_ip_availability.py b/openstackclient/tests/unit/network/v2/test_ip_availability.py index fbe3b1ab5b..def3e17da0 100644 --- a/openstackclient/tests/unit/network/v2/test_ip_availability.py +++ b/openstackclient/tests/unit/network/v2/test_ip_availability.py @@ -11,7 +11,6 @@ # under the License. 
# -from unittest import mock from osc_lib.cli import format_columns @@ -22,22 +21,17 @@ class TestIPAvailability(network_fakes.TestNetworkV2): - def setUp(self): - super(TestIPAvailability, self).setUp() - - # Get a shortcut to the network client - self.network = self.app.client_manager.network + super().setUp() # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.project = identity_fakes.FakeProject.create_one_project() self.projects_mock.get.return_value = self.project class TestListIPAvailability(TestIPAvailability): - _ip_availability = network_fakes.create_ip_availability(count=3) columns = ( 'Network ID', @@ -47,20 +41,22 @@ class TestListIPAvailability(TestIPAvailability): ) data = [] for net in _ip_availability: - data.append(( - net.network_id, - net.network_name, - net.total_ips, - net.used_ips, - )) + data.append( + ( + net.network_id, + net.network_name, + net.total_ips, + net.used_ips, + ) + ) def setUp(self): - super(TestListIPAvailability, self).setUp() + super().setUp() - self.cmd = ip_availability.ListIPAvailability( - self.app, self.namespace) - self.network.network_ip_availabilities = mock.Mock( - return_value=self._ip_availability) + self.cmd = ip_availability.ListIPAvailability(self.app, None) + self.network_client.network_ip_availabilities.return_value = ( + self._ip_availability + ) def test_list_no_options(self): arglist = [] @@ -71,54 +67,51 @@ def test_list_no_options(self): columns, data = self.cmd.take_action(parsed_args) filters = {'ip_version': 4} - self.network.network_ip_availabilities.assert_called_once_with( - **filters) + self.network_client.network_ip_availabilities.assert_called_once_with( + **filters + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_list_ip_version(self): arglist = [ - '--ip-version', str(4), - ] - verifylist = [ - ('ip_version', 4) + '--ip-version', + str(4), ] + verifylist = [('ip_version', 4)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) filters = {'ip_version': 4} - self.network.network_ip_availabilities.assert_called_once_with( - **filters) + self.network_client.network_ip_availabilities.assert_called_once_with( + **filters + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_list_project(self): - arglist = [ - '--project', self.project.name - ] - verifylist = [ - ('project', self.project.name) - ] + arglist = ['--project', self.project.name] + verifylist = [('project', self.project.name)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - filters = {'project_id': self.project.id, - 'ip_version': 4} + filters = {'project_id': self.project.id, 'ip_version': 4} - self.network.network_ip_availabilities.assert_called_once_with( - **filters) + self.network_client.network_ip_availabilities.assert_called_once_with( + **filters + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) class TestShowIPAvailability(TestIPAvailability): - _network = network_fakes.create_one_network() _ip_availability = network_fakes.create_one_ip_availability( - attrs={'network_id': _network.id}) + attrs={'network_id': _network.id} + ) columns = ( 'network_id', @@ -132,45 +125,47 @@ class TestShowIPAvailability(TestIPAvailability): _ip_availability.network_id, _ip_availability.network_name, 
_ip_availability.project_id, - format_columns.ListDictColumn( - _ip_availability.subnet_ip_availability), + format_columns.ListDictColumn(_ip_availability.subnet_ip_availability), _ip_availability.total_ips, _ip_availability.used_ips, ) def setUp(self): - super(TestShowIPAvailability, self).setUp() + super().setUp() - self.network.find_network_ip_availability = mock.Mock( - return_value=self._ip_availability) - self.network.find_network = mock.Mock( - return_value=self._network) + self.network_client.find_network_ip_availability.return_value = ( + self._ip_availability + ) + + self.network_client.find_network.return_value = self._network # Get the command object to test - self.cmd = ip_availability.ShowIPAvailability( - self.app, self.namespace) + self.cmd = ip_availability.ShowIPAvailability(self.app, None) def test_show_no_option(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ self._ip_availability.network_name, ] - verifylist = [ - ('network', self._ip_availability.network_name) - ] + verifylist = [('network', self._ip_availability.network_name)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_network_ip_availability.assert_called_once_with( - self._ip_availability.network_id, - ignore_missing=False) - self.network.find_network.assert_called_once_with( - self._ip_availability.network_name, - ignore_missing=False) + self.network_client.find_network_ip_availability.assert_called_once_with( + self._ip_availability.network_id, ignore_missing=False + ) + self.network_client.find_network.assert_called_once_with( + self._ip_availability.network_name, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) diff --git a/openstackclient/tests/unit/network/v2/test_l3_conntrack_helper.py b/openstackclient/tests/unit/network/v2/test_l3_conntrack_helper.py index b3d026a7ef..0769e2e561 100644 --- a/openstackclient/tests/unit/network/v2/test_l3_conntrack_helper.py +++ b/openstackclient/tests/unit/network/v2/test_l3_conntrack_helper.py @@ -11,7 +11,6 @@ # under the License. 
# -from unittest import mock from osc_lib import exceptions @@ -21,58 +20,59 @@ class TestConntrackHelper(network_fakes.TestNetworkV2): - def setUp(self): - super(TestConntrackHelper, self).setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network - self.router = network_fakes.FakeRouter.create_one_router() - self.network.find_router = mock.Mock(return_value=self.router) + super().setUp() + self.router = network_fakes.create_one_router() + self.network_client.find_router.return_value = self.router -class TestCreateL3ConntrackHelper(TestConntrackHelper): +class TestCreateL3ConntrackHelper(TestConntrackHelper): def setUp(self): - super(TestCreateL3ConntrackHelper, self).setUp() + super().setUp() attrs = {'router_id': self.router.id} self.ct_helper = ( network_fakes.FakeL3ConntrackHelper.create_one_l3_conntrack_helper( - attrs)) - self.columns = ( - 'helper', - 'id', - 'port', - 'protocol', - 'router_id' + attrs + ) ) + self.columns = ('helper', 'id', 'port', 'protocol', 'router_id') self.data = ( self.ct_helper.helper, self.ct_helper.id, self.ct_helper.port, self.ct_helper.protocol, - self.ct_helper.router_id + self.ct_helper.router_id, + ) + self.network_client.create_conntrack_helper.return_value = ( + self.ct_helper ) - self.network.create_conntrack_helper = mock.Mock( - return_value=self.ct_helper) # Get the command object to test - self.cmd = l3_conntrack_helper.CreateConntrackHelper(self.app, - self.namespace) + self.cmd = l3_conntrack_helper.CreateConntrackHelper(self.app, None) def test_create_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ - '--helper', 'tftp', - '--protocol', 'udp', - '--port', '69', + '--helper', + 'tftp', + '--protocol', + 'udp', + '--port', + '69', self.router.id, ] @@ -83,85 +83,83 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_conntrack_helper.assert_called_once_with( - self.router.id, - **{'helper': 'tftp', 'protocol': 'udp', - 'port': 69} + self.network_client.create_conntrack_helper.assert_called_once_with( + self.router.id, **{'helper': 'tftp', 'protocol': 'udp', 'port': 69} ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_wrong_options(self): arglist = [ - '--protocol', 'udp', - '--port', '69', + '--protocol', + 'udp', + '--port', + '69', self.router.id, ] self.assertRaises( tests_utils.ParserException, self.check_parser, - self.cmd, arglist, None) + self.cmd, + arglist, + None, + ) class TestDeleteL3ConntrackHelper(TestConntrackHelper): - def setUp(self): - super(TestDeleteL3ConntrackHelper, self).setUp() + super().setUp() attrs = {'router_id': self.router.id} self.ct_helper = ( network_fakes.FakeL3ConntrackHelper.create_one_l3_conntrack_helper( - attrs)) - self.network.delete_conntrack_helper = mock.Mock( - return_value=None) + attrs + ) + ) + self.network_client.delete_conntrack_helper.return_value = None # Get the command object to test - self.cmd = l3_conntrack_helper.DeleteConntrackHelper(self.app, - self.namespace) + self.cmd = l3_conntrack_helper.DeleteConntrackHelper(self.app, 
None) def test_delete(self): - arglist = [ - self.ct_helper.router_id, - self.ct_helper.id - ] + arglist = [self.ct_helper.router_id, self.ct_helper.id] verifylist = [ ('conntrack_helper_id', [self.ct_helper.id]), ('router', self.ct_helper.router_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_conntrack_helper.assert_called_once_with( - self.ct_helper.id, self.router.id, - ignore_missing=False) + self.network_client.delete_conntrack_helper.assert_called_once_with( + self.ct_helper.id, self.router.id, ignore_missing=False + ) self.assertIsNone(result) def test_delete_error(self): - arglist = [ - self.router.id, - self.ct_helper.id - ] + arglist = [self.router.id, self.ct_helper.id] verifylist = [ ('conntrack_helper_id', [self.ct_helper.id]), ('router', self.router.id), ] - self.network.delete_conntrack_helper.side_effect = Exception( - 'Error message') + self.network_client.delete_conntrack_helper.side_effect = Exception( + 'Error message' + ) parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) class TestListL3ConntrackHelper(TestConntrackHelper): - def setUp(self): - super(TestListL3ConntrackHelper, self).setUp() + super().setUp() attrs = {'router_id': self.router.id} ct_helpers = ( network_fakes.FakeL3ConntrackHelper.create_l3_conntrack_helpers( - attrs, count=3)) + attrs, count=3 + ) + ) self.columns = ( 'ID', 'Router ID', @@ -171,24 +169,22 @@ def setUp(self): ) self.data = [] for ct_helper in ct_helpers: - self.data.append(( - ct_helper.id, - ct_helper.router_id, - ct_helper.helper, - ct_helper.protocol, - ct_helper.port, - )) - self.network.conntrack_helpers = mock.Mock( - return_value=ct_helpers) + self.data.append( + ( + ct_helper.id, + ct_helper.router_id, + ct_helper.helper, + ct_helper.protocol, + ct_helper.port, + ) + ) + self.network_client.conntrack_helpers.return_value = ct_helpers # Get the command object to test - self.cmd = l3_conntrack_helper.ListConntrackHelper(self.app, - self.namespace) + self.cmd = l3_conntrack_helper.ListConntrackHelper(self.app, None) def test_conntrack_helpers_list(self): - arglist = [ - self.router.id - ] + arglist = [self.router.id] verifylist = [ ('router', self.router.id), ] @@ -196,8 +192,9 @@ def test_conntrack_helpers_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.conntrack_helpers.assert_called_once_with( - self.router.id) + self.network_client.conntrack_helpers.assert_called_once_with( + self.router.id + ) self.assertEqual(self.columns, columns) list_data = list(data) self.assertEqual(len(self.data), len(list_data)) @@ -206,18 +203,18 @@ def test_conntrack_helpers_list(self): class TestSetL3ConntrackHelper(TestConntrackHelper): - def setUp(self): - super(TestSetL3ConntrackHelper, self).setUp() + super().setUp() attrs = {'router_id': self.router.id} self.ct_helper = ( network_fakes.FakeL3ConntrackHelper.create_one_l3_conntrack_helper( - attrs)) - self.network.update_conntrack_helper = mock.Mock(return_value=None) + attrs + ) + ) + self.network_client.update_conntrack_helper.return_value = None # Get the command object to test - self.cmd = l3_conntrack_helper.SetConntrackHelper(self.app, - self.namespace) + self.cmd = l3_conntrack_helper.SetConntrackHelper(self.app, None) def test_set_nothing(self): arglist = 
[ @@ -230,9 +227,9 @@ def test_set_nothing(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = (self.cmd.take_action(parsed_args)) + result = self.cmd.take_action(parsed_args) - self.network.update_conntrack_helper.assert_called_once_with( + self.network_client.update_conntrack_helper.assert_called_once_with( self.ct_helper.id, self.router.id ) self.assertIsNone(result) @@ -241,7 +238,8 @@ def test_set_port(self): arglist = [ self.router.id, self.ct_helper.id, - '--port', '124', + '--port', + '124', ] verifylist = [ ('router', self.router.id), @@ -250,51 +248,49 @@ def test_set_port(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = (self.cmd.take_action(parsed_args)) + result = self.cmd.take_action(parsed_args) - self.network.update_conntrack_helper.assert_called_once_with( + self.network_client.update_conntrack_helper.assert_called_once_with( self.ct_helper.id, self.router.id, port=124 ) self.assertIsNone(result) class TestShowL3ConntrackHelper(TestConntrackHelper): - def setUp(self): - super(TestShowL3ConntrackHelper, self).setUp() + super().setUp() attrs = {'router_id': self.router.id} self.ct_helper = ( network_fakes.FakeL3ConntrackHelper.create_one_l3_conntrack_helper( - attrs)) - self.columns = ( - 'helper', - 'id', - 'port', - 'protocol', - 'router_id' + attrs + ) ) + self.columns = ('helper', 'id', 'port', 'protocol', 'router_id') self.data = ( self.ct_helper.helper, self.ct_helper.id, self.ct_helper.port, self.ct_helper.protocol, - self.ct_helper.router_id + self.ct_helper.router_id, ) - self.network.get_conntrack_helper = mock.Mock( - return_value=self.ct_helper) + self.network_client.get_conntrack_helper.return_value = self.ct_helper # Get the command object to test - self.cmd = l3_conntrack_helper.ShowConntrackHelper(self.app, - self.namespace) + self.cmd = l3_conntrack_helper.ShowConntrackHelper(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_default_options(self): arglist = [ @@ -307,9 +303,9 @@ def test_show_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.get_conntrack_helper.assert_called_once_with( + self.network_client.get_conntrack_helper.assert_called_once_with( self.ct_helper.id, self.router.id ) self.assertEqual(self.columns, columns) diff --git a/openstackclient/tests/unit/network/v2/test_local_ip.py b/openstackclient/tests/unit/network/v2/test_local_ip.py index be23365e08..585fec767c 100644 --- a/openstackclient/tests/unit/network/v2/test_local_ip.py +++ b/openstackclient/tests/unit/network/v2/test_local_ip.py @@ -13,7 +13,6 @@ # under the License. 
# -from unittest import mock from unittest.mock import call from osc_lib import exceptions @@ -25,16 +24,13 @@ class TestLocalIP(network_fakes.TestNetworkV2): - def setUp(self): super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateLocalIP(TestLocalIP): @@ -44,9 +40,12 @@ class TestCreateLocalIP(TestLocalIP): port = network_fakes.create_one_port() # The new local ip created. new_local_ip = network_fakes.create_one_local_ip( - attrs={'project_id': project.id, - 'network_id': local_ip_network.id, - 'local_port_id': port.id}) + attrs={ + 'project_id': project.id, + 'network_id': local_ip_network.id, + 'local_port_id': port.id, + } + ) columns = ( 'created_at', @@ -77,36 +76,42 @@ class TestCreateLocalIP(TestLocalIP): def setUp(self): super().setUp() - self.network.create_local_ip = mock.Mock( - return_value=self.new_local_ip) - self.network.find_network = mock.Mock( - return_value=self.local_ip_network) - self.network.find_port = mock.Mock( - return_value=self.port) + self.network_client.create_local_ip.return_value = self.new_local_ip + + self.network_client.find_network.return_value = self.local_ip_network + + self.network_client.find_port.return_value = self.port # Get the command object to test - self.cmd = local_ip.CreateLocalIP(self.app, self.namespace) + self.cmd = local_ip.CreateLocalIP(self.app, None) self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain def test_create_no_options(self): parsed_args = self.check_parser(self.cmd, [], []) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_local_ip.assert_called_once_with(**{}) + self.network_client.create_local_ip.assert_called_once_with(**{}) self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, data) def test_create_all_options(self): arglist = [ - '--project-domain', self.domain.name, - '--description', self.new_local_ip.description, - '--name', self.new_local_ip.name, - '--network', self.new_local_ip.network_id, - '--local-port', self.new_local_ip.local_port_id, - '--local-ip-address', '10.0.0.1', - '--ip-mode', self.new_local_ip.ip_mode, + '--project-domain', + self.domain.name, + '--description', + self.new_local_ip.description, + '--name', + self.new_local_ip.name, + '--network', + self.new_local_ip.network_id, + '--local-port', + self.new_local_ip.local_port_id, + '--local-ip-address', + '10.0.0.1', + '--ip-mode', + self.new_local_ip.ip_mode, ] verifylist = [ ('project_domain', self.domain.name), @@ -119,16 +124,18 @@ def test_create_all_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) - - self.network.create_local_ip.assert_called_once_with(**{ - 'name': self.new_local_ip.name, - 'description': self.new_local_ip.description, - 'network_id': self.new_local_ip.network_id, - 'local_port_id': self.new_local_ip.local_port_id, - 'local_ip_address': '10.0.0.1', - 'ip_mode': self.new_local_ip.ip_mode, - }) + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_local_ip.assert_called_once_with( + 
**{ + 'name': self.new_local_ip.name, + 'description': self.new_local_ip.description, + 'network_id': self.new_local_ip.network_id, + 'local_port_id': self.new_local_ip.local_port_id, + 'local_ip_address': '10.0.0.1', + 'ip_mode': self.new_local_ip.ip_mode, + } + ) self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, data) @@ -139,12 +146,13 @@ class TestDeleteLocalIP(TestLocalIP): def setUp(self): super().setUp() - self.network.delete_local_ip = mock.Mock(return_value=None) - self.network.find_local_ip = network_fakes.get_local_ips( - local_ips=self._local_ips) + self.network_client.delete_local_ip.return_value = None + self.network_client.find_local_ip = network_fakes.get_local_ips( + local_ips=self._local_ips + ) # Get the command object to test - self.cmd = local_ip.DeleteLocalIP(self.app, self.namespace) + self.cmd = local_ip.DeleteLocalIP(self.app, None) def test_local_ip_delete(self): arglist = [ @@ -157,10 +165,12 @@ def test_local_ip_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_local_ip.assert_called_once_with( - self._local_ips[0].name, ignore_missing=False) - self.network.delete_local_ip.assert_called_once_with( - self._local_ips[0]) + self.network_client.find_local_ip.assert_called_once_with( + self._local_ips[0].name, ignore_missing=False + ) + self.network_client.delete_local_ip.assert_called_once_with( + self._local_ips[0] + ) self.assertIsNone(result) def test_multi_local_ips_delete(self): @@ -178,7 +188,7 @@ def test_multi_local_ips_delete(self): calls = [] for a in self._local_ips: calls.append(call(a)) - self.network.delete_local_ip.assert_has_calls(calls) + self.network_client.delete_local_ip.assert_has_calls(calls) self.assertIsNone(result) def test_multi_local_ips_delete_with_exception(self): @@ -187,15 +197,12 @@ def test_multi_local_ips_delete_with_exception(self): 'unexist_local_ip', ] verifylist = [ - ('local_ip', - [self._local_ips[0].name, 'unexist_local_ip']), + ('local_ip', [self._local_ips[0].name, 'unexist_local_ip']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._local_ips[0], exceptions.CommandError] - self.network.find_local_ip = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_local_ip.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -203,22 +210,21 @@ def test_multi_local_ips_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 local IPs failed to delete.', str(e)) - self.network.find_local_ip.assert_any_call( - self._local_ips[0].name, ignore_missing=False) - self.network.find_local_ip.assert_any_call( - 'unexist_local_ip', ignore_missing=False) - self.network.delete_local_ip.assert_called_once_with( + self.network_client.find_local_ip.assert_any_call( + self._local_ips[0].name, ignore_missing=False + ) + self.network_client.find_local_ip.assert_any_call( + 'unexist_local_ip', ignore_missing=False + ) + self.network_client.delete_local_ip.assert_called_once_with( self._local_ips[0] ) class TestListLocalIP(TestLocalIP): # The local ip to list up. 
- local_ips = ( - network_fakes.create_local_ips(count=3)) - fake_network = network_fakes.create_one_network( - {'id': 'fake_network_id'} - ) + local_ips = network_fakes.create_local_ips(count=3) + fake_network = network_fakes.create_one_network({'id': 'fake_network_id'}) columns = ( 'ID', @@ -232,27 +238,26 @@ class TestListLocalIP(TestLocalIP): ) data = [] for lip in local_ips: - data.append(( - lip.id, - lip.name, - lip.description, - lip.project_id, - lip.local_port_id, - lip.network_id, - lip.local_ip_address, - lip.ip_mode, - )) + data.append( + ( + lip.id, + lip.name, + lip.description, + lip.project_id, + lip.local_port_id, + lip.network_id, + lip.local_ip_address, + lip.ip_mode, + ) + ) def setUp(self): super().setUp() - self.network.local_ips = mock.Mock( - return_value=self.local_ips) - self.network.find_network = mock.Mock( - return_value=self.fake_network - ) + self.network_client.local_ips.return_value = self.local_ips + self.network_client.find_network.return_value = self.fake_network # Get the command object to test - self.cmd = local_ip.ListLocalIP(self.app, self.namespace) + self.cmd = local_ip.ListLocalIP(self.app, None) def test_local_ip_list(self): arglist = [] @@ -261,13 +266,14 @@ def test_local_ip_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.local_ips.assert_called_once_with(**{}) + self.network_client.local_ips.assert_called_once_with(**{}) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_local_ip_list_name(self): arglist = [ - '--name', self.local_ips[0].name, + '--name', + self.local_ips[0].name, ] verifylist = [ ('name', self.local_ips[0].name), @@ -275,8 +281,9 @@ def test_local_ip_list_name(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.local_ips.assert_called_once_with( - **{'name': self.local_ips[0].name}) + self.network_client.local_ips.assert_called_once_with( + **{'name': self.local_ips[0].name} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -284,7 +291,8 @@ def test_local_ip_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -292,8 +300,9 @@ def test_local_ip_list_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.local_ips.assert_called_once_with( - **{'project_id': project.id}) + self.network_client.local_ips.assert_called_once_with( + **{'project_id': project.id} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -301,8 +310,10 @@ def test_local_ip_project_domain(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -312,13 +323,14 @@ def test_local_ip_project_domain(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.local_ips.assert_called_once_with(**filters) + self.network_client.local_ips.assert_called_once_with(**filters) self.assertEqual(self.columns, 
columns) self.assertCountEqual(self.data, list(data)) def test_local_ip_list_network(self): arglist = [ - '--network', 'fake_network_id', + '--network', + 'fake_network_id', ] verifylist = [ ('network', 'fake_network_id'), @@ -327,16 +339,19 @@ def test_local_ip_list_network(self): columns, data = self.cmd.take_action(parsed_args) - self.network.local_ips.assert_called_once_with(**{ - 'network_id': 'fake_network_id', - }) + self.network_client.local_ips.assert_called_once_with( + **{ + 'network_id': 'fake_network_id', + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_local_ip_list_local_ip_address(self): arglist = [ - '--local-ip-address', self.local_ips[0].local_ip_address, + '--local-ip-address', + self.local_ips[0].local_ip_address, ] verifylist = [ ('local_ip_address', self.local_ips[0].local_ip_address), @@ -345,15 +360,18 @@ def test_local_ip_list_local_ip_address(self): columns, data = self.cmd.take_action(parsed_args) - self.network.local_ips.assert_called_once_with(**{ - 'local_ip_address': self.local_ips[0].local_ip_address, - }) + self.network_client.local_ips.assert_called_once_with( + **{ + 'local_ip_address': self.local_ips[0].local_ip_address, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_local_ip_list_ip_mode(self): arglist = [ - '--ip-mode', self.local_ips[0].ip_mode, + '--ip-mode', + self.local_ips[0].ip_mode, ] verifylist = [ ('ip_mode', self.local_ips[0].ip_mode), @@ -362,9 +380,11 @@ def test_local_ip_list_ip_mode(self): columns, data = self.cmd.take_action(parsed_args) - self.network.local_ips.assert_called_once_with(**{ - 'ip_mode': self.local_ips[0].ip_mode, - }) + self.network_client.local_ips.assert_called_once_with( + **{ + 'ip_mode': self.local_ips[0].ip_mode, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -375,15 +395,16 @@ class TestSetLocalIP(TestLocalIP): def setUp(self): super().setUp() - self.network.update_local_ip = mock.Mock(return_value=None) - self.network.find_local_ip = mock.Mock( - return_value=self._local_ip) + self.network_client.update_local_ip.return_value = None + self.network_client.find_local_ip.return_value = self._local_ip # Get the command object to test - self.cmd = local_ip.SetLocalIP(self.app, self.namespace) + self.cmd = local_ip.SetLocalIP(self.app, None) def test_set_nothing(self): - arglist = [self._local_ip.name, ] + arglist = [ + self._local_ip.name, + ] verifylist = [ ('local_ip', self._local_ip.name), ] @@ -391,13 +412,15 @@ def test_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_local_ip.assert_not_called() + self.network_client.update_local_ip.assert_not_called() self.assertIsNone(result) def test_set_name_and_description(self): arglist = [ - '--name', 'new_local_ip_name', - '--description', 'new_local_ip_description', + '--name', + 'new_local_ip_name', + '--description', + 'new_local_ip_description', self._local_ip.name, ] verifylist = [ @@ -412,8 +435,9 @@ def test_set_name_and_description(self): 'name': "new_local_ip_name", 'description': 'new_local_ip_description', } - self.network.update_local_ip.assert_called_with( - self._local_ip, **attrs) + self.network_client.update_local_ip.assert_called_with( + self._local_ip, **attrs + ) self.assertIsNone(result) @@ -449,19 +473,23 @@ class TestShowLocalIP(TestLocalIP): def setUp(self): super().setUp() - self.network.find_local_ip = 
mock.Mock( - return_value=self._local_ip) + self.network_client.find_local_ip.return_value = self._local_ip # Get the command object to test - self.cmd = local_ip.ShowLocalIP(self.app, self.namespace) + self.cmd = local_ip.ShowLocalIP(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -474,7 +502,8 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_local_ip.assert_called_once_with( - self._local_ip.name, ignore_missing=False) + self.network_client.find_local_ip.assert_called_once_with( + self._local_ip.name, ignore_missing=False + ) self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, list(data)) diff --git a/openstackclient/tests/unit/network/v2/test_local_ip_association.py b/openstackclient/tests/unit/network/v2/test_local_ip_association.py index 0e45374122..9efdc295f1 100644 --- a/openstackclient/tests/unit/network/v2/test_local_ip_association.py +++ b/openstackclient/tests/unit/network/v2/test_local_ip_association.py @@ -24,18 +24,15 @@ class TestLocalIPAssociation(network_fakes.TestNetworkV2): - def setUp(self): super().setUp() - self.network = self.app.client_manager.network self.local_ip = network_fakes.create_one_local_ip() self.fixed_port = network_fakes.create_one_port() self.project = identity_fakes_v2.FakeProject.create_one_project() - self.network.find_port = mock.Mock(return_value=self.fixed_port) + self.network_client.find_port.return_value = self.fixed_port class TestCreateLocalIPAssociation(TestLocalIPAssociation): - def setUp(self): super().setUp() self.new_local_ip_association = ( @@ -46,16 +43,16 @@ def setUp(self): } ) ) - self.network.create_local_ip_association = mock.Mock( - return_value=self.new_local_ip_association) - - self.network.find_local_ip = mock.Mock( - return_value=self.local_ip + self.network_client.create_local_ip_association.return_value = ( + self.new_local_ip_association ) + self.network_client.find_local_ip.return_value = self.local_ip + # Get the command object to test self.cmd = local_ip_association.CreateLocalIPAssociation( - self.app, self.namespace) + self.app, None + ) self.columns = ( 'local_ip_address', @@ -83,13 +80,12 @@ def test_create_no_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_local_ip_association.\ - assert_called_once_with( - self.new_local_ip_association.local_ip_id, - **{ - 'fixed_port_id': - self.new_local_ip_association.fixed_port_id, - }) + self.network_client.create_local_ip_association.assert_called_once_with( + self.new_local_ip_association.local_ip_id, + **{ + 'fixed_port_id': self.new_local_ip_association.fixed_port_id, + }, + ) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(set(self.data), set(data)) @@ -97,7 +93,8 @@ def test_create_all_options(self): arglist = [ self.new_local_ip_association.local_ip_id, self.new_local_ip_association.fixed_port_id, - '--fixed-ip', self.new_local_ip_association.fixed_ip, + '--fixed-ip', + self.new_local_ip_association.fixed_ip, ] verifylist = [ ('local_ip', 
self.new_local_ip_association.local_ip_id), @@ -107,40 +104,36 @@ def test_create_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_local_ip_association.\ - assert_called_once_with( - self.new_local_ip_association.local_ip_id, - **{ - 'fixed_port_id': - self.new_local_ip_association.fixed_port_id, - 'fixed_ip': - self.new_local_ip_association.fixed_ip, - }) + self.network_client.create_local_ip_association.assert_called_once_with( + self.new_local_ip_association.local_ip_id, + **{ + 'fixed_port_id': self.new_local_ip_association.fixed_port_id, + 'fixed_ip': self.new_local_ip_association.fixed_ip, + }, + ) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(set(self.data), set(data)) class TestDeleteLocalIPAssociation(TestLocalIPAssociation): - def setUp(self): super().setUp() self._local_ip_association = ( network_fakes.create_local_ip_associations( - count=2, attrs={ + count=2, + attrs={ 'local_ip_id': self.local_ip.id, - } + }, ) ) - self.network.delete_local_ip_association = mock.Mock( - return_value=None - ) + self.network_client.delete_local_ip_association.return_value = None + + self.network_client.find_local_ip.return_value = self.local_ip - self.network.find_local_ip = mock.Mock( - return_value=self.local_ip - ) # Get the command object to test self.cmd = local_ip_association.DeleteLocalIPAssociation( - self.app, self.namespace) + self.app, None + ) def test_local_ip_association_delete(self): arglist = [ @@ -156,12 +149,11 @@ def test_local_ip_association_delete(self): result = self.cmd.take_action(parsed_args) - self.network.delete_local_ip_association.\ - assert_called_once_with( - self.local_ip.id, - self._local_ip_association[0].fixed_port_id, - ignore_missing=False - ) + self.network_client.delete_local_ip_association.assert_called_once_with( + self.local_ip.id, + self._local_ip_association[0].fixed_port_id, + ignore_missing=False, + ) self.assertIsNone(result) @@ -185,10 +177,11 @@ def test_multi_local_ip_associations_delete(self): calls = [] for a in self._local_ip_association: - calls.append(call(a.local_ip_id, a.fixed_port_id, - ignore_missing=False)) + calls.append( + call(a.local_ip_id, a.fixed_port_id, ignore_missing=False) + ) - self.network.delete_local_ip_association.assert_has_calls(calls) + self.network_client.delete_local_ip_association.assert_has_calls(calls) self.assertIsNone(result) def test_multi_local_ip_association_delete_with_exception(self): @@ -199,16 +192,20 @@ def test_multi_local_ip_association_delete_with_exception(self): ] verifylist = [ ('local_ip', self.local_ip.id), - ('fixed_port_id', - [self._local_ip_association[0].fixed_port_id, - 'unexist_fixed_port_id']), + ( + 'fixed_port_id', + [ + self._local_ip_association[0].fixed_port_id, + 'unexist_fixed_port_id', + ], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) delete_mock_result = [None, exceptions.CommandError] - self.network.delete_local_ip_association = ( - mock.MagicMock(side_effect=delete_mock_result) + self.network_client.delete_local_ip_association = mock.MagicMock( + side_effect=delete_mock_result ) try: @@ -216,92 +213,82 @@ def test_multi_local_ip_association_delete_with_exception(self): self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual( - '1 of 2 Local IP Associations failed to delete.', - str(e) + '1 of 2 Local IP Associations failed to delete.', str(e) ) - 
self.network.delete_local_ip_association.\ - assert_any_call( - self.local_ip.id, - 'unexist_fixed_port_id', - ignore_missing=False - ) - self.network.delete_local_ip_association.\ - assert_any_call( - self.local_ip.id, - self._local_ip_association[0].fixed_port_id, - ignore_missing=False - ) + self.network_client.delete_local_ip_association.assert_any_call( + self.local_ip.id, 'unexist_fixed_port_id', ignore_missing=False + ) + self.network_client.delete_local_ip_association.assert_any_call( + self.local_ip.id, + self._local_ip_association[0].fixed_port_id, + ignore_missing=False, + ) class TestListLocalIPAssociation(TestLocalIPAssociation): - columns = ( 'Local IP ID', 'Local IP Address', 'Fixed port ID', 'Fixed IP', - 'Host' + 'Host', ) def setUp(self): super().setUp() self.local_ip_associations = ( network_fakes.create_local_ip_associations( - count=3, attrs={ + count=3, + attrs={ 'local_ip_id': self.local_ip.id, 'fixed_port_id': self.fixed_port.id, - } + }, ) ) self.data = [] for lip_assoc in self.local_ip_associations: - self.data.append(( - lip_assoc.local_ip_id, - lip_assoc.local_ip_address, - lip_assoc.fixed_port_id, - lip_assoc.fixed_ip, - lip_assoc.host, - )) - self.network.local_ip_associations = mock.Mock( - return_value=self.local_ip_associations - ) - self.network.find_local_ip = mock.Mock( - return_value=self.local_ip - ) - self.network.find_port = mock.Mock( - return_value=self.fixed_port + self.data.append( + ( + lip_assoc.local_ip_id, + lip_assoc.local_ip_address, + lip_assoc.fixed_port_id, + lip_assoc.fixed_ip, + lip_assoc.host, + ) + ) + self.network_client.local_ip_associations.return_value = ( + self.local_ip_associations ) + + self.network_client.find_local_ip.return_value = self.local_ip + + self.network_client.find_port.return_value = self.fixed_port # Get the command object to test - self.cmd = local_ip_association.ListLocalIPAssociation( - self.app, - self.namespace - ) + self.cmd = local_ip_association.ListLocalIPAssociation(self.app, None) def test_local_ip_association_list(self): - arglist = [ - self.local_ip.id - ] - verifylist = [ - ('local_ip', self.local_ip.id) - ] + arglist = [self.local_ip.id] + verifylist = [('local_ip', self.local_ip.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.local_ip_associations.assert_called_once_with( - self.local_ip, - **{} + self.network_client.local_ip_associations.assert_called_once_with( + self.local_ip, **{} ) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(set(self.data), set(list(data))) def test_local_ip_association_list_all_options(self): arglist = [ - '--fixed-port', self.local_ip_associations[0].fixed_port_id, - '--fixed-ip', self.local_ip_associations[0].fixed_ip, - '--host', self.local_ip_associations[0].host, - self.local_ip_associations[0].local_ip_id + '--fixed-port', + self.local_ip_associations[0].fixed_port_id, + '--fixed-ip', + self.local_ip_associations[0].fixed_ip, + '--host', + self.local_ip_associations[0].host, + self.local_ip_associations[0].local_ip_id, ] verifylist = [ @@ -320,9 +307,8 @@ def test_local_ip_association_list_all_options(self): 'host': self.local_ip_associations[0].host, } - self.network.local_ip_associations.assert_called_once_with( - self.local_ip, - **attrs + self.network_client.local_ip_associations.assert_called_once_with( + self.local_ip, **attrs ) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(set(self.data), set(list(data))) diff --git 
a/openstackclient/tests/unit/network/v2/test_ndp_proxy.py b/openstackclient/tests/unit/network/v2/test_ndp_proxy.py index 48c5deb224..0fe8740da5 100644 --- a/openstackclient/tests/unit/network/v2/test_ndp_proxy.py +++ b/openstackclient/tests/unit/network/v2/test_ndp_proxy.py @@ -11,7 +11,6 @@ # under the License. # -from unittest import mock from unittest.mock import call from osc_lib import exceptions @@ -23,29 +22,24 @@ class TestNDPProxy(network_fakes.TestNetworkV2): - def setUp(self): - super(TestNDPProxy, self).setUp() + super().setUp() # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains - # Get a shortcut to the network client - self.network = self.app.client_manager.network - self.router = network_fakes.FakeRouter.create_one_router( - {'id': 'fake-router-id'}) - self.network.find_router = mock.Mock(return_value=self.router) + self.domains_mock = self.identity_client.domains + + self.router = network_fakes.create_one_router({'id': 'fake-router-id'}) + self.network_client.find_router.return_value = self.router self.port = network_fakes.create_one_port() - self.network.find_port = mock.Mock(return_value=self.port) + self.network_client.find_port.return_value = self.port class TestCreateNDPProxy(TestNDPProxy): def setUp(self): - super(TestCreateNDPProxy, self).setUp() + super().setUp() attrs = {'router_id': self.router.id, 'port_id': self.port.id} - self.ndp_proxy = ( - network_fakes.create_one_ndp_proxy( - attrs)) + self.ndp_proxy = network_fakes.create_one_ndp_proxy(attrs) self.columns = ( 'created_at', 'description', @@ -56,7 +50,8 @@ def setUp(self): 'project_id', 'revision_number', 'router_id', - 'updated_at') + 'updated_at', + ) self.data = ( self.ndp_proxy.created_at, @@ -68,29 +63,37 @@ def setUp(self): self.ndp_proxy.project_id, self.ndp_proxy.revision_number, self.ndp_proxy.router_id, - self.ndp_proxy.updated_at + self.ndp_proxy.updated_at, ) - self.network.create_ndp_proxy = mock.Mock( - return_value=self.ndp_proxy) + self.network_client.create_ndp_proxy.return_value = self.ndp_proxy # Get the command object to test - self.cmd = ndp_proxy.CreateNDPProxy(self.app, self.namespace) + self.cmd = ndp_proxy.CreateNDPProxy(self.app, None) def test_create_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_all_options(self): arglist = [ self.ndp_proxy.router_id, - '--name', self.ndp_proxy.name, - '--port', self.ndp_proxy.port_id, - '--ip-address', self.ndp_proxy.ip_address, - '--description', self.ndp_proxy.description, + '--name', + self.ndp_proxy.name, + '--port', + self.ndp_proxy.port_id, + '--ip-address', + self.ndp_proxy.ip_address, + '--description', + self.ndp_proxy.description, ] verifylist = [ ('name', self.ndp_proxy.name), @@ -102,57 +105,53 @@ def test_create_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_ndp_proxy.assert_called_once_with( - **{'name': self.ndp_proxy.name, - 'router_id': self.ndp_proxy.router_id, - 'ip_address': self.ndp_proxy.ip_address, - 'port_id': self.ndp_proxy.port_id, - 
'description': self.ndp_proxy.description}) + self.network_client.create_ndp_proxy.assert_called_once_with( + **{ + 'name': self.ndp_proxy.name, + 'router_id': self.ndp_proxy.router_id, + 'ip_address': self.ndp_proxy.ip_address, + 'port_id': self.ndp_proxy.port_id, + 'description': self.ndp_proxy.description, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestDeleteNDPProxy(TestNDPProxy): - def setUp(self): - super(TestDeleteNDPProxy, self).setUp() + super().setUp() attrs = {'router_id': self.router.id, 'port_id': self.port.id} - self.ndp_proxies = ( - network_fakes.create_ndp_proxies(attrs)) + self.ndp_proxies = network_fakes.create_ndp_proxies(attrs) self.ndp_proxy = self.ndp_proxies[0] - self.network.delete_ndp_proxy = mock.Mock( - return_value=None) - self.network.find_ndp_proxy = mock.Mock( - return_value=self.ndp_proxy) + self.network_client.delete_ndp_proxy.return_value = None + self.network_client.find_ndp_proxy.return_value = self.ndp_proxy # Get the command object to test - self.cmd = ndp_proxy.DeleteNDPProxy(self.app, self.namespace) + self.cmd = ndp_proxy.DeleteNDPProxy(self.app, None) def test_delete(self): - arglist = [ - self.ndp_proxy.id - ] - verifylist = [ - ('ndp_proxy', [self.ndp_proxy.id]) - ] + arglist = [self.ndp_proxy.id] + verifylist = [('ndp_proxy', [self.ndp_proxy.id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_ndp_proxy.assert_called_once_with(self.ndp_proxy) + self.network_client.delete_ndp_proxy.assert_called_once_with( + self.ndp_proxy + ) self.assertIsNone(result) def test_delete_error(self): arglist = [ self.ndp_proxy.id, ] - verifylist = [ - ('ndp_proxy', [self.ndp_proxy.id]) - ] - self.network.delete_ndp_proxy.side_effect = Exception( - 'Error message') + verifylist = [('ndp_proxy', [self.ndp_proxy.id])] + self.network_client.delete_ndp_proxy.side_effect = Exception( + 'Error message' + ) parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_multi_ndp_proxies_delete(self): arglist = [] @@ -169,18 +168,17 @@ def test_multi_ndp_proxies_delete(self): result = self.cmd.take_action(parsed_args) - self.network.delete_ndp_proxy.assert_has_calls( - [call(self.ndp_proxy), call(self.ndp_proxy)]) + self.network_client.delete_ndp_proxy.assert_has_calls( + [call(self.ndp_proxy), call(self.ndp_proxy)] + ) self.assertIsNone(result) class TestListNDPProxy(TestNDPProxy): - def setUp(self): - super(TestListNDPProxy, self).setUp() + super().setUp() attrs = {'router_id': self.router.id, 'port_id': self.port.id} - ndp_proxies = ( - network_fakes.create_ndp_proxies(attrs, count=3)) + ndp_proxies = network_fakes.create_ndp_proxies(attrs, count=3) self.columns = ( 'ID', 'Name', @@ -190,19 +188,20 @@ def setUp(self): ) self.data = [] for np in ndp_proxies: - self.data.append(( - np.id, - np.name, - np.router_id, - np.ip_address, - np.project_id, - )) - - self.network.ndp_proxies = mock.Mock( - return_value=ndp_proxies) + self.data.append( + ( + np.id, + np.name, + np.router_id, + np.ip_address, + np.project_id, + ) + ) + + self.network_client.ndp_proxies.return_value = ndp_proxies # Get the command object to test - self.cmd = ndp_proxy.ListNDPProxy(self.app, self.namespace) + self.cmd = ndp_proxy.ListNDPProxy(self.app, None) def test_ndp_proxy_list(self): arglist = [] @@ -211,7 +210,7 @@ def 
test_ndp_proxy_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ndp_proxies.assert_called_once_with() + self.network_client.ndp_proxies.assert_called_once_with() self.assertEqual(self.columns, columns) list_data = list(data) self.assertEqual(len(self.data), len(list_data)) @@ -220,73 +219,73 @@ def test_ndp_proxy_list(self): def test_ndp_proxy_list_router(self): arglist = [ - '--router', 'fake-router-name', + '--router', + 'fake-router-name', ] - verifylist = [ - ('router', 'fake-router-name') - ] + verifylist = [('router', 'fake-router-name')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ndp_proxies.assert_called_once_with(**{ - 'router_id': 'fake-router-id'}) + self.network_client.ndp_proxies.assert_called_once_with( + **{'router_id': 'fake-router-id'} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_ndp_proxy_list_port(self): arglist = [ - '--port', self.port.id, + '--port', + self.port.id, ] - verifylist = [ - ('port', self.port.id) - ] + verifylist = [('port', self.port.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ndp_proxies.assert_called_once_with(**{ - 'port_id': self.port.id}) + self.network_client.ndp_proxies.assert_called_once_with( + **{'port_id': self.port.id} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_ndp_proxy_list_name(self): arglist = [ - '--name', 'fake-ndp-proxy-name', + '--name', + 'fake-ndp-proxy-name', ] - verifylist = [ - ('name', 'fake-ndp-proxy-name') - ] + verifylist = [('name', 'fake-ndp-proxy-name')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ndp_proxies.assert_called_once_with(**{ - 'name': 'fake-ndp-proxy-name'}) + self.network_client.ndp_proxies.assert_called_once_with( + **{'name': 'fake-ndp-proxy-name'} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_ndp_proxy_list_ip_address(self): arglist = [ - '--ip-address', '2001::1:2', + '--ip-address', + '2001::1:2', ] - verifylist = [ - ('ip_address', '2001::1:2') - ] + verifylist = [('ip_address', '2001::1:2')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ndp_proxies.assert_called_once_with(**{ - 'ip_address': '2001::1:2'}) + self.network_client.ndp_proxies.assert_called_once_with( + **{'ip_address': '2001::1:2'} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -294,7 +293,8 @@ def test_ndp_proxy_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -302,17 +302,20 @@ def test_ndp_proxy_list_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ndp_proxies.assert_called_once_with( - **{'project_id': project.id}) + self.network_client.ndp_proxies.assert_called_once_with( + **{'project_id': project.id} + ) self.assertEqual(self.columns, columns) - self.assertItemsEqual(self.data, list(data)) + self.assertCountEqual(self.data, 
list(data)) def test_ndp_proxy_list_project_domain(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -322,24 +325,21 @@ def test_ndp_proxy_list_project_domain(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.ndp_proxies.assert_called_once_with(**filters) + self.network_client.ndp_proxies.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) - self.assertItemsEqual(self.data, list(data)) + self.assertCountEqual(self.data, list(data)) class TestSetNDPProxy(TestNDPProxy): - def setUp(self): - super(TestSetNDPProxy, self).setUp() + super().setUp() attrs = {'router_id': self.router.id, 'port_id': self.port.id} - self.ndp_proxy = ( - network_fakes.create_one_ndp_proxy(attrs)) - self.network.update_ndp_proxy = mock.Mock(return_value=None) - self.network.find_ndp_proxy = mock.Mock( - return_value=self.ndp_proxy) + self.ndp_proxy = network_fakes.create_one_ndp_proxy(attrs) + self.network_client.update_ndp_proxy.return_value = None + self.network_client.find_ndp_proxy.return_value = self.ndp_proxy # Get the command object to test - self.cmd = ndp_proxy.SetNDPProxy(self.app, self.namespace) + self.cmd = ndp_proxy.SetNDPProxy(self.app, None) def test_set_nothing(self): arglist = [ @@ -350,16 +350,18 @@ def test_set_nothing(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = (self.cmd.take_action(parsed_args)) + result = self.cmd.take_action(parsed_args) - self.network.update_ndp_proxy.assert_called_once_with( - self.ndp_proxy) + self.network_client.update_ndp_proxy.assert_called_once_with( + self.ndp_proxy + ) self.assertIsNone(result) def test_set_name(self): arglist = [ self.ndp_proxy.id, - '--name', 'fake-name', + '--name', + 'fake-name', ] verifylist = [ ('ndp_proxy', self.ndp_proxy.id), @@ -367,16 +369,18 @@ def test_set_name(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = (self.cmd.take_action(parsed_args)) + result = self.cmd.take_action(parsed_args) - self.network.update_ndp_proxy.assert_called_once_with( - self.ndp_proxy, name='fake-name') + self.network_client.update_ndp_proxy.assert_called_once_with( + self.ndp_proxy, name='fake-name' + ) self.assertIsNone(result) def test_set_description(self): arglist = [ self.ndp_proxy.id, - '--description', 'balala', + '--description', + 'balala', ] verifylist = [ ('ndp_proxy', self.ndp_proxy.id), @@ -384,20 +388,19 @@ def test_set_description(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = (self.cmd.take_action(parsed_args)) + result = self.cmd.take_action(parsed_args) - self.network.update_ndp_proxy.assert_called_once_with( - self.ndp_proxy, description='balala') + self.network_client.update_ndp_proxy.assert_called_once_with( + self.ndp_proxy, description='balala' + ) self.assertIsNone(result) class TestShowNDPProxy(TestNDPProxy): - def setUp(self): - super(TestShowNDPProxy, self).setUp() + super().setUp() attrs = {'router_id': self.router.id, 'port_id': self.port.id} - self.ndp_proxy = ( - network_fakes.create_one_ndp_proxy(attrs)) + self.ndp_proxy = network_fakes.create_one_ndp_proxy(attrs) self.columns = ( 'created_at', @@ -409,7 +412,8 @@ def setUp(self): 'project_id', 'revision_number', 'router_id', - 
'updated_at') + 'updated_at', + ) self.data = ( self.ndp_proxy.created_at, @@ -421,21 +425,27 @@ def setUp(self): self.ndp_proxy.project_id, self.ndp_proxy.revision_number, self.ndp_proxy.router_id, - self.ndp_proxy.updated_at + self.ndp_proxy.updated_at, ) - self.network.get_ndp_proxy = mock.Mock(return_value=self.ndp_proxy) - self.network.find_ndp_proxy = mock.Mock(return_value=self.ndp_proxy) + self.network_client.get_ndp_proxy.return_value = self.ndp_proxy + + self.network_client.find_ndp_proxy.return_value = self.ndp_proxy # Get the command object to test - self.cmd = ndp_proxy.ShowNDPProxy(self.app, self.namespace) + self.cmd = ndp_proxy.ShowNDPProxy(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_default_options(self): arglist = [ @@ -446,9 +456,10 @@ def test_show_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.find_ndp_proxy.assert_called_once_with( - self.ndp_proxy.id, ignore_missing=False) + self.network_client.find_ndp_proxy.assert_called_once_with( + self.ndp_proxy.id, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/network/v2/test_network.py b/openstackclient/tests/unit/network/v2/test_network.py index 6adb9e16e9..1e923d053f 100644 --- a/openstackclient/tests/unit/network/v2/test_network.py +++ b/openstackclient/tests/unit/network/v2/test_network.py @@ -12,14 +12,12 @@ # import random -from unittest import mock from unittest.mock import call from osc_lib.cli import format_columns from osc_lib import exceptions from openstackclient.network.v2 import network -from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes_v2 from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3 from openstackclient.tests.unit.network.v2 import fakes as network_fakes @@ -29,20 +27,16 @@ # Tests for Neutron network # class TestNetwork(network_fakes.TestNetworkV2): - def setUp(self): - super(TestNetwork, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateNetworkIdentityV3(TestNetwork): - project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() # The new network created. @@ -52,8 +46,9 @@ class TestCreateNetworkIdentityV3(TestNetwork): 'availability_zone_hints': ["nova"], } ) - qos_policy = (network_fakes.FakeNetworkQosPolicy. 
- create_one_qos_policy(attrs={'id': _network.qos_policy_id})) + qos_policy = network_fakes.create_one_qos_policy( + attrs={'id': _network.qos_policy_id} + ) columns = ( 'admin_state_up', @@ -67,6 +62,7 @@ class TestCreateNetworkIdentityV3(TestNetwork): 'ipv6_address_scope', 'is_default', 'is_vlan_transparent', + 'is_vlan_qinq', 'mtu', 'name', 'port_security_enabled', @@ -107,6 +103,7 @@ class TestCreateNetworkIdentityV3(TestNetwork): network.RouterExternalColumn(_network.is_router_external), _network.is_shared, _network.is_vlan_transparent, + _network.is_vlan_qinq, _network.status, _network.segments, format_columns.ListColumn(_network.subnet_ids), @@ -116,24 +113,30 @@ class TestCreateNetworkIdentityV3(TestNetwork): ) def setUp(self): - super(TestCreateNetworkIdentityV3, self).setUp() + super().setUp() + + self.network_client.create_network.return_value = self._network - self.network.create_network = mock.Mock(return_value=self._network) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = network.CreateNetwork(self.app, self.namespace) + self.cmd = network.CreateNetwork(self.app, None) self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain - self.network.find_qos_policy = mock.Mock(return_value=self.qos_policy) + self.network_client.find_qos_policy.return_value = self.qos_policy def test_create_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ @@ -150,11 +153,13 @@ def test_create_default_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_network.assert_called_once_with(**{ - 'admin_state_up': True, - 'name': self._network.name, - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_network.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self._network.name, + } + ) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, data) @@ -162,19 +167,31 @@ def test_create_all_options(self): arglist = [ "--disable", "--share", - "--description", self._network.description, - "--mtu", str(self._network.mtu), - "--project", self.project.name, - "--project-domain", self.domain.name, - "--availability-zone-hint", "nova", - "--external", "--default", - "--provider-network-type", "vlan", - "--provider-physical-network", "physnet1", - "--provider-segment", "400", - "--qos-policy", self.qos_policy.id, + "--description", + self._network.description, + "--mtu", + str(self._network.mtu), + "--project", + self.project.name, + "--project-domain", + self.domain.name, + "--availability-zone-hint", + "nova", + "--external", + "--default", + "--provider-network-type", + "vlan", + "--provider-physical-network", + "physnet1", + "--provider-segment", + "400", + "--qos-policy", + self.qos_policy.id, "--transparent-vlan", + "--no-qinq-vlan", "--enable-port-security", - "--dns-domain", "example.org.", + "--dns-domain", + "example.org.", self._network.name, ] verifylist = [ @@ -192,32 +209,36 @@ def test_create_all_options(self): ('segmentation_id', '400'), ('qos_policy', self.qos_policy.id), 
('transparent_vlan', True), + ('qinq_vlan', False), ('enable_port_security', True), ('name', self._network.name), ('dns_domain', 'example.org.'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_network.assert_called_once_with(**{ - 'admin_state_up': False, - 'availability_zone_hints': ["nova"], - 'name': self._network.name, - 'shared': True, - 'description': self._network.description, - 'mtu': str(self._network.mtu), - 'project_id': self.project.id, - 'is_default': True, - 'router:external': True, - 'provider:network_type': 'vlan', - 'provider:physical_network': 'physnet1', - 'provider:segmentation_id': '400', - 'qos_policy_id': self.qos_policy.id, - 'vlan_transparent': True, - 'port_security_enabled': True, - 'dns_domain': 'example.org.', - }) + self.network_client.create_network.assert_called_once_with( + **{ + 'admin_state_up': False, + 'availability_zone_hints': ["nova"], + 'name': self._network.name, + 'shared': True, + 'description': self._network.description, + 'mtu': str(self._network.mtu), + 'project_id': self.project.id, + 'is_default': True, + 'router:external': True, + 'provider:network_type': 'vlan', + 'provider:physical_network': 'physnet1', + 'provider:segmentation_id': '400', + 'qos_policy_id': self.qos_policy.id, + 'vlan_transparent': True, + 'vlan_qinq': False, + 'port_security_enabled': True, + 'dns_domain': 'example.org.', + } + ) self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, data) @@ -226,6 +247,7 @@ def test_create_other_options(self): "--enable", "--no-share", "--disable-port-security", + "--qinq-vlan", self._network.name, ] verifylist = [ @@ -233,18 +255,22 @@ def test_create_other_options(self): ('no_share', True), ('name', self._network.name), ('external', False), + ('qinq_vlan', True), ('disable_port_security', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_network.assert_called_once_with(**{ - 'admin_state_up': True, - 'name': self._network.name, - 'shared': False, - 'port_security_enabled': False, - }) + self.network_client.create_network.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self._network.name, + 'shared': False, + 'vlan_qinq': True, + 'port_security_enabled': False, + } + ) self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, data) @@ -267,16 +293,17 @@ def _test_create_with_tag(self, add_tags=True): verifylist.append(('no_tag', True)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_network.assert_called_once_with( - name=self._network.name, admin_state_up=True) + self.network_client.create_network.assert_called_once_with( + name=self._network.name, admin_state_up=True + ) if add_tags: - self.network.set_tags.assert_called_once_with( - self._network, - tests_utils.CompareBySet(['red', 'blue'])) + self.network_client.set_tags.assert_called_once_with( + self._network, tests_utils.CompareBySet(['red', 'blue']) + ) else: - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, data) @@ -286,9 +313,42 @@ def test_create_with_tags(self): def 
test_create_with_no_tag(self): self._test_create_with_tag(add_tags=False) + def test_create_with_vlan_qinq_and_transparency_enabled(self): + arglist = [ + "--transparent-vlan", + "--qinq-vlan", + self._network.name, + ] + verifylist = [] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + def test_create_with_provider_segment_without_provider_type(self): + arglist = [ + "--provider-segment", + "123", + self._network.name, + ] + verifylist = [ + ('provider_network_type', None), + ('segmentation_id', "123"), + ('name', self._network.name), + ] -class TestCreateNetworkIdentityV2(TestNetwork): + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + +class TestCreateNetworkIdentityV2( + identity_fakes_v2.FakeClientMixin, + network_fakes.FakeClientMixin, + tests_utils.TestCommand, +): project = identity_fakes_v2.FakeProject.create_one_project() # The new network created. _network = network_fakes.create_one_network( @@ -306,6 +366,7 @@ class TestCreateNetworkIdentityV2(TestNetwork): 'ipv6_address_scope', 'is_default', 'is_vlan_transparent', + 'is_vlan_qinq', 'mtu', 'name', 'port_security_enabled', @@ -346,6 +407,7 @@ class TestCreateNetworkIdentityV2(TestNetwork): network.RouterExternalColumn(_network.is_router_external), _network.is_shared, _network.is_vlan_transparent, + _network.is_vlan_qinq, _network.status, _network.segments, format_columns.ListColumn(_network.subnet_ids), @@ -355,31 +417,25 @@ class TestCreateNetworkIdentityV2(TestNetwork): ) def setUp(self): - super(TestCreateNetworkIdentityV2, self).setUp() + super().setUp() - self.network.create_network = mock.Mock(return_value=self._network) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.create_network.return_value = self._network - # Get the command object to test - self.cmd = network.CreateNetwork(self.app, self.namespace) + self.network_client.set_tags.return_value = None - # Set identity client v2. And get a shortcut to Identity client. - identity_client = identity_fakes_v2.FakeIdentityv2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - self.app.client_manager.identity = identity_client - self.identity = self.app.client_manager.identity + # Get the command object to test + self.cmd = network.CreateNetwork(self.app, None) # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.identity.tenants + self.projects_mock = self.identity_client.tenants self.projects_mock.get.return_value = self.project # There is no DomainManager Mock in fake identity v2. 
def test_create_with_project_identityv2(self): arglist = [ - "--project", self.project.name, + "--project", + self.project.name, self._network.name, ] verifylist = [ @@ -393,19 +449,23 @@ def test_create_with_project_identityv2(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_network.assert_called_once_with(**{ - 'admin_state_up': True, - 'name': self._network.name, - 'project_id': self.project.id, - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_network.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self._network.name, + 'project_id': self.project.id, + } + ) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, data) def test_create_with_domain_identityv2(self): arglist = [ - "--project", self.project.name, - "--project-domain", "domain-name", + "--project", + self.project.name, + "--project-domain", + "domain-name", self._network.name, ] verifylist = [ @@ -427,20 +487,20 @@ def test_create_with_domain_identityv2(self): class TestDeleteNetwork(TestNetwork): - def setUp(self): - super(TestDeleteNetwork, self).setUp() + super().setUp() # The networks to delete self._networks = network_fakes.create_networks(count=3) - self.network.delete_network = mock.Mock(return_value=None) + self.network_client.delete_network.return_value = None - self.network.find_network = network_fakes.get_networks( - networks=self._networks) + self.network_client.find_network = network_fakes.get_networks( + networks=self._networks + ) # Get the command object to test - self.cmd = network.DeleteNetwork(self.app, self.namespace) + self.cmd = network.DeleteNetwork(self.app, None) def test_delete_one_network(self): arglist = [ @@ -453,7 +513,9 @@ def test_delete_one_network(self): result = self.cmd.take_action(parsed_args) - self.network.delete_network.assert_called_once_with(self._networks[0]) + self.network_client.delete_network.assert_called_once_with( + self._networks[0] + ) self.assertIsNone(result) def test_delete_multiple_networks(self): @@ -470,7 +532,7 @@ def test_delete_multiple_networks(self): calls = [] for n in self._networks: calls.append(call(n)) - self.network.delete_network.assert_has_calls(calls) + self.network_client.delete_network.assert_has_calls(calls) self.assertIsNone(result) def test_delete_multiple_networks_exception(self): @@ -490,17 +552,18 @@ def test_delete_multiple_networks_exception(self): exceptions.NotFound('404'), self._networks[1], ] - self.network.find_network = mock.Mock(side_effect=ret_find) + self.network_client.find_network.side_effect = ret_find # Fake exception in delete_network() ret_delete = [ None, exceptions.NotFound('404'), ] - self.network.delete_network = mock.Mock(side_effect=ret_delete) + self.network_client.delete_network.side_effect = ret_delete - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) # The second call of find_network() should fail. So delete_network() # was only called twice. @@ -508,11 +571,10 @@ def test_delete_multiple_networks_exception(self): call(self._networks[0]), call(self._networks[1]), ] - self.network.delete_network.assert_has_calls(calls) + self.network_client.delete_network.assert_has_calls(calls) class TestListNetwork(TestNetwork): - # The networks going to be listed up. 
_network = network_fakes.create_networks(count=3) @@ -537,45 +599,49 @@ class TestListNetwork(TestNetwork): data = [] for net in _network: - data.append(( - net.id, - net.name, - format_columns.ListColumn(net.subnet_ids), - )) + data.append( + ( + net.id, + net.name, + format_columns.ListColumn(net.subnet_ids), + ) + ) data_long = [] for net in _network: - data_long.append(( - net.id, - net.name, - net.status, - net.project_id, - network.AdminStateColumn(net.is_admin_state_up), - net.is_shared, - format_columns.ListColumn(net.subnet_ids), - net.provider_network_type, - network.RouterExternalColumn(net.is_router_external), - format_columns.ListColumn(net.availability_zones), - format_columns.ListColumn(net.tags), - )) + data_long.append( + ( + net.id, + net.name, + net.status, + net.project_id, + network.AdminStateColumn(net.is_admin_state_up), + net.is_shared, + format_columns.ListColumn(net.subnet_ids), + net.provider_network_type, + network.RouterExternalColumn(net.is_router_external), + format_columns.ListColumn(net.availability_zones), + format_columns.ListColumn(net.tags), + ) + ) def setUp(self): - super(TestListNetwork, self).setUp() + super().setUp() # Get the command object to test - self.cmd = network.ListNetwork(self.app, self.namespace) + self.cmd = network.ListNetwork(self.app, None) - self.network.networks = mock.Mock(return_value=self._network) + self.network_client.networks.return_value = self._network - self._agent = \ - network_fakes.create_one_network_agent() - self.network.get_agent = mock.Mock(return_value=self._agent) + self._agent = network_fakes.create_one_network_agent() + self.network_client.get_agent.return_value = self._agent - self.network.dhcp_agent_hosting_networks = mock.Mock( - return_value=self._network) + self.network_client.dhcp_agent_hosting_networks.return_value = ( + self._network + ) # TestListTagMixin - self._tag_list_resource_mock = self.network.networks + self._tag_list_resource_mock = self.network_client.networks def test_network_list_no_options(self): arglist = [] @@ -590,7 +656,7 @@ def test_network_list_no_options(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with() + self.network_client.networks.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -609,7 +675,7 @@ def test_list_external(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( + self.network_client.networks.assert_called_once_with( **{'router:external': True, 'is_router_external': True} ) self.assertEqual(self.columns, columns) @@ -626,7 +692,7 @@ def test_list_internal(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( + self.network_client.networks.assert_called_once_with( **{'router:external': False, 'is_router_external': False} ) self.assertEqual(self.columns, columns) @@ -647,14 +713,15 @@ def test_network_list_long(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with() + self.network_client.networks.assert_called_once_with() self.assertEqual(self.columns_long, columns) self.assertCountEqual(self.data_long, list(data)) def test_list_name(self): test_name = "fakename" arglist = [ - '--name', test_name, + '--name', + test_name, ] verifylist = [ ('external', False), @@ -664,7 +731,7 @@ def test_list_name(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( + self.network_client.networks.assert_called_once_with( **{'name': test_name} ) self.assertEqual(self.columns, columns) @@ -682,7 +749,7 @@ def test_network_list_enable(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( + self.network_client.networks.assert_called_once_with( **{'admin_state_up': True, 'is_admin_state_up': True} ) self.assertEqual(self.columns, columns) @@ -692,15 +759,11 @@ def test_network_list_disable(self): arglist = [ '--disable', ] - verifylist = [ - ('long', False), - ('external', False), - ('disable', True) - ] + verifylist = [('long', False), ('external', False), ('disable', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( + self.network_client.networks.assert_called_once_with( **{'admin_state_up': False, 'is_admin_state_up': False} ) self.assertEqual(self.columns, columns) @@ -710,7 +773,8 @@ def test_network_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -718,7 +782,7 @@ def test_network_list_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( + self.network_client.networks.assert_called_once_with( **{'project_id': project.id} ) @@ -729,8 +793,10 @@ def test_network_list_project_domain(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -740,7 +806,7 @@ def test_network_list_project_domain(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.networks.assert_called_once_with(**filters) + self.network_client.networks.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -755,7 +821,7 @@ def test_network_list_share(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( + self.network_client.networks.assert_called_once_with( **{'shared': True, 'is_shared': True} ) self.assertEqual(self.columns, columns) @@ -772,7 +838,7 @@ def test_network_list_no_share(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( + 
self.network_client.networks.assert_called_once_with( **{'shared': False, 'is_shared': False} ) self.assertEqual(self.columns, columns) @@ -782,7 +848,8 @@ def test_network_list_status(self): choices = ['ACTIVE', 'BUILD', 'DOWN', 'ERROR'] test_status = random.choice(choices) arglist = [ - '--status', test_status, + '--status', + test_status, ] verifylist = [ ('long', False), @@ -791,7 +858,7 @@ def test_network_list_status(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( + self.network_client.networks.assert_called_once_with( **{'status': test_status} ) self.assertEqual(self.columns, columns) @@ -800,7 +867,8 @@ def test_network_list_status(self): def test_network_list_provider_network_type(self): network_type = self._network[0].provider_network_type arglist = [ - '--provider-network-type', network_type, + '--provider-network-type', + network_type, ] verifylist = [ ('provider_network_type', network_type), @@ -808,9 +876,11 @@ def test_network_list_provider_network_type(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( - **{'provider:network_type': network_type, - 'provider_network_type': network_type} + self.network_client.networks.assert_called_once_with( + **{ + 'provider:network_type': network_type, + 'provider_network_type': network_type, + } ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -818,7 +888,8 @@ def test_network_list_provider_network_type(self): def test_network_list_provider_physical_network(self): physical_network = self._network[0].provider_physical_network arglist = [ - '--provider-physical-network', physical_network, + '--provider-physical-network', + physical_network, ] verifylist = [ ('physical_network', physical_network), @@ -826,9 +897,11 @@ def test_network_list_provider_physical_network(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( - **{'provider:physical_network': physical_network, - 'provider_physical_network': physical_network} + self.network_client.networks.assert_called_once_with( + **{ + 'provider:physical_network': physical_network, + 'provider_physical_network': physical_network, + } ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -836,7 +909,8 @@ def test_network_list_provider_physical_network(self): def test_network_list_provider_segment(self): segmentation_id = self._network[0].provider_segmentation_id arglist = [ - '--provider-segment', segmentation_id, + '--provider-segment', + segmentation_id, ] verifylist = [ ('segmentation_id', segmentation_id), @@ -844,17 +918,17 @@ def test_network_list_provider_segment(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( - **{'provider:segmentation_id': segmentation_id, - 'provider_segmentation_id': segmentation_id} + self.network_client.networks.assert_called_once_with( + **{ + 'provider:segmentation_id': segmentation_id, + 'provider_segmentation_id': segmentation_id, + } ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_network_list_dhcp_agent(self): - arglist = [ - '--agent', self._agent.id - ] + arglist = 
['--agent', self._agent.id] verifylist = [ ('agent_id', self._agent.id), ] @@ -863,18 +937,23 @@ def test_network_list_dhcp_agent(self): columns, data = self.cmd.take_action(parsed_args) - self.network.dhcp_agent_hosting_networks.assert_called_once_with( - self._agent) + self.network_client.dhcp_agent_hosting_networks.assert_called_once_with( + self._agent + ) self.assertEqual(self.columns, columns) self.assertCountEqual(list(data), list(self.data)) def test_list_with_tag_options(self): arglist = [ - '--tags', 'red,blue', - '--any-tags', 'red,green', - '--not-tags', 'orange,yellow', - '--not-any-tags', 'black,white', + '--tags', + 'red,blue', + '--any-tags', + 'red,green', + '--not-tags', + 'orange,yellow', + '--not-any-tags', + 'black,white', ] verifylist = [ ('tags', ['red', 'blue']), @@ -885,51 +964,60 @@ def test_list_with_tag_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.networks.assert_called_once_with( - **{'tags': 'red,blue', - 'any_tags': 'red,green', - 'not_tags': 'orange,yellow', - 'not_any_tags': 'black,white'} + self.network_client.networks.assert_called_once_with( + **{ + 'tags': 'red,blue', + 'any_tags': 'red,green', + 'not_tags': 'orange,yellow', + 'not_any_tags': 'black,white', + } ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) class TestSetNetwork(TestNetwork): - # The network to set. - _network = network_fakes.create_one_network( - {'tags': ['green', 'red']}) - qos_policy = (network_fakes.FakeNetworkQosPolicy. - create_one_qos_policy(attrs={'id': _network.qos_policy_id})) + _network = network_fakes.create_one_network({'tags': ['green', 'red']}) + qos_policy = network_fakes.create_one_qos_policy( + attrs={'id': _network.qos_policy_id} + ) def setUp(self): - super(TestSetNetwork, self).setUp() + super().setUp() + + self.network_client.update_network.return_value = None + self.network_client.set_tags.return_value = None - self.network.update_network = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.find_network.return_value = self._network - self.network.find_network = mock.Mock(return_value=self._network) - self.network.find_qos_policy = mock.Mock(return_value=self.qos_policy) + self.network_client.find_qos_policy.return_value = self.qos_policy # Get the command object to test - self.cmd = network.SetNetwork(self.app, self.namespace) + self.cmd = network.SetNetwork(self.app, None) def test_set_this(self): arglist = [ self._network.name, '--enable', - '--name', 'noob', + '--name', + 'noob', '--share', - '--description', self._network.description, - '--dns-domain', 'example.org.', + '--description', + self._network.description, + '--dns-domain', + 'example.org.', '--external', '--default', - '--provider-network-type', 'vlan', - '--provider-physical-network', 'physnet1', - '--provider-segment', '400', + '--provider-network-type', + 'vlan', + '--provider-physical-network', + 'physnet1', + '--provider-segment', + '400', '--enable-port-security', - '--qos-policy', self.qos_policy.name, + '--qos-policy', + self.qos_policy.name, ] verifylist = [ ('network', self._network.name), @@ -964,8 +1052,9 @@ def test_set_this(self): 'qos_policy_id': self.qos_policy.id, 'dns_domain': 'example.org.', } - self.network.update_network.assert_called_once_with( - self._network, **attrs) + self.network_client.update_network.assert_called_once_with( + self._network, **attrs + ) self.assertIsNone(result) def 
test_set_that(self): @@ -996,19 +1085,57 @@ def test_set_that(self): 'port_security_enabled': False, 'qos_policy_id': None, } - self.network.update_network.assert_called_once_with( - self._network, **attrs) + self.network_client.update_network.assert_called_once_with( + self._network, **attrs + ) + self.assertIsNone(result) + + def test_set_to_empty(self): + # Test if empty strings are accepted to clear any of the fields, + # so once they are set to a value its possible to clear them again. + + arglist = [ + self._network.name, + '--name', + '', + '--description', + '', + '--dns-domain', + '', + ] + verifylist = [ + ('network', self._network.name), + ('description', ''), + ('name', ''), + ('dns_domain', ''), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + attrs = { + 'name': '', + 'description': '', + 'dns_domain': '', + } + self.network_client.update_network.assert_called_once_with( + self._network, **attrs + ) self.assertIsNone(result) def test_set_nothing(self): - arglist = [self._network.name, ] - verifylist = [('network', self._network.name), ] + arglist = [ + self._network.name, + ] + verifylist = [ + ('network', self._network.name), + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_network.called) - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.update_network.called) + self.assertFalse(self.network_client.set_tags.called) self.assertIsNone(result) def _test_set_tags(self, with_tags=True): @@ -1021,16 +1148,15 @@ def _test_set_tags(self, with_tags=True): verifylist = [('no_tag', True)] expected_args = [] arglist.append(self._network.name) - verifylist.append( - ('network', self._network.name)) + verifylist.append(('network', self._network.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_network.called) - self.network.set_tags.assert_called_once_with( - self._network, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_network.called) + self.network_client.set_tags.assert_called_once_with( + self._network, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_set_with_tags(self): @@ -1041,7 +1167,6 @@ def test_set_with_no_tag(self): class TestShowNetwork(TestNetwork): - # The network to show. 
_network = network_fakes.create_one_network() columns = ( @@ -1056,6 +1181,7 @@ class TestShowNetwork(TestNetwork): 'ipv6_address_scope', 'is_default', 'is_vlan_transparent', + 'is_vlan_qinq', 'mtu', 'name', 'port_security_enabled', @@ -1096,6 +1222,7 @@ class TestShowNetwork(TestNetwork): network.RouterExternalColumn(_network.is_router_external), _network.is_shared, _network.is_vlan_transparent, + _network.is_vlan_qinq, _network.status, _network.segments, format_columns.ListColumn(_network.subnet_ids), @@ -1105,19 +1232,24 @@ class TestShowNetwork(TestNetwork): ) def setUp(self): - super(TestShowNetwork, self).setUp() + super().setUp() - self.network.find_network = mock.Mock(return_value=self._network) + self.network_client.find_network.return_value = self._network # Get the command object to test - self.cmd = network.ShowNetwork(self.app, self.namespace) + self.cmd = network.ShowNetwork(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -1130,42 +1262,47 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_network.assert_called_once_with( - self._network.name, ignore_missing=False) + self.network_client.find_network.assert_called_once_with( + self._network.name, ignore_missing=False + ) self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, data) class TestUnsetNetwork(TestNetwork): - # The network to set. - _network = network_fakes.create_one_network( - {'tags': ['green', 'red']}) - qos_policy = (network_fakes.FakeNetworkQosPolicy. 
- create_one_qos_policy(attrs={'id': _network.qos_policy_id})) + _network = network_fakes.create_one_network({'tags': ['green', 'red']}) + qos_policy = network_fakes.create_one_qos_policy( + attrs={'id': _network.qos_policy_id} + ) def setUp(self): - super(TestUnsetNetwork, self).setUp() + super().setUp() + + self.network_client.update_network.return_value = None + self.network_client.set_tags.return_value = None - self.network.update_network = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.find_network.return_value = self._network - self.network.find_network = mock.Mock(return_value=self._network) - self.network.find_qos_policy = mock.Mock(return_value=self.qos_policy) + self.network_client.find_qos_policy.return_value = self.qos_policy # Get the command object to test - self.cmd = network.UnsetNetwork(self.app, self.namespace) + self.cmd = network.UnsetNetwork(self.app, None) def test_unset_nothing(self): - arglist = [self._network.name, ] - verifylist = [('network', self._network.name), ] + arglist = [ + self._network.name, + ] + verifylist = [ + ('network', self._network.name), + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_network.called) - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.update_network.called) + self.assertFalse(self.network_client.set_tags.called) self.assertIsNone(result) def _test_unset_tags(self, with_tags=True): @@ -1178,16 +1315,15 @@ def _test_unset_tags(self, with_tags=True): verifylist = [('all_tag', True)] expected_args = [] arglist.append(self._network.name) - verifylist.append( - ('network', self._network.name)) + verifylist.append(('network', self._network.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_network.called) - self.network.set_tags.assert_called_once_with( - self._network, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_network.called) + self.network_client.set_tags.assert_called_once_with( + self._network, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_unset_with_tags(self): diff --git a/openstackclient/tests/unit/network/v2/test_network_agent.py b/openstackclient/tests/unit/network/v2/test_network_agent.py index 15c4c5de9e..48b394d7a5 100644 --- a/openstackclient/tests/unit/network/v2/test_network_agent.py +++ b/openstackclient/tests/unit/network/v2/test_network_agent.py @@ -11,7 +11,6 @@ # under the License. 
# -from unittest import mock from unittest.mock import call from osc_lib.cli import format_columns @@ -23,43 +22,38 @@ class TestNetworkAgent(network_fakes.TestNetworkV2): - def setUp(self): - super(TestNetworkAgent, self).setUp() - - # Get a shortcut to the network client - self.network = self.app.client_manager.network + super().setUp() class TestAddNetworkToAgent(TestNetworkAgent): - net = network_fakes.create_one_network() agent = network_fakes.create_one_network_agent() def setUp(self): - super(TestAddNetworkToAgent, self).setUp() + super().setUp() + + self.network_client.get_agent.return_value = self.agent + self.network_client.find_network.return_value = self.net + self.network_client.name = self.network_client.find_network.name - self.network.get_agent = mock.Mock(return_value=self.agent) - self.network.find_network = mock.Mock(return_value=self.net) - self.network.name = self.network.find_network.name - self.network.add_dhcp_agent_to_network = mock.Mock() - self.cmd = network_agent.AddNetworkToAgent( - self.app, self.namespace) + self.cmd = network_agent.AddNetworkToAgent(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_add_network_to_dhcp_agent(self): - arglist = [ - '--dhcp', - self.agent.id, - self.net.id - ] + arglist = ['--dhcp', self.agent.id, self.net.id] verifylist = [ ('dhcp', True), ('agent_id', self.agent.id), @@ -69,29 +63,35 @@ def test_add_network_to_dhcp_agent(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.add_dhcp_agent_to_network.assert_called_once_with( - self.agent, self.net) + self.network_client.add_dhcp_agent_to_network.assert_called_once_with( + self.agent, self.net + ) class TestAddRouterAgent(TestNetworkAgent): - - _router = network_fakes.FakeRouter.create_one_router() + _router = network_fakes.create_one_router() _agent = network_fakes.create_one_network_agent() def setUp(self): - super(TestAddRouterAgent, self).setUp() - self.network.add_router_to_agent = mock.Mock() - self.cmd = network_agent.AddRouterToAgent(self.app, self.namespace) - self.network.get_agent = mock.Mock(return_value=self._agent) - self.network.find_router = mock.Mock(return_value=self._router) + super().setUp() + + self.network_client.get_agent.return_value = self._agent + self.network_client.find_router.return_value = self._router + + self.cmd = network_agent.AddRouterToAgent(self.app, None) def test_add_no_options(self): arglist = [] verifylist = [] # Missing agent ID will cause command to bail - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_add_router_required_options(self): arglist = [ @@ -108,21 +108,21 @@ def test_add_router_required_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.add_router_to_agent.assert_called_with( - self._agent, self._router) + self.network_client.add_router_to_agent.assert_called_with( + self._agent, self._router + ) self.assertIsNone(result) class TestDeleteNetworkAgent(TestNetworkAgent): - network_agents = 
network_fakes.create_network_agents(count=2) def setUp(self): - super(TestDeleteNetworkAgent, self).setUp() - self.network.delete_agent = mock.Mock(return_value=None) + super().setUp() + self.network_client.delete_agent.return_value = None # Get the command object to test - self.cmd = network_agent.DeleteNetworkAgent(self.app, self.namespace) + self.cmd = network_agent.DeleteNetworkAgent(self.app, None) def test_network_agent_delete(self): arglist = [ @@ -135,8 +135,9 @@ def test_network_agent_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_agent.assert_called_once_with( - self.network_agents[0].id, ignore_missing=False) + self.network_client.delete_agent.assert_called_once_with( + self.network_agents[0].id, ignore_missing=False + ) self.assertIsNone(result) def test_multi_network_agents_delete(self): @@ -154,7 +155,7 @@ def test_multi_network_agents_delete(self): calls = [] for n in self.network_agents: calls.append(call(n.id, ignore_missing=False)) - self.network.delete_agent.assert_has_calls(calls) + self.network_client.delete_agent.assert_has_calls(calls) self.assertIsNone(result) def test_multi_network_agents_delete_with_exception(self): @@ -163,15 +164,15 @@ def test_multi_network_agents_delete_with_exception(self): 'unexist_network_agent', ] verifylist = [ - ('network_agent', - [self.network_agents[0].id, 'unexist_network_agent']), + ( + 'network_agent', + [self.network_agents[0].id, 'unexist_network_agent'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) delete_mock_result = [True, exceptions.CommandError] - self.network.delete_agent = ( - mock.Mock(side_effect=delete_mock_result) - ) + self.network_client.delete_agent.side_effect = delete_mock_result try: self.cmd.take_action(parsed_args) @@ -179,14 +180,15 @@ def test_multi_network_agents_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 network agents failed to delete.', str(e)) - self.network.delete_agent.assert_any_call( - self.network_agents[0].id, ignore_missing=False) - self.network.delete_agent.assert_any_call( - 'unexist_network_agent', ignore_missing=False) + self.network_client.delete_agent.assert_any_call( + self.network_agents[0].id, ignore_missing=False + ) + self.network_client.delete_agent.assert_any_call( + 'unexist_network_agent', ignore_missing=False + ) class TestListNetworkAgent(TestNetworkAgent): - network_agents = network_fakes.create_network_agents(count=3) columns = ( @@ -196,43 +198,45 @@ class TestListNetworkAgent(TestNetworkAgent): 'Availability Zone', 'Alive', 'State', - 'Binary' + 'Binary', ) data = [] for agent in network_agents: - data.append(( - agent.id, - agent.agent_type, - agent.host, - agent.availability_zone, - network_agent.AliveColumn(agent.is_alive), - network_agent.AdminStateColumn(agent.is_admin_state_up), - agent.binary, - )) + data.append( + ( + agent.id, + agent.agent_type, + agent.host, + agent.availability_zone, + network_agent.AliveColumn(agent.is_alive), + network_agent.AdminStateColumn(agent.is_admin_state_up), + agent.binary, + ) + ) def setUp(self): - super(TestListNetworkAgent, self).setUp() - self.network.agents = mock.Mock( - return_value=self.network_agents) + super().setUp() + + self.network_client.agents.return_value = self.network_agents + self.network_client.routers_hosting_l3_agents.return_value = ( + self.network_agents + ) _testagent = network_fakes.create_one_network_agent() - self.network.get_agent = 
mock.Mock(return_value=_testagent) + self.network_client.get_agent.return_value = _testagent + self.network_client.get_agent.return_value = _testagent self._testnetwork = network_fakes.create_one_network() - self.network.find_network = mock.Mock(return_value=self._testnetwork) - self.network.network_hosting_dhcp_agents = mock.Mock( - return_value=self.network_agents) - - self.network.get_agent = mock.Mock(return_value=_testagent) + self.network_client.find_network.return_value = self._testnetwork + self.network_client.network_hosting_dhcp_agents.return_value = ( + self.network_agents + ) - self._testrouter = \ - network_fakes.FakeRouter.create_one_router() - self.network.find_router = mock.Mock(return_value=self._testrouter) - self.network.routers_hosting_l3_agents = mock.Mock( - return_value=self.network_agents) + self._testrouter = network_fakes.create_one_router() + self.network_client.find_router.return_value = self._testrouter # Get the command object to test - self.cmd = network_agent.ListNetworkAgent(self.app, self.namespace) + self.cmd = network_agent.ListNetworkAgent(self.app, None) def test_network_agents_list(self): arglist = [] @@ -241,13 +245,14 @@ def test_network_agents_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.agents.assert_called_once_with(**{}) + self.network_client.agents.assert_called_once_with(**{}) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_network_agents_list_agent_type(self): arglist = [ - '--agent-type', 'dhcp', + '--agent-type', + 'dhcp', ] verifylist = [ ('agent_type', 'dhcp'), @@ -256,15 +261,18 @@ def test_network_agents_list_agent_type(self): columns, data = self.cmd.take_action(parsed_args) - self.network.agents.assert_called_once_with(**{ - 'agent_type': 'DHCP agent', - }) + self.network_client.agents.assert_called_once_with( + **{ + 'agent_type': 'DHCP agent', + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_network_agents_list_host(self): arglist = [ - '--host', self.network_agents[0].host, + '--host', + self.network_agents[0].host, ] verifylist = [ ('host', self.network_agents[0].host), @@ -273,15 +281,18 @@ def test_network_agents_list_host(self): columns, data = self.cmd.take_action(parsed_args) - self.network.agents.assert_called_once_with(**{ - 'host': self.network_agents[0].host, - }) + self.network_client.agents.assert_called_once_with( + **{ + 'host': self.network_agents[0].host, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_network_agents_list_networks(self): arglist = [ - '--network', self._testnetwork.id, + '--network', + self._testnetwork.id, ] verifylist = [ ('network', self._testnetwork.id), @@ -290,48 +301,43 @@ def test_network_agents_list_networks(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.network_hosting_dhcp_agents.assert_called_once_with( - self._testnetwork) + self.network_client.network_hosting_dhcp_agents.assert_called_once_with( + self._testnetwork + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_network_agents_list_routers(self): arglist = [ - '--router', self._testrouter.id, - ] - verifylist = [ - ('router', self._testrouter.id), - ('long', False) + '--router', + self._testrouter.id, ] - - attrs = {self._testrouter, } + verifylist = 
[('router', self._testrouter.id), ('long', False)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.routers_hosting_l3_agents.assert_called_once_with( - *attrs) + self.network_client.routers_hosting_l3_agents.assert_called_once_with( + self._testrouter + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_network_agents_list_routers_with_long_option(self): arglist = [ - '--router', self._testrouter.id, + '--router', + self._testrouter.id, '--long', ] - verifylist = [ - ('router', self._testrouter.id), - ('long', True) - ] - - attrs = {self._testrouter, } + verifylist = [('router', self._testrouter.id), ('long', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.routers_hosting_l3_agents.assert_called_once_with( - *attrs) + self.network_client.routers_hosting_l3_agents.assert_called_once_with( + self._testrouter + ) # Add a column 'HA State' and corresponding data. router_agent_columns = self.columns + ('HA State',) @@ -342,27 +348,30 @@ def test_network_agents_list_routers_with_long_option(self): class TestRemoveNetworkFromAgent(TestNetworkAgent): - net = network_fakes.create_one_network() agent = network_fakes.create_one_network_agent() def setUp(self): - super(TestRemoveNetworkFromAgent, self).setUp() + super().setUp() + + self.network_client.get_agent.return_value = self.agent + self.network_client.find_network.return_value = self.net + self.network_client.name = self.network_client.find_network.name - self.network.get_agent = mock.Mock(return_value=self.agent) - self.network.find_network = mock.Mock(return_value=self.net) - self.network.name = self.network.find_network.name - self.network.remove_dhcp_agent_from_network = mock.Mock() - self.cmd = network_agent.RemoveNetworkFromAgent( - self.app, self.namespace) + self.cmd = network_agent.RemoveNetworkFromAgent(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_network_agents_list_routers_no_arg(self): arglist = [ @@ -371,15 +380,16 @@ def test_network_agents_list_routers_no_arg(self): verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_network_from_dhcp_agent(self): - arglist = [ - '--dhcp', - self.agent.id, - self.net.id - ] + arglist = ['--dhcp', self.agent.id, self.net.id] verifylist = [ ('dhcp', True), ('agent_id', self.agent.id), @@ -389,29 +399,35 @@ def test_network_from_dhcp_agent(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.remove_dhcp_agent_from_network.assert_called_once_with( - self.agent, self.net) + self.network_client.remove_dhcp_agent_from_network.assert_called_once_with( + self.agent, self.net + ) class TestRemoveRouterAgent(TestNetworkAgent): - _router = network_fakes.FakeRouter.create_one_router() + _router = network_fakes.create_one_router() _agent = network_fakes.create_one_network_agent() def setUp(self): - 
super(TestRemoveRouterAgent, self).setUp() - self.network.remove_router_from_agent = mock.Mock() - self.cmd = network_agent.RemoveRouterFromAgent(self.app, - self.namespace) - self.network.get_agent = mock.Mock(return_value=self._agent) - self.network.find_router = mock.Mock(return_value=self._router) + super().setUp() + + self.network_client.get_agent.return_value = self._agent + self.network_client.find_router.return_value = self._router + + self.cmd = network_agent.RemoveRouterFromAgent(self.app, None) def test_remove_no_options(self): arglist = [] verifylist = [] # Missing agent ID will cause command to bail - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_remove_router_required_options(self): arglist = [ @@ -428,22 +444,22 @@ def test_remove_router_required_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.remove_router_from_agent.assert_called_with( - self._agent, self._router) + self.network_client.remove_router_from_agent.assert_called_with( + self._agent, self._router + ) self.assertIsNone(result) class TestSetNetworkAgent(TestNetworkAgent): - _network_agent = network_fakes.create_one_network_agent() def setUp(self): - super(TestSetNetworkAgent, self).setUp() - self.network.update_agent = mock.Mock(return_value=None) - self.network.get_agent = mock.Mock(return_value=self._network_agent) + super().setUp() + self.network_client.update_agent.return_value = None + self.network_client.get_agent.return_value = self._network_agent # Get the command object to test - self.cmd = network_agent.SetNetworkAgent(self.app, self.namespace) + self.cmd = network_agent.SetNetworkAgent(self.app, None) def test_set_nothing(self): arglist = [ @@ -457,13 +473,15 @@ def test_set_nothing(self): result = self.cmd.take_action(parsed_args) attrs = {} - self.network.update_agent.assert_called_once_with( - self._network_agent, **attrs) + self.network_client.update_agent.assert_called_once_with( + self._network_agent, **attrs + ) self.assertIsNone(result) def test_set_all(self): arglist = [ - '--description', 'new_description', + '--description', + 'new_description', '--enable', self._network_agent.id, ] @@ -482,8 +500,9 @@ def test_set_all(self): 'admin_state_up': True, 'is_admin_state_up': True, } - self.network.update_agent.assert_called_once_with( - self._network_agent, **attrs) + self.network_client.update_agent.assert_called_once_with( + self._network_agent, **attrs + ) self.assertIsNone(result) def test_set_with_disable(self): @@ -504,13 +523,13 @@ def test_set_with_disable(self): 'admin_state_up': False, 'is_admin_state_up': False, } - self.network.update_agent.assert_called_once_with( - self._network_agent, **attrs) + self.network_client.update_agent.assert_called_once_with( + self._network_agent, **attrs + ) self.assertIsNone(result) class TestShowNetworkAgent(TestNetworkAgent): - _network_agent = network_fakes.create_one_network_agent() columns = ( @@ -549,20 +568,24 @@ class TestShowNetworkAgent(TestNetworkAgent): ) def setUp(self): - super(TestShowNetworkAgent, self).setUp() - self.network.get_agent = mock.Mock( - return_value=self._network_agent) + super().setUp() + self.network_client.get_agent.return_value = self._network_agent # Get the command object to test - self.cmd = network_agent.ShowNetworkAgent(self.app, self.namespace) + self.cmd 
= network_agent.ShowNetworkAgent(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -575,7 +598,8 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_agent.assert_called_once_with( - self._network_agent.id) + self.network_client.get_agent.assert_called_once_with( + self._network_agent.id + ) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(len(list(self.data)), len(list(data))) diff --git a/openstackclient/tests/unit/network/v2/test_network_auto_allocated_topology.py b/openstackclient/tests/unit/network/v2/test_network_auto_allocated_topology.py index a5dbcde196..d13bd8cf95 100644 --- a/openstackclient/tests/unit/network/v2/test_network_auto_allocated_topology.py +++ b/openstackclient/tests/unit/network/v2/test_network_auto_allocated_topology.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -from unittest import mock from openstackclient.network.v2 import network_auto_allocated_topology from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes @@ -22,9 +21,8 @@ class TestAutoAllocatedTopology(network_fakes.TestNetworkV2): def setUp(self): - super(TestAutoAllocatedTopology, self).setUp() - self.network = self.app.client_manager.network - self.projects_mock = self.app.client_manager.identity.projects + super().setUp() + self.projects_mock = self.identity_client.projects class TestCreateAutoAllocatedTopology(TestAutoAllocatedTopology): @@ -32,8 +30,7 @@ class TestCreateAutoAllocatedTopology(TestAutoAllocatedTopology): network_object = network_fakes.create_one_network() topology = network_fakes.create_one_topology( - attrs={'id': network_object.id, - 'project_id': project.id} + attrs={'id': network_object.id, 'project_id': project.id} ) columns = ( @@ -47,13 +44,14 @@ class TestCreateAutoAllocatedTopology(TestAutoAllocatedTopology): ) def setUp(self): - super(TestCreateAutoAllocatedTopology, self).setUp() + super().setUp() self.cmd = network_auto_allocated_topology.CreateAutoAllocatedTopology( - self.app, - self.namespace) - self.network.get_auto_allocated_topology = mock.Mock( - return_value=self.topology) + self.app, None + ) + self.network_client.get_auto_allocated_topology.return_value = ( + self.topology + ) def test_create_no_options(self): arglist = [] @@ -62,14 +60,17 @@ def test_create_no_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_auto_allocated_topology.assert_called_with(None) + self.network_client.get_auto_allocated_topology.assert_called_with( + None + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_project_option(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, ] verifylist = [ @@ -79,7 +80,7 @@ def test_create_project_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_auto_allocated_topology.assert_called_with( + 
self.network_client.get_auto_allocated_topology.assert_called_with( self.project.id ) @@ -88,8 +89,10 @@ def test_create_project_option(self): def test_create_project_domain_option(self): arglist = [ - '--project', self.project.id, - '--project-domain', self.project.domain_id, + '--project', + self.project.id, + '--project-domain', + self.project.domain_id, ] verifylist = [ @@ -100,7 +103,7 @@ def test_create_project_domain_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_auto_allocated_topology.assert_called_with( + self.network_client.get_auto_allocated_topology.assert_called_with( self.project.id ) @@ -119,7 +122,9 @@ def test_create_or_show_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_auto_allocated_topology.assert_called_with(None) + self.network_client.get_auto_allocated_topology.assert_called_with( + None + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -130,8 +135,7 @@ class TestValidateAutoAllocatedTopology(TestAutoAllocatedTopology): network_object = network_fakes.create_one_network() topology = network_fakes.create_one_topology( - attrs={'id': network_object.id, - 'project_id': project.id} + attrs={'id': network_object.id, 'project_id': project.id} ) columns = ( @@ -145,13 +149,14 @@ class TestValidateAutoAllocatedTopology(TestAutoAllocatedTopology): ) def setUp(self): - super(TestValidateAutoAllocatedTopology, self).setUp() + super().setUp() self.cmd = network_auto_allocated_topology.CreateAutoAllocatedTopology( - self.app, - self.namespace) - self.network.validate_auto_allocated_topology = mock.Mock( - return_value=self.topology) + self.app, None + ) + self.network_client.validate_auto_allocated_topology.return_value = ( + self.topology + ) def test_show_dry_run_no_project(self): arglist = [ @@ -164,13 +169,15 @@ def test_show_dry_run_no_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.validate_auto_allocated_topology.assert_called_with( - None) + self.network_client.validate_auto_allocated_topology.assert_called_with( + None + ) def test_show_dry_run_project_option(self): arglist = [ '--check-resources', - '--project', self.project.id, + '--project', + self.project.id, ] verifylist = [ ('check_resources', True), @@ -180,14 +187,17 @@ def test_show_dry_run_project_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.validate_auto_allocated_topology.assert_called_with( - self.project.id) + self.network_client.validate_auto_allocated_topology.assert_called_with( + self.project.id + ) def test_show_dry_run_project_domain_option(self): arglist = [ '--check-resources', - '--project', self.project.id, - '--project-domain', self.project.domain_id, + '--project', + self.project.id, + '--project-domain', + self.project.domain_id, ] verifylist = [ ('check_resources', True), @@ -198,8 +208,9 @@ def test_show_dry_run_project_domain_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.validate_auto_allocated_topology.assert_called_with( - self.project.id) + self.network_client.validate_auto_allocated_topology.assert_called_with( + self.project.id + ) class 
TestDeleteAutoAllocatedTopology(TestAutoAllocatedTopology): @@ -207,18 +218,16 @@ class TestDeleteAutoAllocatedTopology(TestAutoAllocatedTopology): network_object = network_fakes.create_one_network() topology = network_fakes.create_one_topology( - attrs={'id': network_object.id, - 'project_id': project.id} + attrs={'id': network_object.id, 'project_id': project.id} ) def setUp(self): - super(TestDeleteAutoAllocatedTopology, self).setUp() + super().setUp() self.cmd = network_auto_allocated_topology.DeleteAutoAllocatedTopology( - self.app, - self.namespace) - self.network.delete_auto_allocated_topology = mock.Mock( - return_value=None) + self.app, None + ) + self.network_client.delete_auto_allocated_topology.return_value = None def test_delete_no_project(self): arglist = [] @@ -227,14 +236,16 @@ def test_delete_no_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_auto_allocated_topology.assert_called_once_with( - None) + self.network_client.delete_auto_allocated_topology.assert_called_once_with( + None + ) self.assertIsNone(result) def test_delete_project_arg(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, ] verifylist = [ ('project', self.project.id), @@ -243,15 +254,18 @@ def test_delete_project_arg(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_auto_allocated_topology.assert_called_once_with( - self.project.id) + self.network_client.delete_auto_allocated_topology.assert_called_once_with( + self.project.id + ) self.assertIsNone(result) def test_delete_project_domain_arg(self): arglist = [ - '--project', self.project.id, - '--project-domain', self.project.domain_id, + '--project', + self.project.id, + '--project-domain', + self.project.domain_id, ] verifylist = [ ('project', self.project.id), @@ -261,7 +275,8 @@ def test_delete_project_domain_arg(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_auto_allocated_topology.assert_called_once_with( - self.project.id) + self.network_client.delete_auto_allocated_topology.assert_called_once_with( + self.project.id + ) self.assertIsNone(result) diff --git a/openstackclient/tests/unit/network/v2/test_network_compute.py b/openstackclient/tests/unit/network/v2/test_network_compute.py index 89330fffe9..e08e6baf0c 100644 --- a/openstackclient/tests/unit/network/v2/test_network_compute.py +++ b/openstackclient/tests/unit/network/v2/test_network_compute.py @@ -12,33 +12,18 @@ # from unittest import mock -from unittest.mock import call from osc_lib import exceptions +from openstackclient.api import compute_v2 from openstackclient.network.v2 import network from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes from openstackclient.tests.unit import utils as tests_utils -# Tests for Nova network -# -class TestNetworkCompute(compute_fakes.TestComputev2): - - def setUp(self): - super(TestNetworkCompute, self).setUp() - - # Get a shortcut to the compute client - self.compute = self.app.client_manager.compute - - -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.network_create' -) -class TestCreateNetworkCompute(TestNetworkCompute): - - # The network to create. 
- _network = compute_fakes.FakeNetwork.create_one_network() +@mock.patch.object(compute_v2, 'create_network') +class TestCreateNetworkCompute(compute_fakes.TestComputev2): + _network = compute_fakes.create_one_network() columns = ( 'bridge', @@ -111,11 +96,10 @@ class TestCreateNetworkCompute(TestNetworkCompute): ) def setUp(self): - super(TestCreateNetworkCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - # Get the command object to test self.cmd = network.CreateNetwork(self.app, None) def test_network_create_no_options(self, net_mock): @@ -123,7 +107,6 @@ def test_network_create_no_options(self, net_mock): arglist = [] verifylist = [] - # Missing required args should raise exception here self.assertRaises( tests_utils.ParserException, self.check_parser, @@ -141,7 +124,6 @@ def test_network_create_missing_options(self, net_mock): ('name', self._network['label']), ] - # Missing required args should raise exception here self.assertRaises( tests_utils.ParserException, self.check_parser, @@ -153,7 +135,8 @@ def test_network_create_missing_options(self, net_mock): def test_network_create_default_options(self, net_mock): net_mock.return_value = self._network arglist = [ - "--subnet", self._network['cidr'], + "--subnet", + self._network['cidr'], self._network['label'], ] verifylist = [ @@ -164,36 +147,30 @@ def test_network_create_default_options(self, net_mock): columns, data = self.cmd.take_action(parsed_args) - net_mock.assert_called_once_with(**{ - 'subnet': self._network['cidr'], - 'name': self._network['label'], - }) + net_mock.assert_called_once_with( + self.compute_client, + subnet=self._network['cidr'], + name=self._network['label'], + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.network_delete' -) -class TestDeleteNetworkCompute(TestNetworkCompute): - +@mock.patch.object(compute_v2, 'delete_network') +@mock.patch.object(compute_v2, 'find_network') +class TestDeleteNetworkCompute(compute_fakes.TestComputev2): def setUp(self): - super(TestDeleteNetworkCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - # The networks to delete - self._networks = compute_fakes.FakeNetwork.create_networks(count=3) - - # Return value of utils.find_resource() - self.compute.api.network_find = \ - compute_fakes.FakeNetwork.get_networks(networks=self._networks) + self._networks = compute_fakes.create_networks(count=3) - # Get the command object to test self.cmd = network.DeleteNetwork(self.app, None) - def test_network_delete_one(self, net_mock): - net_mock.return_value = mock.Mock(return_value=None) + def test_network_delete_one(self, find_net_mock, delete_net_mock): + find_net_mock.side_effect = self._networks + delete_net_mock.return_value = mock.Mock(return_value=None) arglist = [ self._networks[0]['label'], ] @@ -204,35 +181,44 @@ def test_network_delete_one(self, net_mock): result = self.cmd.take_action(parsed_args) - net_mock.assert_called_once_with( - self._networks[0]['label'], + delete_net_mock.assert_called_once_with( + self.compute_client, + self._networks[0]['id'], ) self.assertIsNone(result) - def test_network_delete_multi(self, net_mock): - net_mock.return_value = mock.Mock(return_value=None) - arglist = [] - for n in self._networks: - arglist.append(n['id']) + def test_network_delete_multi(self, find_net_mock, delete_net_mock): + find_net_mock.side_effect = self._networks + delete_net_mock.return_value = 
mock.Mock(return_value=None) + arglist = [ + self._networks[0]['id'], + self._networks[1]['id'], + ] verifylist = [ ('network', arglist), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - calls = [] - for n in self._networks: - calls.append(call(n['id'])) - net_mock.assert_has_calls(calls) + delete_net_mock.assert_has_calls( + [ + mock.call(self.compute_client, self._networks[0]['id']), + mock.call(self.compute_client, self._networks[1]['id']), + ] + ) self.assertIsNone(result) - def test_network_delete_multi_with_exception(self, net_mock): - net_mock.return_value = mock.Mock(return_value=None) - net_mock.side_effect = ([ - mock.Mock(return_value=None), - exceptions.CommandError, - ]) + def test_network_delete_multi_with_exception( + self, find_net_mock, delete_net_mock + ): + find_net_mock.side_effect = [ + self._networks[0], + exceptions.NotFound('foo'), + self._networks[1], + ] + delete_net_mock.return_value = mock.Mock(return_value=None) + arglist = [ self._networks[0]['id'], 'xxxx-yyyy-zzzz', @@ -243,24 +229,31 @@ def test_network_delete_multi_with_exception(self, net_mock): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('2 of 3 networks failed to delete.', str(e)) - - net_mock.assert_any_call(self._networks[0]['id']) - net_mock.assert_any_call(self._networks[1]['id']) - net_mock.assert_any_call('xxxx-yyyy-zzzz') - + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertEqual('1 of 3 networks failed to delete.', str(exc)) + + find_net_mock.assert_has_calls( + [ + mock.call(self.compute_client, self._networks[0]['id']), + mock.call(self.compute_client, 'xxxx-yyyy-zzzz'), + mock.call(self.compute_client, self._networks[1]['id']), + ] + ) + delete_net_mock.assert_has_calls( + [ + mock.call(self.compute_client, self._networks[0]['id']), + mock.call(self.compute_client, self._networks[1]['id']), + ] + ) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.network_list' -) -class TestListNetworkCompute(TestNetworkCompute): - # The networks going to be listed up. - _networks = compute_fakes.FakeNetwork.create_networks(count=3) +@mock.patch.object(compute_v2, 'list_networks') +class TestListNetworkCompute(compute_fakes.TestComputev2): + _networks = compute_fakes.create_networks(count=3) columns = ( 'ID', @@ -270,18 +263,19 @@ class TestListNetworkCompute(TestNetworkCompute): data = [] for net in _networks: - data.append(( - net['id'], - net['label'], - net['cidr'], - )) + data.append( + ( + net['id'], + net['label'], + net['cidr'], + ) + ) def setUp(self): - super(TestListNetworkCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - # Get the command object to test self.cmd = network.ListNetwork(self.app, None) def test_network_list_no_options(self, net_mock): @@ -290,23 +284,16 @@ def test_network_list_no_options(self, net_mock): verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class Lister in cliff, abstract method take_action() - # returns a tuple containing the column names and an iterable - # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - net_mock.assert_called_once_with() + net_mock.assert_called_once_with(self.compute_client) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.network_find' -) -class TestShowNetworkCompute(TestNetworkCompute): - - # The network to show. - _network = compute_fakes.FakeNetwork.create_one_network() +@mock.patch.object(compute_v2, 'find_network') +class TestShowNetworkCompute(compute_fakes.TestComputev2): + _network = compute_fakes.create_one_network() columns = ( 'bridge', @@ -379,11 +366,10 @@ class TestShowNetworkCompute(TestNetworkCompute): ) def setUp(self): - super(TestShowNetworkCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - # Get the command object to test self.cmd = network.ShowNetwork(self.app, None) def test_show_no_options(self, net_mock): @@ -411,6 +397,8 @@ def test_show_all_options(self, net_mock): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - net_mock.assert_called_once_with(self._network['label']) + net_mock.assert_called_once_with( + self.compute_client, self._network['label'] + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/network/v2/test_network_flavor.py b/openstackclient/tests/unit/network/v2/test_network_flavor.py index 3149def6bb..10038e3933 100644 --- a/openstackclient/tests/unit/network/v2/test_network_flavor.py +++ b/openstackclient/tests/unit/network/v2/test_network_flavor.py @@ -25,46 +25,44 @@ class TestNetworkFlavor(network_fakes.TestNetworkV2): - def setUp(self): - super(TestNetworkFlavor, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestAddNetworkFlavorToProfile(TestNetworkFlavor): - network_flavor = network_fakes.create_one_network_flavor() service_profile = network_fakes.create_one_service_profile() def setUp(self): - super(TestAddNetworkFlavorToProfile, self).setUp() - self.network.find_flavor = mock.Mock(return_value=self.network_flavor) - self.network.find_service_profile = mock.Mock( - return_value=self.service_profile) - self.network.associate_flavor_with_service_profile = mock.Mock() + super().setUp() + + self.network_client.find_flavor.return_value = self.network_flavor + self.network_client.find_service_profile.return_value = ( + self.service_profile + ) - self.cmd = network_flavor.AddNetworkFlavorToProfile( - self.app, self.namespace) + self.cmd = network_flavor.AddNetworkFlavorToProfile(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_add_flavor_to_service_profile(self): - arglist = [ - self.network_flavor.id, - self.service_profile.id - ] + arglist = [self.network_flavor.id, self.service_profile.id] verifylist = [ ('flavor', 
self.network_flavor.id), ('service_profile', self.service_profile.id), @@ -73,12 +71,12 @@ def test_add_flavor_to_service_profile(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.associate_flavor_with_service_profile.\ - assert_called_once_with(self.network_flavor, self.service_profile) + self.network_client.associate_flavor_with_service_profile.assert_called_once_with( # noqa: E501 + self.network_flavor, self.service_profile + ) class TestCreateNetworkFlavor(TestNetworkFlavor): - project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() # The new network flavor created. @@ -101,12 +99,13 @@ class TestCreateNetworkFlavor(TestNetworkFlavor): ) def setUp(self): - super(TestCreateNetworkFlavor, self).setUp() - self.network.create_flavor = mock.Mock( - return_value=self.new_network_flavor) + super().setUp() + self.network_client.create_flavor.return_value = ( + self.new_network_flavor + ) # Get the command object to test - self.cmd = network_flavor.CreateNetworkFlavor(self.app, self.namespace) + self.cmd = network_flavor.CreateNetworkFlavor(self.app, None) self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain @@ -116,12 +115,18 @@ def test_create_no_options(self): verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ - '--service-type', self.new_network_flavor.service_type, + '--service-type', + self.new_network_flavor.service_type, self.new_network_flavor.name, ] verifylist = [ @@ -130,22 +135,28 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_flavor.assert_called_once_with(**{ - 'service_type': self.new_network_flavor.service_type, - 'name': self.new_network_flavor.name, - }) + self.network_client.create_flavor.assert_called_once_with( + **{ + 'service_type': self.new_network_flavor.service_type, + 'name': self.new_network_flavor.name, + } + ) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(set(self.data), set(data)) def test_create_all_options(self): arglist = [ - '--description', self.new_network_flavor.description, + '--description', + self.new_network_flavor.description, '--enable', - '--project', self.project.id, - '--project-domain', self.domain.name, - '--service-type', self.new_network_flavor.service_type, + '--project', + self.project.id, + '--project-domain', + self.domain.name, + '--service-type', + self.new_network_flavor.service_type, self.new_network_flavor.name, ] verifylist = [ @@ -158,22 +169,25 @@ def test_create_all_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_flavor.assert_called_once_with(**{ - 'description': self.new_network_flavor.description, - 'enabled': True, - 'project_id': self.project.id, - 'service_type': self.new_network_flavor.service_type, - 'name': self.new_network_flavor.name, - }) + self.network_client.create_flavor.assert_called_once_with( + **{ + 
'description': self.new_network_flavor.description, + 'enabled': True, + 'project_id': self.project.id, + 'service_type': self.new_network_flavor.service_type, + 'name': self.new_network_flavor.name, + } + ) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(set(self.data), set(data)) def test_create_disable(self): arglist = [ '--disable', - '--service-type', self.new_network_flavor.service_type, + '--service-type', + self.new_network_flavor.service_type, self.new_network_flavor.name, ] verifylist = [ @@ -185,28 +199,30 @@ def test_create_disable(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_flavor.assert_called_once_with(**{ - 'enabled': False, - 'service_type': self.new_network_flavor.service_type, - 'name': self.new_network_flavor.name, - }) + self.network_client.create_flavor.assert_called_once_with( + **{ + 'enabled': False, + 'service_type': self.new_network_flavor.service_type, + 'name': self.new_network_flavor.name, + } + ) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(set(self.data), set(data)) class TestDeleteNetworkFlavor(TestNetworkFlavor): - # The network flavor to delete. _network_flavors = network_fakes.create_flavor(count=2) def setUp(self): - super(TestDeleteNetworkFlavor, self).setUp() - self.network.delete_flavor = mock.Mock(return_value=None) - self.network.find_flavor = network_fakes.get_flavor( - network_flavors=self._network_flavors) + super().setUp() + self.network_client.delete_flavor.return_value = None + self.network_client.find_flavor = network_fakes.get_flavor( + network_flavors=self._network_flavors + ) # Get the command object to test - self.cmd = network_flavor.DeleteNetworkFlavor(self.app, self.namespace) + self.cmd = network_flavor.DeleteNetworkFlavor(self.app, None) def test_network_flavor_delete(self): arglist = [ @@ -219,10 +235,12 @@ def test_network_flavor_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_flavor.assert_called_once_with( - self._network_flavors[0].name, ignore_missing=False) - self.network.delete_flavor.assert_called_once_with( - self._network_flavors[0]) + self.network_client.find_flavor.assert_called_once_with( + self._network_flavors[0].name, ignore_missing=False + ) + self.network_client.delete_flavor.assert_called_once_with( + self._network_flavors[0] + ) self.assertIsNone(result) def test_multi_network_flavors_delete(self): @@ -241,7 +259,7 @@ def test_multi_network_flavors_delete(self): calls = [] for a in self._network_flavors: calls.append(mock.call(a)) - self.network.delete_flavor.assert_has_calls(calls) + self.network_client.delete_flavor.assert_has_calls(calls) self.assertIsNone(result) def test_multi_network_flavors_delete_with_exception(self): @@ -250,15 +268,15 @@ def test_multi_network_flavors_delete_with_exception(self): 'unexist_network_flavor', ] verifylist = [ - ('flavor', - [self._network_flavors[0].name, 'unexist_network_flavor']), + ( + 'flavor', + [self._network_flavors[0].name, 'unexist_network_flavor'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._network_flavors[0], exceptions.CommandError] - self.network.find_flavor = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_flavor.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -266,17 +284,18 @@ def 
test_multi_network_flavors_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 flavors failed to delete.', str(e)) - self.network.find_flavor.assert_any_call( - self._network_flavors[0].name, ignore_missing=False) - self.network.find_flavor.assert_any_call( - 'unexist_network_flavor', ignore_missing=False) - self.network.delete_flavor.assert_called_once_with( + self.network_client.find_flavor.assert_any_call( + self._network_flavors[0].name, ignore_missing=False + ) + self.network_client.find_flavor.assert_any_call( + 'unexist_network_flavor', ignore_missing=False + ) + self.network_client.delete_flavor.assert_called_once_with( self._network_flavors[0] ) class TestListNetworkFlavor(TestNetworkFlavor): - # The network flavors to list up. _network_flavors = network_fakes.create_flavor(count=2) columns = ( @@ -288,21 +307,22 @@ class TestListNetworkFlavor(TestNetworkFlavor): ) data = [] for flavor in _network_flavors: - data.append(( - flavor.id, - flavor.name, - flavor.is_enabled, - flavor.service_type, - flavor.description, - )) + data.append( + ( + flavor.id, + flavor.name, + flavor.is_enabled, + flavor.service_type, + flavor.description, + ) + ) def setUp(self): - super(TestListNetworkFlavor, self).setUp() - self.network.flavors = mock.Mock( - return_value=self._network_flavors) + super().setUp() + self.network_client.flavors.return_value = self._network_flavors # Get the command object to test - self.cmd = network_flavor.ListNetworkFlavor(self.app, self.namespace) + self.cmd = network_flavor.ListNetworkFlavor(self.app, None) def test_network_flavor_list(self): arglist = [] @@ -311,39 +331,44 @@ def test_network_flavor_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.flavors.assert_called_once_with(**{}) + self.network_client.flavors.assert_called_once_with(**{}) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(self.data, list(data)) class TestRemoveNetworkFlavorFromProfile(TestNetworkFlavor): - network_flavor = network_fakes.create_one_network_flavor() service_profile = network_fakes.create_one_service_profile() def setUp(self): - super(TestRemoveNetworkFlavorFromProfile, self).setUp() - self.network.find_flavor = mock.Mock(return_value=self.network_flavor) - self.network.find_service_profile = mock.Mock( - return_value=self.service_profile) - self.network.disassociate_flavor_from_service_profile = mock.Mock() + super().setUp() + self.network_client.find_flavor.return_value = self.network_flavor + self.network_client.find_service_profile.return_value = ( + self.service_profile + ) + self.network_client.disassociate_flavor_from_service_profile = ( + mock.Mock() + ) self.cmd = network_flavor.RemoveNetworkFlavorFromProfile( - self.app, self.namespace) + self.app, None + ) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_remove_flavor_from_service_profile(self): - arglist = [ - self.network_flavor.id, - self.service_profile.id - ] + arglist = [self.network_flavor.id, self.service_profile.id] verifylist = [ ('flavor', self.network_flavor.id), ('service_profile', self.service_profile.id), @@ -352,12 +377,12 @@ def test_remove_flavor_from_service_profile(self): parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.disassociate_flavor_from_service_profile.\ - assert_called_once_with(self.network_flavor, self.service_profile) + self.network_client.disassociate_flavor_from_service_profile.assert_called_once_with( # noqa: E501 + self.network_flavor, self.service_profile + ) class TestShowNetworkFlavor(TestNetworkFlavor): - # The network flavor to show. new_network_flavor = network_fakes.create_one_network_flavor() columns = ( @@ -378,20 +403,24 @@ class TestShowNetworkFlavor(TestNetworkFlavor): ) def setUp(self): - super(TestShowNetworkFlavor, self).setUp() - self.network.find_flavor = mock.Mock( - return_value=self.new_network_flavor) + super().setUp() + self.network_client.find_flavor.return_value = self.new_network_flavor # Get the command object to test - self.cmd = network_flavor.ShowNetworkFlavor(self.app, self.namespace) + self.cmd = network_flavor.ShowNetworkFlavor(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -404,29 +433,29 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_flavor.assert_called_once_with( - self.new_network_flavor.name, ignore_missing=False) + self.network_client.find_flavor.assert_called_once_with( + self.new_network_flavor.name, ignore_missing=False + ) self.assertEqual(set(self.columns), set(columns)) self.assertEqual(set(self.data), set(data)) class TestSetNetworkFlavor(TestNetworkFlavor): - # The network flavor to set. 
- new_network_flavor = ( - network_fakes.create_one_network_flavor()) + new_network_flavor = network_fakes.create_one_network_flavor() def setUp(self): - super(TestSetNetworkFlavor, self).setUp() - self.network.update_flavor = mock.Mock(return_value=None) - self.network.find_flavor = mock.Mock( - return_value=self.new_network_flavor) + super().setUp() + self.network_client.update_flavor.return_value = None + self.network_client.find_flavor.return_value = self.new_network_flavor # Get the command object to test - self.cmd = network_flavor.SetNetworkFlavor(self.app, self.namespace) + self.cmd = network_flavor.SetNetworkFlavor(self.app, None) def test_set_nothing(self): - arglist = [self.new_network_flavor.name, ] + arglist = [ + self.new_network_flavor.name, + ] verifylist = [ ('flavor', self.new_network_flavor.name), ] @@ -435,13 +464,15 @@ def test_set_nothing(self): result = self.cmd.take_action(parsed_args) attrs = {} - self.network.update_flavor.assert_called_with( - self.new_network_flavor, **attrs) + self.network_client.update_flavor.assert_called_with( + self.new_network_flavor, **attrs + ) self.assertIsNone(result) def test_set_name_and_enable(self): arglist = [ - '--name', 'new_network_flavor', + '--name', + 'new_network_flavor', '--enable', self.new_network_flavor.name, ] @@ -457,8 +488,9 @@ def test_set_name_and_enable(self): 'name': "new_network_flavor", 'enabled': True, } - self.network.update_flavor.assert_called_with( - self.new_network_flavor, **attrs) + self.network_client.update_flavor.assert_called_with( + self.new_network_flavor, **attrs + ) self.assertIsNone(result) def test_set_disable(self): @@ -476,6 +508,7 @@ def test_set_disable(self): attrs = { 'enabled': False, } - self.network.update_flavor.assert_called_with( - self.new_network_flavor, **attrs) + self.network_client.update_flavor.assert_called_with( + self.new_network_flavor, **attrs + ) self.assertIsNone(result) diff --git a/openstackclient/tests/unit/network/v2/test_network_flavor_profile.py b/openstackclient/tests/unit/network/v2/test_network_flavor_profile.py index 5c2b9e2da9..c8235bfef8 100644 --- a/openstackclient/tests/unit/network/v2/test_network_flavor_profile.py +++ b/openstackclient/tests/unit/network/v2/test_network_flavor_profile.py @@ -20,19 +20,14 @@ class TestFlavorProfile(network_fakes.TestNetworkV2): - def setUp(self): - super(TestFlavorProfile, self).setUp() - # Get the network client - self.network = self.app.client_manager.network - # Get the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + super().setUp() + # Get the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateFlavorProfile(TestFlavorProfile): - project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() new_flavor_profile = network_fakes.create_one_service_profile() @@ -42,7 +37,6 @@ class TestCreateFlavorProfile(TestFlavorProfile): 'enabled', 'id', 'meta_info', - 'project_id', ) data = ( @@ -51,121 +45,116 @@ class TestCreateFlavorProfile(TestFlavorProfile): new_flavor_profile.is_enabled, new_flavor_profile.id, new_flavor_profile.meta_info, - new_flavor_profile.project_id, ) def setUp(self): - super(TestCreateFlavorProfile, self).setUp() - self.network.create_service_profile = mock.Mock( - return_value=self.new_flavor_profile) - self.projects_mock.get.return_value = self.project + super().setUp() + 
self.network_client.create_service_profile.return_value = ( + self.new_flavor_profile + ) + # Get the command object to test - self.cmd = (network_flavor_profile.CreateNetworkFlavorProfile( - self.app, self.namespace)) + self.cmd = network_flavor_profile.CreateNetworkFlavorProfile( + self.app, None + ) def test_create_all_options(self): arglist = [ - "--description", self.new_flavor_profile.description, - "--project", self.new_flavor_profile.project_id, - '--project-domain', self.domain.name, + "--description", + self.new_flavor_profile.description, "--enable", - "--driver", self.new_flavor_profile.driver, - "--metainfo", self.new_flavor_profile.meta_info, + "--driver", + self.new_flavor_profile.driver, + "--metainfo", + self.new_flavor_profile.meta_info, ] verifylist = [ ('description', self.new_flavor_profile.description), - ('project', self.new_flavor_profile.project_id), - ('project_domain', self.domain.name), ('enable', True), ('driver', self.new_flavor_profile.driver), - ('metainfo', self.new_flavor_profile.meta_info) + ('metainfo', self.new_flavor_profile.meta_info), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) - - self.network.create_service_profile.assert_called_once_with( - **{'description': self.new_flavor_profile.description, - 'project_id': self.project.id, - 'enabled': self.new_flavor_profile.is_enabled, - 'driver': self.new_flavor_profile.driver, - 'metainfo': self.new_flavor_profile.meta_info} + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_service_profile.assert_called_once_with( + **{ + 'description': self.new_flavor_profile.description, + 'enabled': self.new_flavor_profile.is_enabled, + 'driver': self.new_flavor_profile.driver, + 'metainfo': self.new_flavor_profile.meta_info, + } ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_with_metainfo(self): arglist = [ - "--description", self.new_flavor_profile.description, - "--project", self.new_flavor_profile.project_id, - '--project-domain', self.domain.name, + "--description", + self.new_flavor_profile.description, "--enable", - "--metainfo", self.new_flavor_profile.meta_info, + "--metainfo", + self.new_flavor_profile.meta_info, ] verifylist = [ ('description', self.new_flavor_profile.description), - ('project', self.new_flavor_profile.project_id), - ('project_domain', self.domain.name), ('enable', True), - ('metainfo', self.new_flavor_profile.meta_info) + ('metainfo', self.new_flavor_profile.meta_info), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_service_profile.assert_called_once_with( - **{'description': self.new_flavor_profile.description, - 'project_id': self.project.id, - 'enabled': self.new_flavor_profile.is_enabled, - 'metainfo': self.new_flavor_profile.meta_info} + self.network_client.create_service_profile.assert_called_once_with( + **{ + 'description': self.new_flavor_profile.description, + 'enabled': self.new_flavor_profile.is_enabled, + 'metainfo': self.new_flavor_profile.meta_info, + } ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_with_driver(self): arglist = [ - "--description", self.new_flavor_profile.description, - "--project", self.new_flavor_profile.project_id, - '--project-domain', self.domain.name, + "--description", + self.new_flavor_profile.description, 
"--enable", - "--driver", self.new_flavor_profile.driver, + "--driver", + self.new_flavor_profile.driver, ] verifylist = [ ('description', self.new_flavor_profile.description), - ('project', self.new_flavor_profile.project_id), - ('project_domain', self.domain.name), ('enable', True), ('driver', self.new_flavor_profile.driver), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) - - self.network.create_service_profile.assert_called_once_with( - **{'description': self.new_flavor_profile.description, - 'project_id': self.project.id, - 'enabled': self.new_flavor_profile.is_enabled, - 'driver': self.new_flavor_profile.driver, - } + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_service_profile.assert_called_once_with( + **{ + 'description': self.new_flavor_profile.description, + 'enabled': self.new_flavor_profile.is_enabled, + 'driver': self.new_flavor_profile.driver, + } ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_without_driver_and_metainfo(self): arglist = [ - "--description", self.new_flavor_profile.description, - "--project", self.new_flavor_profile.project_id, - '--project-domain', self.domain.name, + "--description", + self.new_flavor_profile.description, "--enable", ] verifylist = [ ('description', self.new_flavor_profile.description), - ('project', self.new_flavor_profile.project_id), - ('project_domain', self.domain.name), ('enable', True), ] @@ -180,40 +169,45 @@ def test_create_without_driver_and_metainfo(self): def test_create_disable(self): arglist = [ '--disable', - '--driver', self.new_flavor_profile.driver, + '--driver', + self.new_flavor_profile.driver, ] verifylist = [ ('disable', True), - ('driver', self.new_flavor_profile.driver) + ('driver', self.new_flavor_profile.driver), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_service_profile.assert_called_once_with(**{ - 'enabled': False, - 'driver': self.new_flavor_profile.driver, - }) + self.network_client.create_service_profile.assert_called_once_with( + **{ + 'enabled': False, + 'driver': self.new_flavor_profile.driver, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestDeleteFlavorProfile(TestFlavorProfile): - # The network flavor_profiles to delete. 
_network_flavor_profiles = network_fakes.create_service_profile(count=2) def setUp(self): - super(TestDeleteFlavorProfile, self).setUp() - self.network.delete_service_profile = mock.Mock(return_value=None) - self.network.find_service_profile = ( + super().setUp() + self.network_client.delete_service_profile.return_value = None + + self.network_client.find_service_profile = ( network_fakes.get_service_profile( - flavor_profile=self._network_flavor_profiles) + flavor_profile=self._network_flavor_profiles + ) ) # Get the command object to test self.cmd = network_flavor_profile.DeleteNetworkFlavorProfile( - self.app, self.namespace) + self.app, None + ) def test_network_flavor_profile_delete(self): arglist = [ @@ -226,10 +220,12 @@ def test_network_flavor_profile_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_service_profile.assert_called_once_with( - self._network_flavor_profiles[0].id, ignore_missing=False) - self.network.delete_service_profile.assert_called_once_with( - self._network_flavor_profiles[0]) + self.network_client.find_service_profile.assert_called_once_with( + self._network_flavor_profiles[0].id, ignore_missing=False + ) + self.network_client.delete_service_profile.assert_called_once_with( + self._network_flavor_profiles[0] + ) self.assertIsNone(result) def test_multi_network_flavor_profiles_delete(self): @@ -247,7 +243,7 @@ def test_multi_network_flavor_profiles_delete(self): calls = [] for a in self._network_flavor_profiles: calls.append(mock.call(a)) - self.network.delete_service_profile.assert_has_calls(calls) + self.network_client.delete_service_profile.assert_has_calls(calls) self.assertIsNone(result) def test_multi_network_flavor_profiles_delete_with_exception(self): @@ -256,36 +252,42 @@ def test_multi_network_flavor_profiles_delete_with_exception(self): 'unexist_network_flavor_profile', ] verifylist = [ - ('flavor_profile', - [self._network_flavor_profiles[0].id, - 'unexist_network_flavor_profile']), + ( + 'flavor_profile', + [ + self._network_flavor_profiles[0].id, + 'unexist_network_flavor_profile', + ], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - find_mock_result = [self._network_flavor_profiles[0], - exceptions.CommandError] - self.network.find_service_profile = ( - mock.Mock(side_effect=find_mock_result) - ) + find_mock_result = [ + self._network_flavor_profiles[0], + exceptions.CommandError, + ] + self.network_client.find_service_profile.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 flavor_profiles failed to delete.', - str(e)) - - self.network.find_service_profile.assert_any_call( - self._network_flavor_profiles[0].id, ignore_missing=False) - self.network.find_service_profile.assert_any_call( - 'unexist_network_flavor_profile', ignore_missing=False) - self.network.delete_service_profile.assert_called_once_with( + self.assertEqual( + '1 of 2 flavor_profiles failed to delete.', str(e) + ) + + self.network_client.find_service_profile.assert_any_call( + self._network_flavor_profiles[0].id, ignore_missing=False + ) + self.network_client.find_service_profile.assert_any_call( + 'unexist_network_flavor_profile', ignore_missing=False + ) + self.network_client.delete_service_profile.assert_called_once_with( self._network_flavor_profiles[0] ) class TestListFlavorProfile(TestFlavorProfile): - # The network flavor profiles list 
_network_flavor_profiles = network_fakes.create_service_profile(count=2) @@ -298,22 +300,26 @@ class TestListFlavorProfile(TestFlavorProfile): ) data = [] for flavor_profile in _network_flavor_profiles: - data.append(( - flavor_profile.id, - flavor_profile.driver, - flavor_profile.is_enabled, - flavor_profile.meta_info, - flavor_profile.description, - )) + data.append( + ( + flavor_profile.id, + flavor_profile.driver, + flavor_profile.is_enabled, + flavor_profile.meta_info, + flavor_profile.description, + ) + ) def setUp(self): - super(TestListFlavorProfile, self).setUp() - self.network.service_profiles = mock.Mock( - return_value=self._network_flavor_profiles) + super().setUp() + self.network_client.service_profiles.return_value = ( + self._network_flavor_profiles + ) # Get the command object to test self.cmd = network_flavor_profile.ListNetworkFlavorProfile( - self.app, self.namespace) + self.app, None + ) def test_network_flavor_profile_list(self): arglist = [] @@ -322,13 +328,12 @@ def test_network_flavor_profile_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.service_profiles.assert_called_once_with(**{}) + self.network_client.service_profiles.assert_called_once_with(**{}) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestShowFlavorProfile(TestFlavorProfile): - # The network flavor profile to show. network_flavor_profile = network_fakes.create_one_service_profile() columns = ( @@ -337,7 +342,6 @@ class TestShowFlavorProfile(TestFlavorProfile): 'enabled', 'id', 'meta_info', - 'project_id', ) data = ( network_flavor_profile.description, @@ -345,17 +349,18 @@ class TestShowFlavorProfile(TestFlavorProfile): network_flavor_profile.is_enabled, network_flavor_profile.id, network_flavor_profile.meta_info, - network_flavor_profile.project_id, ) def setUp(self): - super(TestShowFlavorProfile, self).setUp() - self.network.find_service_profile = mock.Mock( - return_value=self.network_flavor_profile) + super().setUp() + self.network_client.find_service_profile.return_value = ( + self.network_flavor_profile + ) # Get the command object to test self.cmd = network_flavor_profile.ShowNetworkFlavorProfile( - self.app, self.namespace) + self.app, None + ) def test_show_all_options(self): arglist = [ @@ -368,26 +373,29 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_service_profile.assert_called_once_with( - self.network_flavor_profile.id, ignore_missing=False) + self.network_client.find_service_profile.assert_called_once_with( + self.network_flavor_profile.id, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestSetFlavorProfile(TestFlavorProfile): - # The network flavor profile to set. 
network_flavor_profile = network_fakes.create_one_service_profile() def setUp(self): - super(TestSetFlavorProfile, self).setUp() - self.network.update_service_profile = mock.Mock(return_value=None) - self.network.find_service_profile = mock.Mock( - return_value=self.network_flavor_profile) + super().setUp() + self.network_client.update_service_profile.return_value = None + + self.network_client.find_service_profile.return_value = ( + self.network_flavor_profile + ) # Get the command object to test self.cmd = network_flavor_profile.SetNetworkFlavorProfile( - self.app, self.namespace) + self.app, None + ) def test_set_nothing(self): arglist = [self.network_flavor_profile.id] @@ -399,8 +407,9 @@ def test_set_nothing(self): result = self.cmd.take_action(parsed_args) attrs = {} - self.network.update_service_profile.assert_called_with( - self.network_flavor_profile, **attrs) + self.network_client.update_service_profile.assert_called_with( + self.network_flavor_profile, **attrs + ) self.assertIsNone(result) def test_set_enable(self): @@ -418,8 +427,9 @@ def test_set_enable(self): attrs = { 'enabled': True, } - self.network.update_service_profile.assert_called_with( - self.network_flavor_profile, **attrs) + self.network_client.update_service_profile.assert_called_with( + self.network_flavor_profile, **attrs + ) self.assertIsNone(result) def test_set_disable(self): @@ -437,6 +447,7 @@ def test_set_disable(self): attrs = { 'enabled': False, } - self.network.update_service_profile.assert_called_with( - self.network_flavor_profile, **attrs) + self.network_client.update_service_profile.assert_called_with( + self.network_flavor_profile, **attrs + ) self.assertIsNone(result) diff --git a/openstackclient/tests/unit/network/v2/test_network_meter.py b/openstackclient/tests/unit/network/v2/test_network_meter.py index 5cedf0f408..f13839a626 100644 --- a/openstackclient/tests/unit/network/v2/test_network_meter.py +++ b/openstackclient/tests/unit/network/v2/test_network_meter.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -from unittest import mock from unittest.mock import call from osc_lib import exceptions @@ -25,22 +24,18 @@ class TestMeter(network_fakes.TestNetworkV2): - def setUp(self): - super(TestMeter, self).setUp() - self.network = self.app.client_manager.network - self.projects_mock = self.app.client_manager.identity.projects - self.domains_mock = self.app.client_manager.identity.domains + super().setUp() + + self.projects_mock = self.identity_client.projects + self.domains_mock = self.identity_client.domains class TestCreateMeter(TestMeter): project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() - new_meter = ( - network_fakes.FakeNetworkMeter. 
- create_one_meter() - ) + new_meter = network_fakes.FakeNetworkMeter.create_one_meter() columns = ( 'description', 'id', @@ -58,18 +53,23 @@ class TestCreateMeter(TestMeter): ) def setUp(self): - super(TestCreateMeter, self).setUp() - self.network.create_metering_label = mock.Mock( - return_value=self.new_meter) + super().setUp() + self.network_client.create_metering_label.return_value = self.new_meter + self.projects_mock.get.return_value = self.project - self.cmd = network_meter.CreateMeter(self.app, self.namespace) + self.cmd = network_meter.CreateMeter(self.app, None) def test_create_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ @@ -81,9 +81,9 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_metering_label.assert_called_once_with( + self.network_client.create_metering_label.assert_called_once_with( **{'name': self.new_meter.name} ) self.assertEqual(self.columns, columns) @@ -91,9 +91,12 @@ def test_create_default_options(self): def test_create_all_options(self): arglist = [ - "--description", self.new_meter.description, - "--project", self.new_meter.project_id, - "--project-domain", self.domain.name, + "--description", + self.new_meter.description, + "--project", + self.new_meter.project_id, + "--project-domain", + self.domain.name, "--share", self.new_meter.name, ] @@ -107,34 +110,33 @@ def test_create_all_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_metering_label.assert_called_once_with( - **{'description': self.new_meter.description, - 'name': self.new_meter.name, - 'project_id': self.project.id, - 'shared': True, } + self.network_client.create_metering_label.assert_called_once_with( + **{ + 'description': self.new_meter.description, + 'name': self.new_meter.name, + 'project_id': self.project.id, + 'shared': True, + } ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestDeleteMeter(TestMeter): - def setUp(self): - super(TestDeleteMeter, self).setUp() + super().setUp() - self.meter_list = \ - network_fakes.FakeNetworkMeter.create_meter(count=2) + self.meter_list = network_fakes.FakeNetworkMeter.create_meter(count=2) - self.network.delete_metering_label = mock.Mock(return_value=None) + self.network_client.delete_metering_label.return_value = None - self.network.find_metering_label = network_fakes \ - .FakeNetworkMeter.get_meter( - meter=self.meter_list - ) + self.network_client.find_metering_label = ( + network_fakes.FakeNetworkMeter.get_meter(meter=self.meter_list) + ) - self.cmd = network_meter.DeleteMeter(self.app, self.namespace) + self.cmd = network_meter.DeleteMeter(self.app, None) def test_delete_one_meter(self): arglist = [ @@ -148,7 +150,7 @@ def test_delete_one_meter(self): result = self.cmd.take_action(parsed_args) - self.network.delete_metering_label.assert_called_once_with( + self.network_client.delete_metering_label.assert_called_once_with( self.meter_list[0] ) self.assertIsNone(result) @@ -168,7 +170,7 @@ def 
test_delete_multiple_meters(self): calls = [] for n in self.meter_list: calls.append(call(n)) - self.network.delete_metering_label.assert_has_calls(calls) + self.network_client.delete_metering_label.assert_has_calls(calls) self.assertIsNone(result) def test_delete_multiple_meter_exception(self): @@ -188,28 +190,27 @@ def test_delete_multiple_meter_exception(self): exceptions.NotFound('404'), self.meter_list[1], ] - self.network.find_meter = mock.Mock(side_effect=return_find) + self.network_client.find_metering_label.side_effect = return_find ret_delete = [ None, exceptions.NotFound('404'), ] - self.network.delete_metering_label = mock.Mock(side_effect=ret_delete) + self.network_client.delete_metering_label.side_effect = ret_delete - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) calls = [ call(self.meter_list[0]), call(self.meter_list[1]), ] - self.network.delete_metering_label.assert_has_calls(calls) + self.network_client.delete_metering_label.assert_has_calls(calls) class TestListMeter(TestMeter): - - meter_list = \ - network_fakes.FakeNetworkMeter.create_meter(count=2) + meter_list = network_fakes.FakeNetworkMeter.create_meter(count=2) columns = ( 'ID', @@ -221,21 +222,21 @@ class TestListMeter(TestMeter): data = [] for meters in meter_list: - data.append(( - meters.id, - meters.name, - meters.description, - meters.shared, - )) + data.append( + ( + meters.id, + meters.name, + meters.description, + meters.shared, + ) + ) def setUp(self): - super(TestListMeter, self).setUp() + super().setUp() - self.network.metering_labels = mock.Mock( - return_value=self.meter_list - ) + self.network_client.metering_labels.return_value = self.meter_list - self.cmd = network_meter.ListMeter(self.app, self.namespace) + self.cmd = network_meter.ListMeter(self.app, None) def test_meter_list(self): arglist = [] @@ -245,16 +246,13 @@ def test_meter_list(self): columns, data = self.cmd.take_action(parsed_args) - self.network.metering_labels.assert_called_with() + self.network_client.metering_labels.assert_called_with() self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestShowMeter(TestMeter): - new_meter = ( - network_fakes.FakeNetworkMeter. 
- create_one_meter() - ) + new_meter = network_fakes.FakeNetworkMeter.create_one_meter() columns = ( 'description', 'id', @@ -272,19 +270,23 @@ class TestShowMeter(TestMeter): ) def setUp(self): - super(TestShowMeter, self).setUp() + super().setUp() - self.cmd = network_meter.ShowMeter(self.app, self.namespace) + self.cmd = network_meter.ShowMeter(self.app, None) - self.network.find_metering_label = \ - mock.Mock(return_value=self.new_meter) + self.network_client.find_metering_label.return_value = self.new_meter def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_meter_show_option(self): arglist = [ @@ -297,7 +299,7 @@ def test_meter_show_option(self): columns, data = self.cmd.take_action(parsed_args) - self.network.find_metering_label.assert_called_with( + self.network_client.find_metering_label.assert_called_with( self.new_meter.name, ignore_missing=False ) self.assertEqual(self.columns, columns) diff --git a/openstackclient/tests/unit/network/v2/test_network_meter_rule.py b/openstackclient/tests/unit/network/v2/test_network_meter_rule.py index e9224fa650..407fb04bb4 100644 --- a/openstackclient/tests/unit/network/v2/test_network_meter_rule.py +++ b/openstackclient/tests/unit/network/v2/test_network_meter_rule.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -from unittest import mock from unittest.mock import call from osc_lib import exceptions @@ -26,20 +25,17 @@ class TestMeterRule(network_fakes.TestNetworkV2): def setUp(self): - super(TestMeterRule, self).setUp() - self.network = self.app.client_manager.network - self.projects_mock = self.app.client_manager.identity.projects - self.domains_mock = self.app.client_manager.identity.domains + super().setUp() + + self.projects_mock = self.identity_client.projects + self.domains_mock = self.identity_client.domains class TestCreateMeterRule(TestMeterRule): project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() - new_rule = ( - network_fakes.FakeNetworkMeterRule. 
- create_one_rule() - ) + new_rule = network_fakes.FakeNetworkMeterRule.create_one_rule() columns = ( 'destination_ip_prefix', @@ -63,29 +59,36 @@ class TestCreateMeterRule(TestMeterRule): ) def setUp(self): - super(TestCreateMeterRule, self).setUp() - fake_meter = network_fakes.FakeNetworkMeter.create_one_meter({ - 'id': self.new_rule.metering_label_id}) + super().setUp() + fake_meter = network_fakes.FakeNetworkMeter.create_one_meter( + {'id': self.new_rule.metering_label_id} + ) + + self.network_client.create_metering_label_rule.return_value = ( + self.new_rule + ) - self.network.create_metering_label_rule = mock.Mock( - return_value=self.new_rule) self.projects_mock.get.return_value = self.project - self.cmd = network_meter_rule.CreateMeterRule(self.app, - self.namespace) - self.network.find_metering_label = mock.Mock( - return_value=fake_meter) + self.cmd = network_meter_rule.CreateMeterRule(self.app, None) + self.network_client.find_metering_label.return_value = fake_meter def test_create_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ self.new_rule.metering_label_id, - "--remote-ip-prefix", self.new_rule.remote_ip_prefix, + "--remote-ip-prefix", + self.new_rule.remote_ip_prefix, ] verifylist = [ ('meter', self.new_rule.metering_label_id), @@ -93,12 +96,14 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_metering_label_rule.assert_called_once_with( - **{'direction': 'ingress', - 'metering_label_id': self.new_rule.metering_label_id, - 'remote_ip_prefix': self.new_rule.remote_ip_prefix, } + self.network_client.create_metering_label_rule.assert_called_once_with( + **{ + 'direction': 'ingress', + 'metering_label_id': self.new_rule.metering_label_id, + 'remote_ip_prefix': self.new_rule.remote_ip_prefix, + } ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -108,7 +113,8 @@ def test_create_all_options(self): "--ingress", "--include", self.new_rule.metering_label_id, - "--remote-ip-prefix", self.new_rule.remote_ip_prefix, + "--remote-ip-prefix", + self.new_rule.remote_ip_prefix, ] verifylist = [ ('ingress', True), @@ -118,13 +124,15 @@ def test_create_all_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_metering_label_rule.assert_called_once_with( - **{'direction': self.new_rule.direction, - 'excluded': self.new_rule.excluded, - 'metering_label_id': self.new_rule.metering_label_id, - 'remote_ip_prefix': self.new_rule.remote_ip_prefix, } + self.network_client.create_metering_label_rule.assert_called_once_with( + **{ + 'direction': self.new_rule.direction, + 'excluded': self.new_rule.excluded, + 'metering_label_id': self.new_rule.metering_label_id, + 'remote_ip_prefix': self.new_rule.remote_ip_prefix, + } ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -132,20 +140,19 @@ def test_create_all_options(self): class TestDeleteMeterRule(TestMeterRule): def setUp(self): - super(TestDeleteMeterRule, self).setUp() - self.rule_list = \ - 
network_fakes.FakeNetworkMeterRule.create_meter_rule( - count=2 - ) - self.network.delete_metering_label_rule = mock.Mock(return_value=None) + super().setUp() + self.rule_list = network_fakes.FakeNetworkMeterRule.create_meter_rule( + count=2 + ) + self.network_client.delete_metering_label_rule.return_value = None - self.network.find_metering_label_rule = network_fakes \ - .FakeNetworkMeterRule.get_meter_rule( + self.network_client.find_metering_label_rule = ( + network_fakes.FakeNetworkMeterRule.get_meter_rule( meter_rule=self.rule_list ) + ) - self.cmd = network_meter_rule.DeleteMeterRule(self.app, - self.namespace) + self.cmd = network_meter_rule.DeleteMeterRule(self.app, None) def test_delete_one_rule(self): arglist = [ @@ -159,7 +166,7 @@ def test_delete_one_rule(self): result = self.cmd.take_action(parsed_args) - self.network.delete_metering_label_rule.assert_called_once_with( + self.network_client.delete_metering_label_rule.assert_called_once_with( self.rule_list[0] ) self.assertIsNone(result) @@ -179,7 +186,7 @@ def test_delete_multiple_rules(self): calls = [] for rule in self.rule_list: calls.append(call(rule)) - self.network.delete_metering_label_rule.assert_has_calls(calls) + self.network_client.delete_metering_label_rule.assert_has_calls(calls) self.assertIsNone(result) def test_delete_multiple_rules_exception(self): @@ -199,33 +206,27 @@ def test_delete_multiple_rules_exception(self): exceptions.NotFound('404'), self.rule_list[1], ] - self.network.find_metering_label_rule = mock.Mock( - side_effect=return_find - ) + self.network_client.find_metering_label_rule.side_effect = return_find ret_delete = [ None, exceptions.NotFound('404'), ] - self.network.delete_metering_label_rule = mock.Mock( - side_effect=ret_delete - ) + self.network_client.delete_metering_label_rule.side_effect = ret_delete - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) calls = [ call(self.rule_list[0]), call(self.rule_list[1]), ] - self.network.delete_metering_label_rule.assert_has_calls(calls) + self.network_client.delete_metering_label_rule.assert_has_calls(calls) class TestListMeterRule(TestMeterRule): - rule_list = \ - network_fakes.FakeNetworkMeterRule.create_meter_rule( - count=2 - ) + rule_list = network_fakes.FakeNetworkMeterRule.create_meter_rule(count=2) columns = ( 'ID', @@ -233,30 +234,29 @@ class TestListMeterRule(TestMeterRule): 'Direction', 'Remote IP Prefix', 'Source IP Prefix', - 'Destination IP Prefix' + 'Destination IP Prefix', ) data = [] for rule in rule_list: - data.append(( - rule.id, - rule.excluded, - rule.direction, - rule.remote_ip_prefix, - rule.source_ip_prefix, - rule.destination_ip_prefix - )) + data.append( + ( + rule.id, + rule.excluded, + rule.direction, + rule.remote_ip_prefix, + rule.source_ip_prefix, + rule.destination_ip_prefix, + ) + ) def setUp(self): - super(TestListMeterRule, self).setUp() + super().setUp() - self.network.metering_label_rules = mock.Mock( - return_value=self.rule_list - ) + self.network_client.metering_label_rules.return_value = self.rule_list - self.cmd = network_meter_rule.ListMeterRule(self.app, - self.namespace) + self.cmd = network_meter_rule.ListMeterRule(self.app, None) def test_rule_list(self): arglist = [] @@ -266,16 +266,13 @@ def test_rule_list(self): columns, data = self.cmd.take_action(parsed_args) - self.network.metering_label_rules.assert_called_with() + self.network_client.metering_label_rules.assert_called_with() 
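# Illustrative sketch, not part of the patch: the side_effect style the delete
# tests above now use. Assigning a list to <method>.side_effect makes each
# successive call return (or raise) the next element, so the second delete can
# fail; the tests above expect that failure to surface as a CommandError.
from unittest import mock

client = mock.MagicMock()
client.delete_metering_label_rule.side_effect = [None, RuntimeError('404')]  # stand-in error type

assert client.delete_metering_label_rule('rule-1') is None
try:
    client.delete_metering_label_rule('rule-2')
except RuntimeError:
    pass  # in the real tests this is exceptions.NotFound -> CommandError
client.delete_metering_label_rule.assert_has_calls(
    [mock.call('rule-1'), mock.call('rule-2')]
)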
self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestShowMeterRule(TestMeterRule): - new_rule = ( - network_fakes.FakeNetworkMeterRule. - create_one_rule() - ) + new_rule = network_fakes.FakeNetworkMeterRule.create_one_rule() columns = ( 'destination_ip_prefix', @@ -300,20 +297,25 @@ class TestShowMeterRule(TestMeterRule): ) def setUp(self): - super(TestShowMeterRule, self).setUp() + super().setUp() - self.cmd = network_meter_rule.ShowMeterRule(self.app, - self.namespace) + self.cmd = network_meter_rule.ShowMeterRule(self.app, None) - self.network.find_metering_label_rule = \ - mock.Mock(return_value=self.new_rule) + self.network_client.find_metering_label_rule.return_value = ( + self.new_rule + ) def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_label_rule_show_option(self): arglist = [ @@ -326,7 +328,7 @@ def test_label_rule_show_option(self): columns, data = self.cmd.take_action(parsed_args) - self.network.find_metering_label_rule.assert_called_with( + self.network_client.find_metering_label_rule.assert_called_with( self.new_rule.id, ignore_missing=False ) self.assertEqual(self.columns, columns) diff --git a/openstackclient/tests/unit/network/v2/test_network_qos_policy.py b/openstackclient/tests/unit/network/v2/test_network_qos_policy.py index af4cb3fbcc..17f40ef6b9 100644 --- a/openstackclient/tests/unit/network/v2/test_network_qos_policy.py +++ b/openstackclient/tests/unit/network/v2/test_network_qos_policy.py @@ -25,26 +25,20 @@ class TestQosPolicy(network_fakes.TestNetworkV2): - def setUp(self): - super(TestQosPolicy, self).setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network + super().setUp() # Get a shortcut to the ProjectManager Mock self.projects_mock = self.app.client_manager.identity.projects class TestCreateNetworkQosPolicy(TestQosPolicy): - project = identity_fakes_v3.FakeProject.create_one_project() # The new qos policy created. 
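# Illustrative sketch of an assumed fake-factory shape, not the real helper:
# the QoS policy tests that follow call the module-level
# network_fakes.create_one_qos_policy() instead of the old
# FakeNetworkQosPolicy classmethod, and read SDK-style attribute names
# (is_shared, tags) rather than shared.
import types
import uuid


def create_one_qos_policy(attrs=None):
    """Return an object with the attributes the tests read (assumed shape)."""
    policy = {
        'id': 'qos-policy-id-' + uuid.uuid4().hex,
        'name': 'qos-policy-name-' + uuid.uuid4().hex,
        'description': 'qos-policy-description',
        'project_id': 'project-id-' + uuid.uuid4().hex,
        'is_default': False,
        'is_shared': False,
        'rules': [],
        'tags': [],
    }
    policy.update(attrs or {})
    return types.SimpleNamespace(**policy)


example = create_one_qos_policy({'project_id': 'example-project'})
assert example.is_shared is False and example.tags == []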
- new_qos_policy = ( - network_fakes.FakeNetworkQosPolicy.create_one_qos_policy( - attrs={ - 'project_id': project.id, - } - )) + new_qos_policy = network_fakes.create_one_qos_policy( + attrs={'project_id': project.id} + ) + columns = ( 'description', 'id', @@ -53,6 +47,7 @@ class TestCreateNetworkQosPolicy(TestQosPolicy): 'project_id', 'rules', 'shared', + 'tags', ) data = ( @@ -62,17 +57,18 @@ class TestCreateNetworkQosPolicy(TestQosPolicy): new_qos_policy.name, new_qos_policy.project_id, new_qos_policy.rules, - new_qos_policy.shared, + new_qos_policy.is_shared, + new_qos_policy.tags, ) def setUp(self): - super(TestCreateNetworkQosPolicy, self).setUp() - self.network.create_qos_policy = mock.Mock( - return_value=self.new_qos_policy) + super().setUp() + self.network_client.create_qos_policy.return_value = ( + self.new_qos_policy + ) # Get the command object to test - self.cmd = network_qos_policy.CreateNetworkQosPolicy( - self.app, self.namespace) + self.cmd = network_qos_policy.CreateNetworkQosPolicy(self.app, None) self.projects_mock.get.return_value = self.project @@ -81,8 +77,13 @@ def test_create_no_options(self): verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ @@ -94,20 +95,22 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_qos_policy.assert_called_once_with(**{ - 'name': self.new_qos_policy.name - }) + self.network_client.create_qos_policy.assert_called_once_with( + **{'name': self.new_qos_policy.name} + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_all_options(self): arglist = [ '--share', - '--project', self.project.name, + '--project', + self.project.name, self.new_qos_policy.name, - '--description', 'QoS policy description', + '--description', + 'QoS policy description', '--default', ] verifylist = [ @@ -121,21 +124,20 @@ def test_create_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_qos_policy.assert_called_once_with(**{ - 'shared': True, - 'project_id': self.project.id, - 'name': self.new_qos_policy.name, - 'description': 'QoS policy description', - 'is_default': True, - }) + self.network_client.create_qos_policy.assert_called_once_with( + **{ + 'shared': True, + 'project_id': self.project.id, + 'name': self.new_qos_policy.name, + 'description': 'QoS policy description', + 'is_default': True, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_no_default(self): - arglist = [ - self.new_qos_policy.name, - '--no-default' - ] + arglist = [self.new_qos_policy.name, '--no-default'] verifylist = [ ('project', None), ('name', self.new_qos_policy.name), @@ -143,33 +145,31 @@ def test_create_no_default(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_qos_policy.assert_called_once_with(**{ - 'name': self.new_qos_policy.name, - 'is_default': False, - }) + 
self.network_client.create_qos_policy.assert_called_once_with( + **{ + 'name': self.new_qos_policy.name, + 'is_default': False, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestDeleteNetworkQosPolicy(TestQosPolicy): - # The address scope to delete. - _qos_policies = ( - network_fakes.FakeNetworkQosPolicy.create_qos_policies(count=2)) + _qos_policies = network_fakes.create_qos_policies(count=2) def setUp(self): - super(TestDeleteNetworkQosPolicy, self).setUp() - self.network.delete_qos_policy = mock.Mock(return_value=None) - self.network.find_qos_policy = ( - network_fakes.FakeNetworkQosPolicy.get_qos_policies( - qos_policies=self._qos_policies) + super().setUp() + self.network_client.delete_qos_policy.return_value = None + self.network_client.find_qos_policy = network_fakes.get_qos_policies( + qos_policies=self._qos_policies ) # Get the command object to test - self.cmd = network_qos_policy.DeleteNetworkQosPolicy( - self.app, self.namespace) + self.cmd = network_qos_policy.DeleteNetworkQosPolicy(self.app, None) def test_qos_policy_delete(self): arglist = [ @@ -182,10 +182,12 @@ def test_qos_policy_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_qos_policy.assert_called_once_with( - self._qos_policies[0].name, ignore_missing=False) - self.network.delete_qos_policy.assert_called_once_with( - self._qos_policies[0]) + self.network_client.find_qos_policy.assert_called_once_with( + self._qos_policies[0].name, ignore_missing=False + ) + self.network_client.delete_qos_policy.assert_called_once_with( + self._qos_policies[0] + ) self.assertIsNone(result) def test_multi_qos_policies_delete(self): @@ -203,7 +205,7 @@ def test_multi_qos_policies_delete(self): calls = [] for a in self._qos_policies: calls.append(call(a)) - self.network.delete_qos_policy.assert_has_calls(calls) + self.network_client.delete_qos_policy.assert_has_calls(calls) self.assertIsNone(result) def test_multi_qos_policies_delete_with_exception(self): @@ -212,14 +214,13 @@ def test_multi_qos_policies_delete_with_exception(self): 'unexist_qos_policy', ] verifylist = [ - ('policy', - [self._qos_policies[0].name, 'unexist_qos_policy']), + ('policy', [self._qos_policies[0].name, 'unexist_qos_policy']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._qos_policies[0], exceptions.CommandError] - self.network.find_qos_policy = ( - mock.MagicMock(side_effect=find_mock_result) + self.network_client.find_qos_policy = mock.MagicMock( + side_effect=find_mock_result ) try: @@ -228,20 +229,20 @@ def test_multi_qos_policies_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 QoS policies failed to delete.', str(e)) - self.network.find_qos_policy.assert_any_call( - self._qos_policies[0].name, ignore_missing=False) - self.network.find_qos_policy.assert_any_call( - 'unexist_qos_policy', ignore_missing=False) - self.network.delete_qos_policy.assert_called_once_with( + self.network_client.find_qos_policy.assert_any_call( + self._qos_policies[0].name, ignore_missing=False + ) + self.network_client.find_qos_policy.assert_any_call( + 'unexist_qos_policy', ignore_missing=False + ) + self.network_client.delete_qos_policy.assert_called_once_with( self._qos_policies[0] ) class TestListNetworkQosPolicy(TestQosPolicy): - # The QoS policies to list up. 
- qos_policies = ( - network_fakes.FakeNetworkQosPolicy.create_qos_policies(count=3)) + qos_policies = network_fakes.create_qos_policies(count=3) columns = ( 'ID', 'Name', @@ -251,21 +252,22 @@ class TestListNetworkQosPolicy(TestQosPolicy): ) data = [] for qos_policy in qos_policies: - data.append(( - qos_policy.id, - qos_policy.name, - qos_policy.shared, - qos_policy.is_default, - qos_policy.project_id, - )) + data.append( + ( + qos_policy.id, + qos_policy.name, + qos_policy.is_shared, + qos_policy.is_default, + qos_policy.project_id, + ) + ) def setUp(self): - super(TestListNetworkQosPolicy, self).setUp() - self.network.qos_policies = mock.Mock(return_value=self.qos_policies) + super().setUp() + self.network_client.qos_policies.return_value = self.qos_policies # Get the command object to test - self.cmd = network_qos_policy.ListNetworkQosPolicy(self.app, - self.namespace) + self.cmd = network_qos_policy.ListNetworkQosPolicy(self.app, None) def test_qos_policy_list(self): arglist = [] @@ -274,7 +276,7 @@ def test_qos_policy_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.qos_policies.assert_called_once_with(**{}) + self.network_client.qos_policies.assert_called_once_with(**{}) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -289,7 +291,7 @@ def test_qos_policy_list_share(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.qos_policies.assert_called_once_with( + self.network_client.qos_policies.assert_called_once_with( **{'shared': True} ) self.assertEqual(self.columns, columns) @@ -305,7 +307,7 @@ def test_qos_policy_list_no_share(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.qos_policies.assert_called_once_with( + self.network_client.qos_policies.assert_called_once_with( **{'shared': False} ) self.assertEqual(self.columns, columns) @@ -315,8 +317,10 @@ def test_network_qos_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -324,7 +328,7 @@ def test_network_qos_list_project(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.qos_policies.assert_called_once_with( + self.network_client.qos_policies.assert_called_once_with( **{'project_id': project.id} ) @@ -333,22 +337,21 @@ def test_network_qos_list_project(self): class TestSetNetworkQosPolicy(TestQosPolicy): - # The QoS policy to set. 
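# Illustrative sketch with hypothetical names, not the command's code: the
# list tests above expect --share / --no-share / --project to end up as
# keyword filters passed to network_client.qos_policies(**filters).
def build_qos_policy_filters(shared=None, project_id=None):
    filters = {}
    if shared is not None:
        filters['shared'] = shared          # --share -> True, --no-share -> False
    if project_id is not None:
        filters['project_id'] = project_id  # resolved via --project/--project-domain
    return filters


assert build_qos_policy_filters(shared=True) == {'shared': True}
assert build_qos_policy_filters(project_id='pid') == {'project_id': 'pid'}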
- _qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() + _qos_policy = network_fakes.create_one_qos_policy() def setUp(self): - super(TestSetNetworkQosPolicy, self).setUp() - self.network.update_qos_policy = mock.Mock(return_value=None) - self.network.find_qos_policy = mock.Mock( - return_value=self._qos_policy) + super().setUp() + self.network_client.update_qos_policy.return_value = None + self.network_client.find_qos_policy.return_value = self._qos_policy # Get the command object to test - self.cmd = network_qos_policy.SetNetworkQosPolicy(self.app, - self.namespace) + self.cmd = network_qos_policy.SetNetworkQosPolicy(self.app, None) def test_set_nothing(self): - arglist = [self._qos_policy.name, ] + arglist = [ + self._qos_policy.name, + ] verifylist = [ ('policy', self._qos_policy.name), ] @@ -357,15 +360,18 @@ def test_set_nothing(self): result = self.cmd.take_action(parsed_args) attrs = {} - self.network.update_qos_policy.assert_called_with( - self._qos_policy, **attrs) + self.network_client.update_qos_policy.assert_called_with( + self._qos_policy, **attrs + ) self.assertIsNone(result) def test_set_name_share_description_default(self): arglist = [ - '--name', 'new_qos_policy', + '--name', + 'new_qos_policy', '--share', - '--description', 'QoS policy description', + '--description', + 'QoS policy description', '--default', self._qos_policy.name, ] @@ -385,8 +391,9 @@ def test_set_name_share_description_default(self): 'shared': True, 'is_default': True, } - self.network.update_qos_policy.assert_called_with( - self._qos_policy, **attrs) + self.network_client.update_qos_policy.assert_called_with( + self._qos_policy, **attrs + ) self.assertIsNone(result) def test_set_no_share_no_default(self): @@ -403,20 +410,16 @@ def test_set_no_share_no_default(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - attrs = { - 'shared': False, - 'is_default': False - } - self.network.update_qos_policy.assert_called_with( - self._qos_policy, **attrs) + attrs = {'shared': False, 'is_default': False} + self.network_client.update_qos_policy.assert_called_with( + self._qos_policy, **attrs + ) self.assertIsNone(result) class TestShowNetworkQosPolicy(TestQosPolicy): - # The QoS policy to show. 
- _qos_policy = ( - network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()) + _qos_policy = network_fakes.create_one_qos_policy() columns = ( 'description', 'id', @@ -425,6 +428,7 @@ class TestShowNetworkQosPolicy(TestQosPolicy): 'project_id', 'rules', 'shared', + 'tags', ) data = ( _qos_policy.description, @@ -432,25 +436,30 @@ class TestShowNetworkQosPolicy(TestQosPolicy): _qos_policy.is_default, _qos_policy.name, _qos_policy.project_id, - _qos_policy.rules, - _qos_policy.shared, + network_qos_policy.RulesColumn(_qos_policy.rules), + _qos_policy.is_shared, + _qos_policy.tags, ) def setUp(self): - super(TestShowNetworkQosPolicy, self).setUp() - self.network.find_qos_policy = mock.Mock(return_value=self._qos_policy) + super().setUp() + self.network_client.find_qos_policy.return_value = self._qos_policy # Get the command object to test - self.cmd = network_qos_policy.ShowNetworkQosPolicy(self.app, - self.namespace) + self.cmd = network_qos_policy.ShowNetworkQosPolicy(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -463,7 +472,8 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_qos_policy.assert_called_once_with( - self._qos_policy.name, ignore_missing=False) + self.network_client.find_qos_policy.assert_called_once_with( + self._qos_policy.name, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertEqual(list(self.data), list(data)) diff --git a/openstackclient/tests/unit/network/v2/test_network_qos_rule.py b/openstackclient/tests/unit/network/v2/test_network_qos_rule.py index c7de8160c5..4300304022 100644 --- a/openstackclient/tests/unit/network/v2/test_network_qos_rule.py +++ b/openstackclient/tests/unit/network/v2/test_network_qos_rule.py @@ -14,6 +14,7 @@ # under the License. from unittest import mock +import uuid from osc_lib import exceptions @@ -26,39 +27,55 @@ RULE_TYPE_DSCP_MARKING = 'dscp-marking' RULE_TYPE_MINIMUM_BANDWIDTH = 'minimum-bandwidth' RULE_TYPE_MINIMUM_PACKET_RATE = 'minimum-packet-rate' -DSCP_VALID_MARKS = [0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, - 34, 36, 38, 40, 46, 48, 56] +DSCP_VALID_MARKS = [ + 0, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 46, + 48, + 56, +] class TestNetworkQosRule(network_fakes.TestNetworkV2): - def setUp(self): - super(TestNetworkQosRule, self).setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network - self.qos_policy = (network_fakes.FakeNetworkQosPolicy. 
- create_one_qos_policy()) - self.network.find_qos_policy = mock.Mock(return_value=self.qos_policy) + super().setUp() + self.qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = self.qos_policy class TestCreateNetworkQosRuleMinimumBandwidth(TestNetworkQosRule): - def test_check_type_parameters(self): pass def setUp(self): - super(TestCreateNetworkQosRuleMinimumBandwidth, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_MINIMUM_BANDWIDTH} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_MINIMUM_BANDWIDTH, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.columns = ( 'direction', 'id', 'min_kbps', 'project_id', - 'qos_policy_id', - 'type' + 'type', ) self.data = ( @@ -66,28 +83,34 @@ def setUp(self): self.new_rule.id, self.new_rule.min_kbps, self.new_rule.project_id, - self.new_rule.qos_policy_id, self.new_rule.type, ) - self.network.create_qos_minimum_bandwidth_rule = mock.Mock( - return_value=self.new_rule) + self.network_client.create_qos_minimum_bandwidth_rule.return_value = ( + self.new_rule + ) # Get the command object to test - self.cmd = network_qos_rule.CreateNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.CreateNetworkQosRule(self.app, None) def test_create_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ - '--type', RULE_TYPE_MINIMUM_BANDWIDTH, - '--min-kbps', str(self.new_rule.min_kbps), + '--type', + RULE_TYPE_MINIMUM_BANDWIDTH, + '--min-kbps', + str(self.new_rule.min_kbps), '--egress', self.new_rule.qos_policy_id, ] @@ -100,20 +123,24 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_qos_minimum_bandwidth_rule.assert_called_once_with( + self.network_client.create_qos_minimum_bandwidth_rule.assert_called_once_with( self.qos_policy.id, - **{'min_kbps': self.new_rule.min_kbps, - 'direction': self.new_rule.direction} + **{ + 'min_kbps': self.new_rule.min_kbps, + 'direction': self.new_rule.direction, + }, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_wrong_options(self): arglist = [ - '--type', RULE_TYPE_MINIMUM_BANDWIDTH, - '--max-kbps', '10000', + '--type', + RULE_TYPE_MINIMUM_BANDWIDTH, + '--max-kbps', + '10000', self.new_rule.qos_policy_id, ] @@ -127,30 +154,31 @@ def test_create_wrong_options(self): try: self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to create Network QoS rule: "Create" rule command ' - 'for type "minimum-bandwidth" requires arguments: ' - 'direction, min_kbps') + msg = ( + 'Failed to create Network QoS rule: "Create" rule command ' + 'for type "minimum-bandwidth" requires arguments: ' + 'direction, min_kbps' + ) self.assertEqual(msg, str(e)) class TestCreateNetworkQosRuleMinimumPacketRate(TestNetworkQosRule): - def test_check_type_parameters(self): pass def setUp(self): - super(TestCreateNetworkQosRuleMinimumPacketRate, self).setUp() - attrs = 
{'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_MINIMUM_PACKET_RATE} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_MINIMUM_PACKET_RATE, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.columns = ( 'direction', 'id', 'min_kpps', 'project_id', - 'qos_policy_id', - 'type' + 'type', ) self.data = ( @@ -158,28 +186,32 @@ def setUp(self): self.new_rule.id, self.new_rule.min_kpps, self.new_rule.project_id, - self.new_rule.qos_policy_id, self.new_rule.type, ) - self.network.create_qos_minimum_packet_rate_rule = mock.Mock( - return_value=self.new_rule) + self.network_client.create_qos_minimum_packet_rate_rule.return_value = self.new_rule # Get the command object to test - self.cmd = network_qos_rule.CreateNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.CreateNetworkQosRule(self.app, None) def test_create_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ - '--type', RULE_TYPE_MINIMUM_PACKET_RATE, - '--min-kpps', str(self.new_rule.min_kpps), + '--type', + RULE_TYPE_MINIMUM_PACKET_RATE, + '--min-kpps', + str(self.new_rule.min_kpps), '--egress', self.new_rule.qos_policy_id, ] @@ -192,20 +224,24 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_qos_minimum_packet_rate_rule.\ - assert_called_once_with( - self.qos_policy.id, - **{'min_kpps': self.new_rule.min_kpps, - 'direction': self.new_rule.direction}) + self.network_client.create_qos_minimum_packet_rate_rule.assert_called_once_with( # noqa: E501 + self.qos_policy.id, + **{ + 'min_kpps': self.new_rule.min_kpps, + 'direction': self.new_rule.direction, + }, + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_wrong_options(self): arglist = [ - '--type', RULE_TYPE_MINIMUM_PACKET_RATE, - '--min-kbps', '10000', + '--type', + RULE_TYPE_MINIMUM_PACKET_RATE, + '--min-kbps', + '10000', self.new_rule.qos_policy_id, ] @@ -219,57 +255,64 @@ def test_create_wrong_options(self): try: self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to create Network QoS rule: "Create" rule command ' - 'for type "minimum-packet-rate" requires arguments: ' - 'direction, min_kpps') + msg = ( + 'Failed to create Network QoS rule: "Create" rule command ' + 'for type "minimum-packet-rate" requires arguments: ' + 'direction, min_kpps' + ) self.assertEqual(msg, str(e)) class TestCreateNetworkQosRuleDSCPMarking(TestNetworkQosRule): - def test_check_type_parameters(self): pass def setUp(self): - super(TestCreateNetworkQosRuleDSCPMarking, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_DSCP_MARKING} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_DSCP_MARKING, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.columns = ( 'dscp_mark', 'id', 'project_id', - 'qos_policy_id', - 'type' + 'type', 
) self.data = ( self.new_rule.dscp_mark, self.new_rule.id, self.new_rule.project_id, - self.new_rule.qos_policy_id, self.new_rule.type, ) - self.network.create_qos_dscp_marking_rule = mock.Mock( - return_value=self.new_rule) + self.network_client.create_qos_dscp_marking_rule.return_value = ( + self.new_rule + ) # Get the command object to test - self.cmd = network_qos_rule.CreateNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.CreateNetworkQosRule(self.app, None) def test_create_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ - '--type', RULE_TYPE_DSCP_MARKING, - '--dscp-mark', str(self.new_rule.dscp_mark), + '--type', + RULE_TYPE_DSCP_MARKING, + '--dscp-mark', + str(self.new_rule.dscp_mark), self.new_rule.qos_policy_id, ] @@ -282,17 +325,18 @@ def test_create_default_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_qos_dscp_marking_rule.assert_called_once_with( - self.qos_policy.id, - **{'dscp_mark': self.new_rule.dscp_mark} + self.network_client.create_qos_dscp_marking_rule.assert_called_once_with( + self.qos_policy.id, **{'dscp_mark': self.new_rule.dscp_mark} ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_wrong_options(self): arglist = [ - '--type', RULE_TYPE_DSCP_MARKING, - '--max-kbps', '10000', + '--type', + RULE_TYPE_DSCP_MARKING, + '--max-kbps', + '10000', self.new_rule.qos_policy_id, ] @@ -306,60 +350,67 @@ def test_create_wrong_options(self): try: self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to create Network QoS rule: "Create" rule command ' - 'for type "dscp-marking" requires arguments: dscp_mark') + msg = ( + 'Failed to create Network QoS rule: "Create" rule command ' + 'for type "dscp-marking" requires arguments: dscp_mark' + ) self.assertEqual(msg, str(e)) class TestCreateNetworkQosRuleBandwidtLimit(TestNetworkQosRule): - def test_check_type_parameters(self): pass def setUp(self): - super(TestCreateNetworkQosRuleBandwidtLimit, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_BANDWIDTH_LIMIT} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_BANDWIDTH_LIMIT, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.columns = ( 'direction', 'id', - 'max_burst_kbits', + 'max_burst_kbps', 'max_kbps', 'project_id', - 'qos_policy_id', - 'type' + 'type', ) self.data = ( self.new_rule.direction, self.new_rule.id, - self.new_rule.max_burst_kbits, + self.new_rule.max_burst_kbps, self.new_rule.max_kbps, self.new_rule.project_id, - self.new_rule.qos_policy_id, self.new_rule.type, ) - self.network.create_qos_bandwidth_limit_rule = mock.Mock( - return_value=self.new_rule) + self.network_client.create_qos_bandwidth_limit_rule.return_value = ( + self.new_rule + ) # Get the command object to test - self.cmd = network_qos_rule.CreateNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.CreateNetworkQosRule(self.app, None) def test_create_no_options(self): arglist = [] verifylist = [] # Missing required 
args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ - '--type', RULE_TYPE_BANDWIDTH_LIMIT, - '--max-kbps', str(self.new_rule.max_kbps), + '--type', + RULE_TYPE_BANDWIDTH_LIMIT, + '--max-kbps', + str(self.new_rule.max_kbps), '--egress', self.new_rule.qos_policy_id, ] @@ -371,39 +422,48 @@ def test_create_default_options(self): ('qos_policy', self.new_rule.qos_policy_id), ] - rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_BANDWIDTH_LIMIT}) - rule.max_burst_kbits = 0 + rule = network_fakes.create_one_qos_rule( + { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_BANDWIDTH_LIMIT, + } + ) + rule.max_burst_kbps = 0 expected_data = ( rule.direction, rule.id, - rule.max_burst_kbits, + rule.max_burst_kbps, rule.max_kbps, rule.project_id, - rule.qos_policy_id, rule.type, ) with mock.patch.object( - self.network, "create_qos_bandwidth_limit_rule", - return_value=rule) as create_qos_bandwidth_limit_rule: + self.network_client, + "create_qos_bandwidth_limit_rule", + return_value=rule, + ) as create_qos_bandwidth_limit_rule: parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) create_qos_bandwidth_limit_rule.assert_called_once_with( self.qos_policy.id, - **{'max_kbps': self.new_rule.max_kbps, - 'direction': self.new_rule.direction} + **{ + 'max_kbps': self.new_rule.max_kbps, + 'direction': self.new_rule.direction, + }, ) self.assertEqual(self.columns, columns) self.assertEqual(expected_data, data) def test_create_all_options(self): arglist = [ - '--type', RULE_TYPE_BANDWIDTH_LIMIT, - '--max-kbps', str(self.new_rule.max_kbps), - '--max-burst-kbits', str(self.new_rule.max_burst_kbits), + '--type', + RULE_TYPE_BANDWIDTH_LIMIT, + '--max-kbps', + str(self.new_rule.max_kbps), + '--max-burst-kbits', + str(self.new_rule.max_burst_kbps), '--egress', self.new_rule.qos_policy_id, ] @@ -411,27 +471,31 @@ def test_create_all_options(self): verifylist = [ ('type', RULE_TYPE_BANDWIDTH_LIMIT), ('max_kbps', self.new_rule.max_kbps), - ('max_burst_kbits', self.new_rule.max_burst_kbits), + ('max_burst_kbits', self.new_rule.max_burst_kbps), ('egress', True), ('qos_policy', self.new_rule.qos_policy_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_qos_bandwidth_limit_rule.assert_called_once_with( + self.network_client.create_qos_bandwidth_limit_rule.assert_called_once_with( self.qos_policy.id, - **{'max_kbps': self.new_rule.max_kbps, - 'max_burst_kbps': self.new_rule.max_burst_kbits, - 'direction': self.new_rule.direction} + **{ + 'max_kbps': self.new_rule.max_kbps, + 'max_burst_kbps': self.new_rule.max_burst_kbps, + 'direction': self.new_rule.direction, + }, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_wrong_options(self): arglist = [ - '--type', RULE_TYPE_BANDWIDTH_LIMIT, - '--min-kbps', '10000', + '--type', + RULE_TYPE_BANDWIDTH_LIMIT, + '--min-kbps', + '10000', self.new_rule.qos_policy_id, ] @@ -445,30 +509,32 @@ def test_create_wrong_options(self): try: self.cmd.take_action(parsed_args) except 
exceptions.CommandError as e: - msg = ('Failed to create Network QoS rule: "Create" rule command ' - 'for type "bandwidth-limit" requires arguments: max_kbps') + msg = ( + 'Failed to create Network QoS rule: "Create" rule command ' + 'for type "bandwidth-limit" requires arguments: max_kbps' + ) self.assertEqual(msg, str(e)) class TestDeleteNetworkQosRuleMinimumBandwidth(TestNetworkQosRule): - def setUp(self): - super(TestDeleteNetworkQosRuleMinimumBandwidth, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_MINIMUM_BANDWIDTH} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_MINIMUM_BANDWIDTH, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] - self.network.delete_qos_minimum_bandwidth_rule = mock.Mock( - return_value=None) - self.network.find_qos_minimum_bandwidth_rule = ( - network_fakes.FakeNetworkQosRule.get_qos_rules( - qos_rules=self.new_rule) + self.network_client.delete_qos_minimum_bandwidth_rule.return_value = ( + None + ) + + self.network_client.find_qos_minimum_bandwidth_rule = ( + network_fakes.get_qos_rules(qos_rules=self.new_rule) ) # Get the command object to test - self.cmd = network_qos_rule.DeleteNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.DeleteNetworkQosRule(self.app, None) def test_qos_policy_delete(self): arglist = [ @@ -482,10 +548,12 @@ def test_qos_policy_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_qos_policy.assert_called_once_with( - self.qos_policy.id, ignore_missing=False) - self.network.delete_qos_minimum_bandwidth_rule.assert_called_once_with( - self.new_rule.id, self.qos_policy.id) + self.network_client.find_qos_policy.assert_called_once_with( + self.qos_policy.id, ignore_missing=False + ) + self.network_client.delete_qos_minimum_bandwidth_rule.assert_called_once_with( + self.new_rule.id, self.qos_policy.id + ) self.assertIsNone(result) def test_qos_policy_delete_error(self): @@ -498,36 +566,37 @@ def test_qos_policy_delete_error(self): ('id', self.new_rule.id), ] - self.network.delete_qos_minimum_bandwidth_rule.side_effect = \ + self.network_client.delete_qos_minimum_bandwidth_rule.side_effect = ( Exception('Error message') + ) try: parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to delete Network QoS rule ID "%(rule)s": %(e)s' % - {'rule': self.new_rule.id, 'e': 'Error message'}) + msg = 'Failed to delete Network QoS rule ID "{rule}": {e}'.format( + rule=self.new_rule.id, + e='Error message', + ) self.assertEqual(msg, str(e)) class TestDeleteNetworkQosRuleMinimumPacketRate(TestNetworkQosRule): - def setUp(self): - super(TestDeleteNetworkQosRuleMinimumPacketRate, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_MINIMUM_PACKET_RATE} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_MINIMUM_PACKET_RATE, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] - self.network.delete_qos_minimum_packet_rate_rule = mock.Mock( - return_value=None) - self.network.find_qos_minimum_packet_rate_rule = ( - 
network_fakes.FakeNetworkQosRule.get_qos_rules( - qos_rules=self.new_rule) + self.network_client.delete_qos_minimum_packet_rate_rule.return_value = None + + self.network_client.find_qos_minimum_packet_rate_rule = ( + network_fakes.get_qos_rules(qos_rules=self.new_rule) ) # Get the command object to test - self.cmd = network_qos_rule.DeleteNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.DeleteNetworkQosRule(self.app, None) def test_qos_policy_delete(self): arglist = [ @@ -541,10 +610,12 @@ def test_qos_policy_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_qos_policy.assert_called_once_with( - self.qos_policy.id, ignore_missing=False) - self.network.delete_qos_minimum_packet_rate_rule.\ - assert_called_once_with(self.new_rule.id, self.qos_policy.id) + self.network_client.find_qos_policy.assert_called_once_with( + self.qos_policy.id, ignore_missing=False + ) + self.network_client.delete_qos_minimum_packet_rate_rule.assert_called_once_with( # noqa: E501 + self.new_rule.id, self.qos_policy.id + ) self.assertIsNone(result) def test_qos_policy_delete_error(self): @@ -557,36 +628,37 @@ def test_qos_policy_delete_error(self): ('id', self.new_rule.id), ] - self.network.delete_qos_minimum_packet_rate_rule.side_effect = \ + self.network_client.delete_qos_minimum_packet_rate_rule.side_effect = ( Exception('Error message') + ) try: parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to delete Network QoS rule ID "%(rule)s": %(e)s' % - {'rule': self.new_rule.id, 'e': 'Error message'}) + msg = 'Failed to delete Network QoS rule ID "{rule}": {e}'.format( + rule=self.new_rule.id, + e='Error message', + ) self.assertEqual(msg, str(e)) class TestDeleteNetworkQosRuleDSCPMarking(TestNetworkQosRule): - def setUp(self): - super(TestDeleteNetworkQosRuleDSCPMarking, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_DSCP_MARKING} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_DSCP_MARKING, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] - self.network.delete_qos_dscp_marking_rule = mock.Mock( - return_value=None) - self.network.find_qos_dscp_marking_rule = ( - network_fakes.FakeNetworkQosRule.get_qos_rules( - qos_rules=self.new_rule) + self.network_client.delete_qos_dscp_marking_rule.return_value = None + + self.network_client.find_qos_dscp_marking_rule = ( + network_fakes.get_qos_rules(qos_rules=self.new_rule) ) # Get the command object to test - self.cmd = network_qos_rule.DeleteNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.DeleteNetworkQosRule(self.app, None) def test_qos_policy_delete(self): arglist = [ @@ -600,10 +672,12 @@ def test_qos_policy_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_qos_policy.assert_called_once_with( - self.qos_policy.id, ignore_missing=False) - self.network.delete_qos_dscp_marking_rule.assert_called_once_with( - self.new_rule.id, self.qos_policy.id) + self.network_client.find_qos_policy.assert_called_once_with( + self.qos_policy.id, ignore_missing=False + ) + self.network_client.delete_qos_dscp_marking_rule.assert_called_once_with( + 
self.new_rule.id, self.qos_policy.id + ) self.assertIsNone(result) def test_qos_policy_delete_error(self): @@ -616,36 +690,37 @@ def test_qos_policy_delete_error(self): ('id', self.new_rule.id), ] - self.network.delete_qos_dscp_marking_rule.side_effect = \ + self.network_client.delete_qos_dscp_marking_rule.side_effect = ( Exception('Error message') + ) try: parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to delete Network QoS rule ID "%(rule)s": %(e)s' % - {'rule': self.new_rule.id, 'e': 'Error message'}) + msg = 'Failed to delete Network QoS rule ID "{rule}": {e}'.format( + rule=self.new_rule.id, + e='Error message', + ) self.assertEqual(msg, str(e)) class TestDeleteNetworkQosRuleBandwidthLimit(TestNetworkQosRule): - def setUp(self): - super(TestDeleteNetworkQosRuleBandwidthLimit, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_BANDWIDTH_LIMIT} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_BANDWIDTH_LIMIT, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] - self.network.delete_qos_bandwidth_limit_rule = mock.Mock( - return_value=None) - self.network.find_qos_bandwidth_limit_rule = ( - network_fakes.FakeNetworkQosRule.get_qos_rules( - qos_rules=self.new_rule) + self.network_client.delete_qos_bandwidth_limit_rule.return_value = None + + self.network_client.find_qos_bandwidth_limit_rule = ( + network_fakes.get_qos_rules(qos_rules=self.new_rule) ) # Get the command object to test - self.cmd = network_qos_rule.DeleteNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.DeleteNetworkQosRule(self.app, None) def test_qos_policy_delete(self): arglist = [ @@ -659,10 +734,12 @@ def test_qos_policy_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_qos_policy.assert_called_once_with( - self.qos_policy.id, ignore_missing=False) - self.network.delete_qos_bandwidth_limit_rule.assert_called_once_with( - self.new_rule.id, self.qos_policy.id) + self.network_client.find_qos_policy.assert_called_once_with( + self.qos_policy.id, ignore_missing=False + ) + self.network_client.delete_qos_bandwidth_limit_rule.assert_called_once_with( + self.new_rule.id, self.qos_policy.id + ) self.assertIsNone(result) def test_qos_policy_delete_error(self): @@ -675,36 +752,41 @@ def test_qos_policy_delete_error(self): ('id', self.new_rule.id), ] - self.network.delete_qos_bandwidth_limit_rule.side_effect = \ + self.network_client.delete_qos_bandwidth_limit_rule.side_effect = ( Exception('Error message') + ) try: parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to delete Network QoS rule ID "%(rule)s": %(e)s' % - {'rule': self.new_rule.id, 'e': 'Error message'}) + msg = 'Failed to delete Network QoS rule ID "{rule}": {e}'.format( + rule=self.new_rule.id, + e='Error message', + ) self.assertEqual(msg, str(e)) class TestSetNetworkQosRuleMinimumBandwidth(TestNetworkQosRule): - def setUp(self): - super(TestSetNetworkQosRuleMinimumBandwidth, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_MINIMUM_BANDWIDTH} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - 
attrs=attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_MINIMUM_BANDWIDTH, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] - self.network.update_qos_minimum_bandwidth_rule = mock.Mock( - return_value=None) - self.network.find_qos_minimum_bandwidth_rule = mock.Mock( - return_value=self.new_rule) - self.network.find_qos_policy = mock.Mock( - return_value=self.qos_policy) + self.network_client.update_qos_minimum_bandwidth_rule.return_value = ( + None + ) + + self.network_client.find_qos_minimum_bandwidth_rule.return_value = ( + self.new_rule + ) + + self.network_client.find_qos_policy.return_value = self.qos_policy # Get the command object to test - self.cmd = (network_qos_rule.SetNetworkQosRule(self.app, - self.namespace)) + self.cmd = network_qos_rule.SetNetworkQosRule(self.app, None) def test_set_nothing(self): arglist = [ @@ -719,8 +801,9 @@ def test_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_qos_minimum_bandwidth_rule.assert_called_with( - self.new_rule, self.qos_policy.id) + self.network_client.update_qos_minimum_bandwidth_rule.assert_called_with( + self.new_rule, self.qos_policy.id + ) self.assertIsNone(result) def test_set_min_kbps(self): @@ -735,7 +818,8 @@ def _set_min_kbps(self, min_kbps=None): self.new_rule.min_kbps = min_kbps arglist = [ - '--min-kbps', str(self.new_rule.min_kbps), + '--min-kbps', + str(self.new_rule.min_kbps), self.new_rule.qos_policy_id, self.new_rule.id, ] @@ -751,8 +835,9 @@ def _set_min_kbps(self, min_kbps=None): attrs = { 'min_kbps': self.new_rule.min_kbps, } - self.network.update_qos_minimum_bandwidth_rule.assert_called_with( - self.new_rule, self.qos_policy.id, **attrs) + self.network_client.update_qos_minimum_bandwidth_rule.assert_called_with( + self.new_rule, self.qos_policy.id, **attrs + ) self.assertIsNone(result) if min_kbps: @@ -760,7 +845,8 @@ def _set_min_kbps(self, min_kbps=None): def test_set_wrong_options(self): arglist = [ - '--max-kbps', str(10000), + '--max-kbps', + str(10000), self.new_rule.qos_policy_id, self.new_rule.id, ] @@ -774,31 +860,33 @@ def test_set_wrong_options(self): try: self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to set Network QoS rule ID "%(rule)s": Rule type ' - '"minimum-bandwidth" only requires arguments: direction, ' - 'min_kbps' % {'rule': self.new_rule.id}) + msg = ( + f'Failed to set Network QoS rule ID "{self.new_rule.id}": Rule type ' + '"minimum-bandwidth" only requires arguments: direction, ' + 'min_kbps' + ) self.assertEqual(msg, str(e)) class TestSetNetworkQosRuleMinimumPacketRate(TestNetworkQosRule): - def setUp(self): - super(TestSetNetworkQosRuleMinimumPacketRate, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_MINIMUM_PACKET_RATE} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs=attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_MINIMUM_PACKET_RATE, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] - self.network.update_qos_minimum_packet_rate_rule = mock.Mock( - return_value=None) - self.network.find_qos_minimum_packet_rate_rule = mock.Mock( - return_value=self.new_rule) - self.network.find_qos_policy = mock.Mock( - return_value=self.qos_policy) + 
self.network_client.update_qos_minimum_packet_rate_rule.return_value = None + + self.network_client.find_qos_minimum_packet_rate_rule.return_value = ( + self.new_rule + ) + + self.network_client.find_qos_policy.return_value = self.qos_policy # Get the command object to test - self.cmd = (network_qos_rule.SetNetworkQosRule(self.app, - self.namespace)) + self.cmd = network_qos_rule.SetNetworkQosRule(self.app, None) def test_set_nothing(self): arglist = [ @@ -813,8 +901,9 @@ def test_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_qos_minimum_packet_rate_rule.assert_called_with( - self.new_rule, self.qos_policy.id) + self.network_client.update_qos_minimum_packet_rate_rule.assert_called_with( + self.new_rule, self.qos_policy.id + ) self.assertIsNone(result) def test_set_min_kpps(self): @@ -829,7 +918,8 @@ def _set_min_kpps(self, min_kpps=None): self.new_rule.min_kpps = min_kpps arglist = [ - '--min-kpps', str(self.new_rule.min_kpps), + '--min-kpps', + str(self.new_rule.min_kpps), self.new_rule.qos_policy_id, self.new_rule.id, ] @@ -845,8 +935,9 @@ def _set_min_kpps(self, min_kpps=None): attrs = { 'min_kpps': self.new_rule.min_kpps, } - self.network.update_qos_minimum_packet_rate_rule.assert_called_with( - self.new_rule, self.qos_policy.id, **attrs) + self.network_client.update_qos_minimum_packet_rate_rule.assert_called_with( + self.new_rule, self.qos_policy.id, **attrs + ) self.assertIsNone(result) if min_kpps: @@ -854,7 +945,8 @@ def _set_min_kpps(self, min_kpps=None): def test_set_wrong_options(self): arglist = [ - '--min-kbps', str(10000), + '--min-kbps', + str(10000), self.new_rule.qos_policy_id, self.new_rule.id, ] @@ -868,31 +960,33 @@ def test_set_wrong_options(self): try: self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to set Network QoS rule ID "%(rule)s": Rule type ' - '"minimum-packet-rate" only requires arguments: direction, ' - 'min_kpps' % {'rule': self.new_rule.id}) + msg = ( + f'Failed to set Network QoS rule ID "{self.new_rule.id}": Rule type ' + '"minimum-packet-rate" only requires arguments: direction, ' + 'min_kpps' + ) self.assertEqual(msg, str(e)) class TestSetNetworkQosRuleDSCPMarking(TestNetworkQosRule): - def setUp(self): - super(TestSetNetworkQosRuleDSCPMarking, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_DSCP_MARKING} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs=attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_DSCP_MARKING, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] - self.network.update_qos_dscp_marking_rule = mock.Mock( - return_value=None) - self.network.find_qos_dscp_marking_rule = mock.Mock( - return_value=self.new_rule) - self.network.find_qos_policy = mock.Mock( - return_value=self.qos_policy) + self.network_client.update_qos_dscp_marking_rule.return_value = None + + self.network_client.find_qos_dscp_marking_rule.return_value = ( + self.new_rule + ) + + self.network_client.find_qos_policy.return_value = self.qos_policy # Get the command object to test - self.cmd = (network_qos_rule.SetNetworkQosRule(self.app, - self.namespace)) + self.cmd = network_qos_rule.SetNetworkQosRule(self.app, None) def test_set_nothing(self): arglist = [ @@ -907,8 +1001,9 @@ def test_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result 
= self.cmd.take_action(parsed_args) - self.network.update_qos_dscp_marking_rule.assert_called_with( - self.new_rule, self.qos_policy.id) + self.network_client.update_qos_dscp_marking_rule.assert_called_with( + self.new_rule, self.qos_policy.id + ) self.assertIsNone(result) def test_set_dscp_mark(self): @@ -923,7 +1018,8 @@ def _set_dscp_mark(self, dscp_mark=None): self.new_rule.dscp_mark = dscp_mark arglist = [ - '--dscp-mark', str(self.new_rule.dscp_mark), + '--dscp-mark', + str(self.new_rule.dscp_mark), self.new_rule.qos_policy_id, self.new_rule.id, ] @@ -939,8 +1035,9 @@ def _set_dscp_mark(self, dscp_mark=None): attrs = { 'dscp_mark': self.new_rule.dscp_mark, } - self.network.update_qos_dscp_marking_rule.assert_called_with( - self.new_rule, self.qos_policy.id, **attrs) + self.network_client.update_qos_dscp_marking_rule.assert_called_with( + self.new_rule, self.qos_policy.id, **attrs + ) self.assertIsNone(result) if dscp_mark: @@ -948,7 +1045,8 @@ def _set_dscp_mark(self, dscp_mark=None): def test_set_wrong_options(self): arglist = [ - '--max-kbps', str(10000), + '--max-kbps', + str(10000), self.new_rule.qos_policy_id, self.new_rule.id, ] @@ -962,31 +1060,29 @@ def test_set_wrong_options(self): try: self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to set Network QoS rule ID "%(rule)s": Rule type ' - '"dscp-marking" only requires arguments: dscp_mark' % - {'rule': self.new_rule.id}) + msg = ( + f'Failed to set Network QoS rule ID "{self.new_rule.id}": Rule type ' + '"dscp-marking" only requires arguments: dscp_mark' + ) self.assertEqual(msg, str(e)) class TestSetNetworkQosRuleBandwidthLimit(TestNetworkQosRule): - def setUp(self): - super(TestSetNetworkQosRuleBandwidthLimit, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_BANDWIDTH_LIMIT} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs=attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_BANDWIDTH_LIMIT, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] - self.network.update_qos_bandwidth_limit_rule = mock.Mock( - return_value=None) - self.network.find_qos_bandwidth_limit_rule = mock.Mock( - return_value=self.new_rule) - self.network.find_qos_policy = mock.Mock( - return_value=self.qos_policy) + self.network_client.update_qos_bandwidth_limit_rule.return_value = None + self.network_client.find_qos_bandwidth_limit_rule.return_value = ( + self.new_rule + ) + self.network_client.find_qos_policy.return_value = self.qos_policy - # Get the command object to test - self.cmd = (network_qos_rule.SetNetworkQosRule(self.app, - self.namespace)) + self.cmd = network_qos_rule.SetNetworkQosRule(self.app, None) def test_set_nothing(self): arglist = [ @@ -1001,8 +1097,9 @@ def test_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_qos_bandwidth_limit_rule.assert_called_with( - self.new_rule, self.qos_policy.id) + self.network_client.update_qos_bandwidth_limit_rule.assert_called_with( + self.new_rule, self.qos_policy.id + ) self.assertIsNone(result) def test_set_max_kbps(self): @@ -1016,12 +1113,12 @@ def _reset_max_kbps(self, max_kbps): def _set_max_kbps(self, max_kbps=None): if max_kbps: - self.addCleanup(self._reset_max_kbps, - self.new_rule.max_kbps) + self.addCleanup(self._reset_max_kbps, self.new_rule.max_kbps) self.new_rule.max_kbps = max_kbps arglist = [ - 
'--max-kbps', str(self.new_rule.max_kbps), + '--max-kbps', + str(self.new_rule.max_kbps), self.new_rule.qos_policy_id, self.new_rule.id, ] @@ -1037,8 +1134,9 @@ def _set_max_kbps(self, max_kbps=None): attrs = { 'max_kbps': self.new_rule.max_kbps, } - self.network.update_qos_bandwidth_limit_rule.assert_called_with( - self.new_rule, self.qos_policy.id, **attrs) + self.network_client.update_qos_bandwidth_limit_rule.assert_called_with( + self.new_rule, self.qos_policy.id, **attrs + ) self.assertIsNone(result) def test_set_max_burst_kbits(self): @@ -1048,21 +1146,23 @@ def test_set_max_burst_kbits_to_zero(self): self._set_max_burst_kbits(max_burst_kbits=0) def _reset_max_burst_kbits(self, max_burst_kbits): - self.new_rule.max_burst_kbits = max_burst_kbits + self.new_rule.max_burst_kbps = max_burst_kbits def _set_max_burst_kbits(self, max_burst_kbits=None): if max_burst_kbits: - self.addCleanup(self._reset_max_burst_kbits, - self.new_rule.max_burst_kbits) - self.new_rule.max_burst_kbits = max_burst_kbits + self.addCleanup( + self._reset_max_burst_kbits, self.new_rule.max_burst_kbps + ) + self.new_rule.max_burst_kbps = max_burst_kbits arglist = [ - '--max-burst-kbits', str(self.new_rule.max_burst_kbits), + '--max-burst-kbits', + str(self.new_rule.max_burst_kbps), self.new_rule.qos_policy_id, self.new_rule.id, ] verifylist = [ - ('max_burst_kbits', self.new_rule.max_burst_kbits), + ('max_burst_kbits', self.new_rule.max_burst_kbps), ('qos_policy', self.new_rule.qos_policy_id), ('id', self.new_rule.id), ] @@ -1071,10 +1171,11 @@ def _set_max_burst_kbits(self, max_burst_kbits=None): result = self.cmd.take_action(parsed_args) attrs = { - 'max_burst_kbps': self.new_rule.max_burst_kbits, + 'max_burst_kbps': self.new_rule.max_burst_kbps, } - self.network.update_qos_bandwidth_limit_rule.assert_called_with( - self.new_rule, self.qos_policy.id, **attrs) + self.network_client.update_qos_bandwidth_limit_rule.assert_called_with( + self.new_rule, self.qos_policy.id, **attrs + ) self.assertIsNone(result) def test_set_direction_egress(self): @@ -1090,7 +1191,7 @@ def _set_direction(self, direction): self.addCleanup(self._reset_direction, self.new_rule.direction) arglist = [ - '--%s' % direction, + f'--{direction}', self.new_rule.qos_policy_id, self.new_rule.id, ] @@ -1106,13 +1207,15 @@ def _set_direction(self, direction): attrs = { 'direction': direction, } - self.network.update_qos_bandwidth_limit_rule.assert_called_with( - self.new_rule, self.qos_policy.id, **attrs) + self.network_client.update_qos_bandwidth_limit_rule.assert_called_with( + self.new_rule, self.qos_policy.id, **attrs + ) self.assertIsNone(result) def test_set_wrong_options(self): arglist = [ - '--min-kbps', str(10000), + '--min-kbps', + str(10000), self.new_rule.qos_policy_id, self.new_rule.id, ] @@ -1126,41 +1229,47 @@ def test_set_wrong_options(self): try: self.cmd.take_action(parsed_args) except exceptions.CommandError as e: - msg = ('Failed to set Network QoS rule ID "%(rule)s": Rule type ' - '"bandwidth-limit" only requires arguments: direction, ' - 'max_burst_kbps, max_kbps' % {'rule': self.new_rule.id}) + msg = ( + f'Failed to set Network QoS rule ID "{self.new_rule.id}": Rule type ' + '"bandwidth-limit" only requires arguments: direction, ' + 'max_burst_kbps, max_kbps' + ) self.assertEqual(msg, str(e)) class TestListNetworkQosRule(TestNetworkQosRule): - def setUp(self): - super(TestListNetworkQosRule, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_MINIMUM_BANDWIDTH} - self.new_rule_min_bw = 
(network_fakes.FakeNetworkQosRule. - create_one_qos_rule(attrs=attrs)) - attrs['type'] = RULE_TYPE_MINIMUM_PACKET_RATE - self.new_rule_min_pps = (network_fakes.FakeNetworkQosRule. - create_one_qos_rule(attrs=attrs)) - attrs['type'] = RULE_TYPE_DSCP_MARKING - self.new_rule_dscp_mark = (network_fakes.FakeNetworkQosRule. - create_one_qos_rule(attrs=attrs)) - attrs['type'] = RULE_TYPE_BANDWIDTH_LIMIT - self.new_rule_max_bw = (network_fakes.FakeNetworkQosRule. - create_one_qos_rule(attrs=attrs)) - self.qos_policy.rules = [self.new_rule_min_bw, - self.new_rule_min_pps, - self.new_rule_dscp_mark, - self.new_rule_max_bw] - self.network.find_qos_minimum_bandwidth_rule = mock.Mock( - return_value=self.new_rule_min_bw) - self.network.find_qos_minimum_packet_rate_rule = mock.Mock( - return_value=self.new_rule_min_pps) - self.network.find_qos_dscp_marking_rule = mock.Mock( - return_value=self.new_rule_dscp_mark) - self.network.find_qos_bandwidth_limit_rule = mock.Mock( - return_value=self.new_rule_max_bw) + super().setUp() + self.qos_policy.rules = [ + { + 'max_kbps': 1024, + 'max_burst_kbps': 1024, + 'direction': 'egress', + 'id': 'qos-rule-id-' + uuid.uuid4().hex, + 'qos_policy_id': self.qos_policy.id, + 'type': 'bandwidth_limit', + }, + { + 'dscp_mark': 0, + 'id': 'qos-rule-id-' + uuid.uuid4().hex, + 'qos_policy_id': self.qos_policy.id, + 'type': 'dscp_marking', + }, + { + 'min_kbps': 1024, + 'direction': 'egress', + 'id': 'qos-rule-id-' + uuid.uuid4().hex, + 'qos_policy_id': self.qos_policy.id, + 'type': 'minimum_bandwidth', + }, + { + 'min_kpps': 2800, + 'direction': 'egress', + 'id': 'qos-rule-id-' + uuid.uuid4().hex, + 'qos_policy_id': self.qos_policy.id, + 'type': 'minimum_packet_rate', + }, + ] self.columns = ( 'ID', 'QoS Policy ID', @@ -1174,25 +1283,23 @@ def setUp(self): ) self.data = [] for index in range(len(self.qos_policy.rules)): - self.data.append(( - self.qos_policy.rules[index].id, - self.qos_policy.rules[index].qos_policy_id, - self.qos_policy.rules[index].type, - getattr(self.qos_policy.rules[index], 'max_kbps', ''), - getattr(self.qos_policy.rules[index], 'max_burst_kbps', ''), - getattr(self.qos_policy.rules[index], 'min_kbps', ''), - getattr(self.qos_policy.rules[index], 'min_kpps', ''), - getattr(self.qos_policy.rules[index], 'dscp_mark', ''), - getattr(self.qos_policy.rules[index], 'direction', ''), - )) - # Get the command object to test - self.cmd = network_qos_rule.ListNetworkQosRule(self.app, - self.namespace) + self.data.append( + ( + self.qos_policy.rules[index]['id'], + self.qos_policy.id, + self.qos_policy.rules[index]['type'], + self.qos_policy.rules[index].get('max_kbps', ''), + self.qos_policy.rules[index].get('max_burst_kbps', ''), + self.qos_policy.rules[index].get('min_kbps', ''), + self.qos_policy.rules[index].get('min_kpps', ''), + self.qos_policy.rules[index].get('dscp_mark', ''), + self.qos_policy.rules[index].get('direction', ''), + ) + ) + self.cmd = network_qos_rule.ListNetworkQosRule(self.app, None) def test_qos_rule_list(self): - arglist = [ - self.qos_policy.id - ] + arglist = [self.qos_policy.id] verifylist = [ ('qos_policy', self.qos_policy.id), ] @@ -1200,8 +1307,9 @@ def test_qos_rule_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_qos_policy.assert_called_once_with( - self.qos_policy.id, ignore_missing=False) + self.network_client.find_qos_policy.assert_called_once_with( + self.qos_policy.id, ignore_missing=False + ) self.assertEqual(self.columns, 
columns) list_data = list(data) self.assertEqual(len(self.data), len(list_data)) @@ -1210,45 +1318,48 @@ def test_qos_rule_list(self): class TestShowNetworkQosRuleMinimumBandwidth(TestNetworkQosRule): - def setUp(self): - super(TestShowNetworkQosRuleMinimumBandwidth, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_MINIMUM_BANDWIDTH} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_MINIMUM_BANDWIDTH, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] self.columns = ( 'direction', 'id', 'min_kbps', 'project_id', - 'qos_policy_id', - 'type' + 'type', ) self.data = ( self.new_rule.direction, self.new_rule.id, self.new_rule.min_kbps, self.new_rule.project_id, - self.new_rule.qos_policy_id, self.new_rule.type, ) - self.network.get_qos_minimum_bandwidth_rule = mock.Mock( - return_value=self.new_rule) + self.network_client.get_qos_minimum_bandwidth_rule.return_value = ( + self.new_rule + ) # Get the command object to test - self.cmd = network_qos_rule.ShowNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.ShowNetworkQosRule(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -1263,52 +1374,56 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_qos_minimum_bandwidth_rule.assert_called_once_with( - self.new_rule.id, self.qos_policy.id) + self.network_client.get_qos_minimum_bandwidth_rule.assert_called_once_with( + self.new_rule.id, self.qos_policy.id + ) self.assertEqual(self.columns, columns) self.assertEqual(list(self.data), list(data)) class TestShowNetworkQosRuleMinimumPacketRate(TestNetworkQosRule): - def setUp(self): - super(TestShowNetworkQosRuleMinimumPacketRate, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_MINIMUM_PACKET_RATE} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_MINIMUM_PACKET_RATE, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] self.columns = ( 'direction', 'id', 'min_kpps', 'project_id', - 'qos_policy_id', - 'type' + 'type', ) self.data = ( self.new_rule.direction, self.new_rule.id, self.new_rule.min_kpps, self.new_rule.project_id, - self.new_rule.qos_policy_id, self.new_rule.type, ) - self.network.get_qos_minimum_packet_rate_rule = mock.Mock( - return_value=self.new_rule) + self.network_client.get_qos_minimum_packet_rate_rule.return_value = ( + self.new_rule + ) # Get the command object to test - self.cmd = network_qos_rule.ShowNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.ShowNetworkQosRule(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, 
+ self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -1323,50 +1438,54 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_qos_minimum_packet_rate_rule.assert_called_once_with( - self.new_rule.id, self.qos_policy.id) + self.network_client.get_qos_minimum_packet_rate_rule.assert_called_once_with( + self.new_rule.id, self.qos_policy.id + ) self.assertEqual(self.columns, columns) self.assertEqual(list(self.data), list(data)) class TestShowNetworkQosDSCPMarking(TestNetworkQosRule): - def setUp(self): - super(TestShowNetworkQosDSCPMarking, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_DSCP_MARKING} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_DSCP_MARKING, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] self.columns = ( 'dscp_mark', 'id', 'project_id', - 'qos_policy_id', - 'type' + 'type', ) self.data = ( self.new_rule.dscp_mark, self.new_rule.id, self.new_rule.project_id, - self.new_rule.qos_policy_id, self.new_rule.type, ) - self.network.get_qos_dscp_marking_rule = mock.Mock( - return_value=self.new_rule) + self.network_client.get_qos_dscp_marking_rule.return_value = ( + self.new_rule + ) # Get the command object to test - self.cmd = network_qos_rule.ShowNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.ShowNetworkQosRule(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -1381,54 +1500,58 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_qos_dscp_marking_rule.assert_called_once_with( - self.new_rule.id, self.qos_policy.id) + self.network_client.get_qos_dscp_marking_rule.assert_called_once_with( + self.new_rule.id, self.qos_policy.id + ) self.assertEqual(self.columns, columns) self.assertEqual(list(self.data), list(data)) class TestShowNetworkQosBandwidthLimit(TestNetworkQosRule): - def setUp(self): - super(TestShowNetworkQosBandwidthLimit, self).setUp() - attrs = {'qos_policy_id': self.qos_policy.id, - 'type': RULE_TYPE_BANDWIDTH_LIMIT} - self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule( - attrs) + super().setUp() + attrs = { + 'qos_policy_id': self.qos_policy.id, + 'type': RULE_TYPE_BANDWIDTH_LIMIT, + } + self.new_rule = network_fakes.create_one_qos_rule(attrs) self.qos_policy.rules = [self.new_rule] self.columns = ( 'direction', 'id', - 'max_burst_kbits', + 'max_burst_kbps', 'max_kbps', 'project_id', - 'qos_policy_id', - 'type' + 'type', ) self.data = ( self.new_rule.direction, self.new_rule.id, - self.new_rule.max_burst_kbits, + self.new_rule.max_burst_kbps, self.new_rule.max_kbps, self.new_rule.project_id, - self.new_rule.qos_policy_id, self.new_rule.type, ) - self.network.get_qos_bandwidth_limit_rule = mock.Mock( - return_value=self.new_rule) + self.network_client.get_qos_bandwidth_limit_rule.return_value = ( + self.new_rule + ) # Get the 
command object to test - self.cmd = network_qos_rule.ShowNetworkQosRule(self.app, - self.namespace) + self.cmd = network_qos_rule.ShowNetworkQosRule(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -1443,7 +1566,8 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_qos_bandwidth_limit_rule.assert_called_once_with( - self.new_rule.id, self.qos_policy.id) + self.network_client.get_qos_bandwidth_limit_rule.assert_called_once_with( + self.new_rule.id, self.qos_policy.id + ) self.assertEqual(self.columns, columns) self.assertEqual(list(self.data), list(data)) diff --git a/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py b/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py index 3aae822e61..1c50b9e938 100644 --- a/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py +++ b/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -from unittest import mock from openstackclient.network.v2 import network_qos_rule_type as _qos_rule_type from openstackclient.tests.unit.network.v2 import fakes as network_fakes @@ -21,49 +20,37 @@ class TestNetworkQosRuleType(network_fakes.TestNetworkV2): - def setUp(self): - super(TestNetworkQosRuleType, self).setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network + super().setUp() class TestShowNetworkQosRuleType(TestNetworkQosRuleType): - - attrs = { - 'drivers': [{ - 'name': 'driver 1', - 'supported_parameters': [] - }] - } + attrs = {'drivers': [{'name': 'driver 1', 'supported_parameters': []}]} # The QoS policies to show. 
- qos_rule_type = ( - network_fakes.FakeNetworkQosRuleType.create_one_qos_rule_type(attrs)) - columns = ( - 'drivers', - 'rule_type_name' - ) - data = [ - qos_rule_type.drivers, - qos_rule_type.type - ] + qos_rule_type = network_fakes.create_one_qos_rule_type(attrs) + columns = ('drivers', 'rule_type_name') + data = [qos_rule_type.drivers, qos_rule_type.type] def setUp(self): - super(TestShowNetworkQosRuleType, self).setUp() - self.network.get_qos_rule_type = mock.Mock( - return_value=self.qos_rule_type) + super().setUp() + self.network_client.get_qos_rule_type.return_value = self.qos_rule_type # Get the command object to test - self.cmd = _qos_rule_type.ShowNetworkQosRuleType(self.app, - self.namespace) + self.cmd = _qos_rule_type.ShowNetworkQosRuleType(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -76,34 +63,28 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_qos_rule_type.assert_called_once_with( - self.qos_rule_type.type) + self.network_client.get_qos_rule_type.assert_called_once_with( + self.qos_rule_type.type + ) self.assertEqual(self.columns, columns) self.assertEqual(list(self.data), list(data)) class TestListNetworkQosRuleType(TestNetworkQosRuleType): - # The QoS policies to list up. - qos_rule_types = ( - network_fakes.FakeNetworkQosRuleType.create_qos_rule_types(count=3)) - columns = ( - 'Type', - ) + qos_rule_types = network_fakes.create_qos_rule_types(count=3) + + columns = ('Type',) data = [] for qos_rule_type in qos_rule_types: - data.append(( - qos_rule_type.type, - )) + data.append((qos_rule_type.type,)) def setUp(self): - super(TestListNetworkQosRuleType, self).setUp() - self.network.qos_rule_types = mock.Mock( - return_value=self.qos_rule_types) + super().setUp() + self.network_client.qos_rule_types.return_value = self.qos_rule_types # Get the command object to test - self.cmd = _qos_rule_type.ListNetworkQosRuleType(self.app, - self.namespace) + self.cmd = _qos_rule_type.ListNetworkQosRuleType(self.app, None) def test_qos_rule_type_list(self): arglist = [] @@ -112,14 +93,12 @@ def test_qos_rule_type_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.qos_rule_types.assert_called_once_with(**{}) + self.network_client.qos_rule_types.assert_called_once_with(**{}) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_qos_rule_type_list_all_supported(self): - arglist = [ - '--all-supported' - ] + arglist = ['--all-supported'] verifylist = [ ('all_supported', True), ] @@ -127,16 +106,14 @@ def test_qos_rule_type_list_all_supported(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.qos_rule_types.assert_called_once_with( + self.network_client.qos_rule_types.assert_called_once_with( **{'all_supported': True} ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_qos_rule_type_list_all_rules(self): - arglist = [ - '--all-rules' - ] + arglist =
['--all-rules'] verifylist = [ ('all_rules', True), ] @@ -144,7 +121,7 @@ def test_qos_rule_type_list_all_rules(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.qos_rule_types.assert_called_once_with( + self.network_client.qos_rule_types.assert_called_once_with( **{'all_rules': True} ) self.assertEqual(self.columns, columns) diff --git a/openstackclient/tests/unit/network/v2/test_network_rbac.py b/openstackclient/tests/unit/network/v2/test_network_rbac.py index 7ce252054d..d3a7192453 100644 --- a/openstackclient/tests/unit/network/v2/test_network_rbac.py +++ b/openstackclient/tests/unit/network/v2/test_network_rbac.py @@ -11,7 +11,6 @@ # under the License. # -from unittest import mock from unittest.mock import call import ddt @@ -24,30 +23,28 @@ class TestNetworkRBAC(network_fakes.TestNetworkV2): - def setUp(self): - super(TestNetworkRBAC, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects @ddt.ddt class TestCreateNetworkRBAC(TestNetworkRBAC): - network_object = network_fakes.create_one_network() - qos_object = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - sg_object = network_fakes.FakeNetworkSecGroup.create_one_security_group() + qos_object = network_fakes.create_one_qos_policy() + sg_object = network_fakes.create_one_security_group() as_object = network_fakes.create_one_address_scope() snp_object = network_fakes.FakeSubnetPool.create_one_subnet_pool() ag_object = network_fakes.create_one_address_group() project = identity_fakes_v3.FakeProject.create_one_project() rbac_policy = network_fakes.create_one_network_rbac( - attrs={'project_id': project.id, - 'target_tenant': project.id, - 'object_id': network_object.id} + attrs={ + 'project_id': project.id, + 'target_tenant': project.id, + 'object_id': network_object.id, + } ) columns = ( @@ -69,30 +66,31 @@ class TestCreateNetworkRBAC(TestNetworkRBAC): ] def setUp(self): - super(TestCreateNetworkRBAC, self).setUp() + super().setUp() # Get the command object to test - self.cmd = network_rbac.CreateNetworkRBAC(self.app, self.namespace) - - self.network.create_rbac_policy = mock.Mock( - return_value=self.rbac_policy) - self.network.find_network = mock.Mock( - return_value=self.network_object) - self.network.find_qos_policy = mock.Mock( - return_value=self.qos_object) - self.network.find_security_group = mock.Mock( - return_value=self.sg_object) - self.network.find_address_scope = mock.Mock( - return_value=self.as_object) - self.network.find_subnet_pool = mock.Mock( - return_value=self.snp_object) - self.network.find_address_group = mock.Mock( - return_value=self.ag_object) + self.cmd = network_rbac.CreateNetworkRBAC(self.app, None) + + self.network_client.create_rbac_policy.return_value = self.rbac_policy + + self.network_client.find_network.return_value = self.network_object + + self.network_client.find_qos_policy.return_value = self.qos_object + + self.network_client.find_security_group.return_value = self.sg_object + + self.network_client.find_address_scope.return_value = self.as_object + + self.network_client.find_subnet_pool.return_value = self.snp_object + + self.network_client.find_address_group.return_value = self.ag_object + self.projects_mock.get.return_value = self.project def 
test_network_rbac_create_no_type(self): arglist = [ - '--action', self.rbac_policy.action, + '--action', + self.rbac_policy.action, self.rbac_policy.object_id, ] verifylist = [ @@ -100,12 +98,18 @@ def test_network_rbac_create_no_type(self): ('rbac_policy', self.rbac_policy.id), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_network_rbac_create_no_action(self): arglist = [ - '--type', self.rbac_policy.object_type, + '--type', + self.rbac_policy.object_type, self.rbac_policy.object_id, ] verifylist = [ @@ -113,14 +117,22 @@ def test_network_rbac_create_no_action(self): ('rbac_policy', self.rbac_policy.id), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_network_rbac_create_invalid_type(self): arglist = [ - '--action', self.rbac_policy.action, - '--type', 'invalid_type', - '--target-project', self.rbac_policy.target_project_id, + '--action', + self.rbac_policy.action, + '--type', + 'invalid_type', + '--target-project', + self.rbac_policy.target_project_id, self.rbac_policy.object_id, ] verifylist = [ @@ -130,14 +142,22 @@ def test_network_rbac_create_invalid_type(self): ('rbac_policy', self.rbac_policy.id), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_network_rbac_create_invalid_action(self): arglist = [ - '--type', self.rbac_policy.object_type, - '--action', 'invalid_action', - '--target-project', self.rbac_policy.target_project_id, + '--type', + self.rbac_policy.object_type, + '--action', + 'invalid_action', + '--target-project', + self.rbac_policy.target_project_id, self.rbac_policy.object_id, ] verifylist = [ @@ -147,14 +167,22 @@ def test_network_rbac_create_invalid_action(self): ('rbac_policy', self.rbac_policy.id), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_network_rbac_create(self): arglist = [ - '--type', self.rbac_policy.object_type, - '--action', self.rbac_policy.action, - '--target-project', self.rbac_policy.target_project_id, + '--type', + self.rbac_policy.object_type, + '--action', + self.rbac_policy.action, + '--target-project', + self.rbac_policy.target_project_id, self.rbac_policy.object_id, ] verifylist = [ @@ -168,19 +196,23 @@ def test_network_rbac_create(self): # DisplayCommandBase.take_action() returns two tuples columns, data = self.cmd.take_action(parsed_args) - self.network.create_rbac_policy.assert_called_with(**{ - 'object_id': self.rbac_policy.object_id, - 'object_type': self.rbac_policy.object_type, - 'action': self.rbac_policy.action, - 'target_tenant': self.rbac_policy.target_project_id, - }) + self.network_client.create_rbac_policy.assert_called_with( + **{ + 'object_id': self.rbac_policy.object_id, + 'object_type': self.rbac_policy.object_type, + 'action': self.rbac_policy.action, + 'target_tenant': self.rbac_policy.target_project_id, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def 
test_network_rbac_create_with_target_all_projects(self): arglist = [ - '--type', self.rbac_policy.object_type, - '--action', self.rbac_policy.action, + '--type', + self.rbac_policy.object_type, + '--action', + self.rbac_policy.action, '--target-all-projects', self.rbac_policy.object_id, ] @@ -194,21 +226,29 @@ def test_network_rbac_create_with_target_all_projects(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_rbac_policy.assert_called_with(**{ - 'object_id': self.rbac_policy.object_id, - 'object_type': self.rbac_policy.object_type, - 'action': self.rbac_policy.action, - 'target_tenant': '*', - }) + self.network_client.create_rbac_policy.assert_called_with( + **{ + 'object_id': self.rbac_policy.object_id, + 'object_type': self.rbac_policy.object_type, + 'action': self.rbac_policy.action, + 'target_tenant': '*', + } + ) def test_network_rbac_create_all_options(self): arglist = [ - '--type', self.rbac_policy.object_type, - '--action', self.rbac_policy.action, - '--target-project', self.rbac_policy.target_project_id, - '--project', self.rbac_policy.project_id, - '--project-domain', self.project.domain_id, - '--target-project-domain', self.project.domain_id, + '--type', + self.rbac_policy.object_type, + '--action', + self.rbac_policy.action, + '--target-project', + self.rbac_policy.target_project_id, + '--project', + self.rbac_policy.project_id, + '--project-domain', + self.project.domain_id, + '--target-project-domain', + self.project.domain_id, self.rbac_policy.object_id, ] verifylist = [ @@ -225,13 +265,15 @@ def test_network_rbac_create_all_options(self): # DisplayCommandBase.take_action() returns two tuples columns, data = self.cmd.take_action(parsed_args) - self.network.create_rbac_policy.assert_called_with(**{ - 'object_id': self.rbac_policy.object_id, - 'object_type': self.rbac_policy.object_type, - 'action': self.rbac_policy.action, - 'target_tenant': self.rbac_policy.target_project_id, - 'project_id': self.rbac_policy.project_id, - }) + self.network_client.create_rbac_policy.assert_called_with( + **{ + 'object_id': self.rbac_policy.object_id, + 'object_type': self.rbac_policy.object_type, + 'action': self.rbac_policy.action, + 'target_tenant': self.rbac_policy.target_project_id, + 'project_id': self.rbac_policy.project_id, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -240,7 +282,7 @@ def test_network_rbac_create_all_options(self): ('security_group', "sg_object"), ('subnetpool', "snp_object"), ('address_scope', "as_object"), - ('address_group', "ag_object") + ('address_group', "ag_object"), ) @ddt.unpack def test_network_rbac_create_object(self, obj_type, obj_fake_attr): @@ -249,9 +291,12 @@ def test_network_rbac_create_object(self, obj_type, obj_fake_attr): self.rbac_policy.object_type = obj_type self.rbac_policy.object_id = obj_fake.id arglist = [ - '--type', obj_type, - '--action', self.rbac_policy.action, - '--target-project', self.rbac_policy.target_project_id, + '--type', + obj_type, + '--action', + self.rbac_policy.action, + '--target-project', + self.rbac_policy.target_project_id, obj_fake.name, ] verifylist = [ @@ -265,12 +310,14 @@ def test_network_rbac_create_object(self, obj_type, obj_fake_attr): # DisplayCommandBase.take_action() returns two tuples columns, data = self.cmd.take_action(parsed_args) - self.network.create_rbac_policy.assert_called_with(**{ - 'object_id': obj_fake.id, - 'object_type': obj_type, - 'action': self.rbac_policy.action, - 'target_tenant': 
self.rbac_policy.target_project_id, - }) + self.network_client.create_rbac_policy.assert_called_with( + **{ + 'object_id': obj_fake.id, + 'object_type': obj_type, + 'action': self.rbac_policy.action, + 'target_tenant': self.rbac_policy.target_project_id, + } + ) self.data = [ self.rbac_policy.action, self.rbac_policy.id, @@ -284,19 +331,17 @@ def test_network_rbac_create_object(self, obj_type, obj_fake_attr): class TestDeleteNetworkRBAC(TestNetworkRBAC): - rbac_policies = network_fakes.create_network_rbacs(count=2) def setUp(self): - super(TestDeleteNetworkRBAC, self).setUp() - self.network.delete_rbac_policy = mock.Mock(return_value=None) - self.network.find_rbac_policy = ( - network_fakes.get_network_rbacs( - rbac_policies=self.rbac_policies) + super().setUp() + self.network_client.delete_rbac_policy.return_value = None + self.network_client.find_rbac_policy = network_fakes.get_network_rbacs( + rbac_policies=self.rbac_policies ) # Get the command object to test - self.cmd = network_rbac.DeleteNetworkRBAC(self.app, self.namespace) + self.cmd = network_rbac.DeleteNetworkRBAC(self.app, None) def test_network_rbac_delete(self): arglist = [ @@ -309,10 +354,12 @@ def test_network_rbac_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_rbac_policy.assert_called_once_with( - self.rbac_policies[0].id, ignore_missing=False) - self.network.delete_rbac_policy.assert_called_once_with( - self.rbac_policies[0]) + self.network_client.find_rbac_policy.assert_called_once_with( + self.rbac_policies[0].id, ignore_missing=False + ) + self.network_client.delete_rbac_policy.assert_called_once_with( + self.rbac_policies[0] + ) self.assertIsNone(result) def test_multi_network_rbacs_delete(self): @@ -331,7 +378,7 @@ def test_multi_network_rbacs_delete(self): calls = [] for r in self.rbac_policies: calls.append(call(r)) - self.network.delete_rbac_policy.assert_has_calls(calls) + self.network_client.delete_rbac_policy.assert_has_calls(calls) self.assertIsNone(result) def test_multi_network_policies_delete_with_exception(self): @@ -340,15 +387,12 @@ def test_multi_network_policies_delete_with_exception(self): 'unexist_rbac_policy', ] verifylist = [ - ('rbac_policy', - [self.rbac_policies[0].id, 'unexist_rbac_policy']), + ('rbac_policy', [self.rbac_policies[0].id, 'unexist_rbac_policy']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self.rbac_policies[0], exceptions.CommandError] - self.network.find_rbac_policy = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_rbac_policy.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -356,17 +400,18 @@ def test_multi_network_policies_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 RBAC policies failed to delete.', str(e)) - self.network.find_rbac_policy.assert_any_call( - self.rbac_policies[0].id, ignore_missing=False) - self.network.find_rbac_policy.assert_any_call( - 'unexist_rbac_policy', ignore_missing=False) - self.network.delete_rbac_policy.assert_called_once_with( + self.network_client.find_rbac_policy.assert_any_call( + self.rbac_policies[0].id, ignore_missing=False + ) + self.network_client.find_rbac_policy.assert_any_call( + 'unexist_rbac_policy', ignore_missing=False + ) + self.network_client.delete_rbac_policy.assert_called_once_with( self.rbac_policies[0] ) class TestListNetworkRABC(TestNetworkRBAC): - # The network rbac policies going to be listed 
up. rbac_policies = network_fakes.create_network_rbacs(count=3) @@ -383,27 +428,31 @@ class TestListNetworkRABC(TestNetworkRBAC): ) data = [] for r in rbac_policies: - data.append(( - r.id, - r.object_type, - r.object_id, - )) + data.append( + ( + r.id, + r.object_type, + r.object_id, + ) + ) data_long = [] for r in rbac_policies: - data_long.append(( - r.id, - r.object_type, - r.object_id, - r.action, - )) + data_long.append( + ( + r.id, + r.object_type, + r.object_id, + r.action, + ) + ) def setUp(self): - super(TestListNetworkRABC, self).setUp() + super().setUp() # Get the command object to test - self.cmd = network_rbac.ListNetworkRBAC(self.app, self.namespace) + self.cmd = network_rbac.ListNetworkRBAC(self.app, None) - self.network.rbac_policies = mock.Mock(return_value=self.rbac_policies) + self.network_client.rbac_policies.return_value = self.rbac_policies self.project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = self.project @@ -416,39 +465,41 @@ def test_network_rbac_list(self): # DisplayCommandBase.take_action() returns two tuples columns, data = self.cmd.take_action(parsed_args) - self.network.rbac_policies.assert_called_with() + self.network_client.rbac_policies.assert_called_with() self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_network_rbac_list_type_opt(self): arglist = [ - '--type', self.rbac_policies[0].object_type, ] - verifylist = [ - ('type', self.rbac_policies[0].object_type)] + '--type', + self.rbac_policies[0].object_type, + ] + verifylist = [('type', self.rbac_policies[0].object_type)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # DisplayCommandBase.take_action() returns two tuples columns, data = self.cmd.take_action(parsed_args) - self.network.rbac_policies.assert_called_with(**{ - 'object_type': self.rbac_policies[0].object_type - }) + self.network_client.rbac_policies.assert_called_with( + **{'object_type': self.rbac_policies[0].object_type} + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_network_rbac_list_action_opt(self): arglist = [ - '--action', self.rbac_policies[0].action, ] - verifylist = [ - ('action', self.rbac_policies[0].action)] + '--action', + self.rbac_policies[0].action, + ] + verifylist = [('action', self.rbac_policies[0].action)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # DisplayCommandBase.take_action() returns two tuples columns, data = self.cmd.take_action(parsed_args) - self.network.rbac_policies.assert_called_with(**{ - 'action': self.rbac_policies[0].action - }) + self.network_client.rbac_policies.assert_called_with( + **{'action': self.rbac_policies[0].action} + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -465,42 +516,45 @@ def test_network_rbac_list_with_long(self): columns, data = self.cmd.take_action(parsed_args) - self.network.rbac_policies.assert_called_with() + self.network_client.rbac_policies.assert_called_with() self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, list(data)) def test_network_rbac_list_target_project_opt(self): arglist = [ - '--target-project', self.rbac_policies[0].target_project_id, ] + '--target-project', + self.rbac_policies[0].target_project_id, + ] verifylist = [ - ('target_project', self.rbac_policies[0].target_project_id)] + ('target_project', self.rbac_policies[0].target_project_id) + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # 
DisplayCommandBase.take_action() returns two tuples columns, data = self.cmd.take_action(parsed_args) - self.network.rbac_policies.assert_called_with(**{ - 'target_project_id': self.project.id - }) + self.network_client.rbac_policies.assert_called_with( + **{'target_project_id': self.project.id} + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestSetNetworkRBAC(TestNetworkRBAC): - project = identity_fakes_v3.FakeProject.create_one_project() rbac_policy = network_fakes.create_one_network_rbac( - attrs={'target_tenant': project.id}) + attrs={'target_tenant': project.id} + ) def setUp(self): - super(TestSetNetworkRBAC, self).setUp() + super().setUp() # Get the command object to test - self.cmd = network_rbac.SetNetworkRBAC(self.app, self.namespace) + self.cmd = network_rbac.SetNetworkRBAC(self.app, None) - self.network.find_rbac_policy = mock.Mock( - return_value=self.rbac_policy) - self.network.update_rbac_policy = mock.Mock(return_value=None) + self.network_client.find_rbac_policy.return_value = self.rbac_policy + + self.network_client.update_rbac_policy.return_value = None self.projects_mock.get.return_value = self.project def test_network_rbac_set_nothing(self): @@ -513,17 +567,19 @@ def test_network_rbac_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_rbac_policy.assert_called_once_with( + self.network_client.find_rbac_policy.assert_called_once_with( self.rbac_policy.id, ignore_missing=False ) attrs = {} - self.network.update_rbac_policy.assert_called_once_with( - self.rbac_policy, **attrs) + self.network_client.update_rbac_policy.assert_called_once_with( + self.rbac_policy, **attrs + ) self.assertIsNone(result) def test_network_rbac_set(self): arglist = [ - '--target-project', self.project.id, + '--target-project', + self.project.id, self.rbac_policy.id, ] verifylist = [ @@ -533,17 +589,17 @@ def test_network_rbac_set(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_rbac_policy.assert_called_once_with( + self.network_client.find_rbac_policy.assert_called_once_with( self.rbac_policy.id, ignore_missing=False ) attrs = {'target_tenant': self.project.id} - self.network.update_rbac_policy.assert_called_once_with( - self.rbac_policy, **attrs) + self.network_client.update_rbac_policy.assert_called_once_with( + self.rbac_policy, **attrs + ) self.assertIsNone(result) class TestShowNetworkRBAC(TestNetworkRBAC): - rbac_policy = network_fakes.create_one_network_rbac() columns = ( @@ -565,20 +621,24 @@ class TestShowNetworkRBAC(TestNetworkRBAC): ] def setUp(self): - super(TestShowNetworkRBAC, self).setUp() + super().setUp() # Get the command object to test - self.cmd = network_rbac.ShowNetworkRBAC(self.app, self.namespace) + self.cmd = network_rbac.ShowNetworkRBAC(self.app, None) - self.network.find_rbac_policy = mock.Mock( - return_value=self.rbac_policy) + self.network_client.find_rbac_policy.return_value = self.rbac_policy def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_network_rbac_show_all_options(self): arglist = [ @@ -592,7 +652,7 @@ def test_network_rbac_show_all_options(self): # DisplayCommandBase.take_action() returns two tuples columns, 
data = self.cmd.take_action(parsed_args) - self.network.find_rbac_policy.assert_called_with( + self.network_client.find_rbac_policy.assert_called_with( self.rbac_policy.id, ignore_missing=False ) self.assertEqual(self.columns, columns) diff --git a/openstackclient/tests/unit/network/v2/test_network_segment.py b/openstackclient/tests/unit/network/v2/test_network_segment.py index b41b619172..ab71c32547 100644 --- a/openstackclient/tests/unit/network/v2/test_network_segment.py +++ b/openstackclient/tests/unit/network/v2/test_network_segment.py @@ -11,7 +11,6 @@ # under the License. # -from unittest import mock from unittest.mock import call from osc_lib import exceptions @@ -22,21 +21,18 @@ class TestNetworkSegment(network_fakes.TestNetworkV2): - def setUp(self): - super(TestNetworkSegment, self).setUp() - - # Get a shortcut to the network client - self.network = self.app.client_manager.network + super().setUp() class TestCreateNetworkSegment(TestNetworkSegment): - # The network segment to create along with associated network. _network_segment = network_fakes.create_one_network_segment() - _network = network_fakes.create_one_network({ - 'id': _network_segment.network_id, - }) + _network = network_fakes.create_one_network( + { + 'id': _network_segment.network_id, + } + ) columns = ( 'description', @@ -59,37 +55,43 @@ class TestCreateNetworkSegment(TestNetworkSegment): ) def setUp(self): - super(TestCreateNetworkSegment, self).setUp() + super().setUp() - self.network.create_segment = mock.Mock( - return_value=self._network_segment - ) - self.network.find_network = mock.Mock(return_value=self._network) + self.network_client.create_segment.return_value = self._network_segment + + self.network_client.find_network.return_value = self._network # Get the command object to test - self.cmd = network_segment.CreateNetworkSegment( - self.app, - self.namespace - ) + self.cmd = network_segment.CreateNetworkSegment(self.app, None) def test_create_no_options(self): # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_create_invalid_network_type(self): arglist = [ - '--network', self._network_segment.network_id, - '--network-type', 'foo', + '--network', + self._network_segment.network_id, + '--network-type', + 'foo', self._network_segment.name, ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, []) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) def test_create_minimum_options(self): arglist = [ - '--network', self._network_segment.network_id, - '--network-type', self._network_segment.network_type, + '--network', + self._network_segment.network_id, + '--network-type', + self._network_segment.network_type, self._network_segment.name, ] verifylist = [ @@ -101,26 +103,32 @@ def test_create_minimum_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_network.assert_called_once_with( - self._network_segment.network_id, - ignore_missing=False + self.network_client.find_network.assert_called_once_with( + self._network_segment.network_id, ignore_missing=False + ) + self.network_client.create_segment.assert_called_once_with( + **{ + 'network_id': self._network_segment.network_id, + 'network_type': self._network_segment.network_type, + 'name': 
self._network_segment.name, + } ) - self.network.create_segment.assert_called_once_with(**{ - 'network_id': self._network_segment.network_id, - 'network_type': self._network_segment.network_type, - 'name': self._network_segment.name, - }) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_create_all_options(self): arglist = [ - '--description', self._network_segment.description, - '--network', self._network_segment.network_id, - '--network-type', self._network_segment.network_type, - '--physical-network', self._network_segment.physical_network, - '--segment', str(self._network_segment.segmentation_id), + '--description', + self._network_segment.description, + '--network', + self._network_segment.network_id, + '--network-type', + self._network_segment.network_type, + '--physical-network', + self._network_segment.physical_network, + '--segment', + str(self._network_segment.segmentation_id), self._network_segment.name, ] verifylist = [ @@ -135,41 +143,36 @@ def test_create_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_network.assert_called_once_with( - self._network_segment.network_id, - ignore_missing=False + self.network_client.find_network.assert_called_once_with( + self._network_segment.network_id, ignore_missing=False + ) + self.network_client.create_segment.assert_called_once_with( + **{ + 'description': self._network_segment.description, + 'network_id': self._network_segment.network_id, + 'network_type': self._network_segment.network_type, + 'physical_network': self._network_segment.physical_network, + 'segmentation_id': self._network_segment.segmentation_id, + 'name': self._network_segment.name, + } ) - self.network.create_segment.assert_called_once_with(**{ - 'description': self._network_segment.description, - 'network_id': self._network_segment.network_id, - 'network_type': self._network_segment.network_type, - 'physical_network': self._network_segment.physical_network, - 'segmentation_id': self._network_segment.segmentation_id, - 'name': self._network_segment.name, - }) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestDeleteNetworkSegment(TestNetworkSegment): - # The network segments to delete. 
_network_segments = network_fakes.create_network_segments() def setUp(self): - super(TestDeleteNetworkSegment, self).setUp() + super().setUp() - self.network.delete_segment = mock.Mock(return_value=None) - self.network.find_segment = mock.Mock( - side_effect=self._network_segments - ) + self.network_client.delete_segment.return_value = None + self.network_client.find_segment.side_effect = self._network_segments # Get the command object to test - self.cmd = network_segment.DeleteNetworkSegment( - self.app, - self.namespace - ) + self.cmd = network_segment.DeleteNetworkSegment(self.app, None) def test_delete(self): arglist = [ @@ -182,7 +185,7 @@ def test_delete(self): result = self.cmd.take_action(parsed_args) - self.network.delete_segment.assert_called_once_with( + self.network_client.delete_segment.assert_called_once_with( self._network_segments[0] ) self.assertIsNone(result) @@ -201,38 +204,37 @@ def test_delete_multiple(self): calls = [] for _network_segment in self._network_segments: calls.append(call(_network_segment)) - self.network.delete_segment.assert_has_calls(calls) + self.network_client.delete_segment.assert_has_calls(calls) self.assertIsNone(result) def test_delete_multiple_with_exception(self): - arglist = [ - self._network_segments[0].id, - 'doesnotexist' - ] + arglist = [self._network_segments[0].id, 'doesnotexist'] verifylist = [ - ('network_segment', [self._network_segments[0].id, - 'doesnotexist']), + ( + 'network_segment', + [self._network_segments[0].id, 'doesnotexist'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - find_mock_result = [self._network_segments[0], - exceptions.CommandError] - self.network.find_segment = ( - mock.Mock(side_effect=find_mock_result) - ) + find_mock_result = [self._network_segments[0], exceptions.CommandError] + self.network_client.find_segment.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 network segments failed to delete.', - str(e)) - - self.network.find_segment.assert_any_call( - self._network_segments[0].id, ignore_missing=False) - self.network.find_segment.assert_any_call( - 'doesnotexist', ignore_missing=False) - self.network.delete_segment.assert_called_once_with( + self.assertEqual( + '1 of 2 network segments failed to delete.', str(e) + ) + + self.network_client.find_segment.assert_any_call( + self._network_segments[0].id, ignore_missing=False + ) + self.network_client.find_segment.assert_any_call( + 'doesnotexist', ignore_missing=False + ) + self.network_client.delete_segment.assert_called_once_with( self._network_segments[0] ) @@ -248,39 +250,42 @@ class TestListNetworkSegment(TestNetworkSegment): 'Network Type', 'Segment', ) - columns_long = columns + ( - 'Physical Network', - ) + columns_long = columns + ('Physical Network',) data = [] for _network_segment in _network_segments: - data.append(( - _network_segment.id, - _network_segment.name, - _network_segment.network_id, - _network_segment.network_type, - _network_segment.segmentation_id, - )) + data.append( + ( + _network_segment.id, + _network_segment.name, + _network_segment.network_id, + _network_segment.network_type, + _network_segment.segmentation_id, + ) + ) data_long = [] for _network_segment in _network_segments: - data_long.append(( - _network_segment.id, - _network_segment.name, - _network_segment.network_id, - _network_segment.network_type, - _network_segment.segmentation_id, - 
_network_segment.physical_network, - )) + data_long.append( + ( + _network_segment.id, + _network_segment.name, + _network_segment.network_id, + _network_segment.network_type, + _network_segment.segmentation_id, + _network_segment.physical_network, + ) + ) def setUp(self): - super(TestListNetworkSegment, self).setUp() + super().setUp() # Get the command object to test - self.cmd = network_segment.ListNetworkSegment(self.app, self.namespace) + self.cmd = network_segment.ListNetworkSegment(self.app, None) + + self.network_client.find_network.return_value = self._network - self.network.find_network = mock.Mock(return_value=self._network) - self.network.segments = mock.Mock(return_value=self._network_segments) + self.network_client.segments.return_value = self._network_segments def test_list_no_option(self): arglist = [] @@ -292,7 +297,7 @@ def test_list_no_option(self): columns, data = self.cmd.take_action(parsed_args) - self.network.segments.assert_called_once_with() + self.network_client.segments.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -308,7 +313,7 @@ def test_list_long(self): columns, data = self.cmd.take_action(parsed_args) - self.network.segments.assert_called_once_with() + self.network_client.segments.assert_called_once_with() self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, list(data)) @@ -317,15 +322,12 @@ def test_list_network(self): '--network', self._network.id, ] - verifylist = [ - ('long', False), - ('network', self._network.id) - ] + verifylist = [('long', False), ('network', self._network.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.segments.assert_called_once_with( + self.network_client.segments.assert_called_once_with( **{'network_id': self._network.id} ) self.assertEqual(self.columns, columns) @@ -333,22 +335,18 @@ def test_list_network(self): class TestSetNetworkSegment(TestNetworkSegment): - # The network segment to show. 
_network_segment = network_fakes.create_one_network_segment() def setUp(self): - super(TestSetNetworkSegment, self).setUp() + super().setUp() - self.network.find_segment = mock.Mock( - return_value=self._network_segment - ) - self.network.update_segment = mock.Mock( - return_value=self._network_segment - ) + self.network_client.find_segment.return_value = self._network_segment + + self.network_client.update_segment.return_value = self._network_segment # Get the command object to test - self.cmd = network_segment.SetNetworkSegment(self.app, self.namespace) + self.cmd = network_segment.SetNetworkSegment(self.app, None) def test_set_no_options(self): arglist = [ @@ -361,15 +359,17 @@ def test_set_no_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_segment.assert_called_once_with( + self.network_client.update_segment.assert_called_once_with( self._network_segment, **{} ) self.assertIsNone(result) def test_set_all_options(self): arglist = [ - '--description', 'new description', - '--name', 'new name', + '--description', + 'new description', + '--name', + 'new name', self._network_segment.id, ] verifylist = [ @@ -385,14 +385,13 @@ def test_set_all_options(self): 'description': 'new description', 'name': 'new name', } - self.network.update_segment.assert_called_once_with( + self.network_client.update_segment.assert_called_once_with( self._network_segment, **attrs ) self.assertIsNone(result) class TestShowNetworkSegment(TestNetworkSegment): - # The network segment to show. _network_segment = network_fakes.create_one_network_segment() @@ -417,19 +416,18 @@ class TestShowNetworkSegment(TestNetworkSegment): ) def setUp(self): - super(TestShowNetworkSegment, self).setUp() + super().setUp() - self.network.find_segment = mock.Mock( - return_value=self._network_segment - ) + self.network_client.find_segment.return_value = self._network_segment # Get the command object to test - self.cmd = network_segment.ShowNetworkSegment(self.app, self.namespace) + self.cmd = network_segment.ShowNetworkSegment(self.app, None) def test_show_no_options(self): # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_show_all_options(self): arglist = [ @@ -442,9 +440,8 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_segment.assert_called_once_with( - self._network_segment.id, - ignore_missing=False + self.network_client.find_segment.assert_called_once_with( + self._network_segment.id, ignore_missing=False ) self.assertEqual(self.columns, columns) diff --git a/openstackclient/tests/unit/network/v2/test_network_segment_range.py b/openstackclient/tests/unit/network/v2/test_network_segment_range.py index 20d34bfcc9..9c9c900e36 100644 --- a/openstackclient/tests/unit/network/v2/test_network_segment_range.py +++ b/openstackclient/tests/unit/network/v2/test_network_segment_range.py @@ -25,30 +25,25 @@ class TestAuxiliaryFunctions(tests_utils.TestCase): - def test__get_ranges(self): input_reference = [ ([1, 2, 3, 4, 5, 6, 7], ['1-7']), ([1, 2, 5, 4, 3, 6, 7], ['1-7']), ([1, 2, 4, 3, 7, 6], ['1-4', '6-7']), - ([1, 2, 4, 3, '13', 12, '7', '6'], ['1-4', '6-7', '12-13']) + ([1, 2, 4, 3, '13', 12, '7', '6'], ['1-4', '6-7', '12-13']), ] for input, 
reference in input_reference: - self.assertEqual(reference, - list(network_segment_range._get_ranges(input))) + self.assertEqual( + reference, list(network_segment_range._get_ranges(input)) + ) class TestNetworkSegmentRange(network_fakes.TestNetworkV2): - def setUp(self): - super(TestNetworkSegmentRange, self).setUp() - - # Get a shortcut to the network client - self.network = self.app.client_manager.network + super().setUp() class TestCreateNetworkSegmentRange(TestNetworkSegmentRange): - # The network segment range to create. _network_segment_range = network_fakes.create_one_network_segment_range() @@ -81,42 +76,54 @@ class TestCreateNetworkSegmentRange(TestNetworkSegmentRange): ) def setUp(self): - super(TestCreateNetworkSegmentRange, self).setUp() + super().setUp() - self.network.find_extension = mock.Mock() - self.network.create_network_segment_range = mock.Mock( - return_value=self._network_segment_range + self.network_client.create_network_segment_range.return_value = ( + self._network_segment_range ) # Get the command object to test self.cmd = network_segment_range.CreateNetworkSegmentRange( - self.app, - self.namespace + self.app, None ) def test_create_no_options(self): # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_create_invalid_network_type(self): arglist = [ '--private', - '--project', self._network_segment_range.project_id, - '--network-type', 'foo', - '--minimum', str(self._network_segment_range.minimum), - '--maximum', str(self._network_segment_range.maximum), + '--project', + self._network_segment_range.project_id, + '--network-type', + 'foo', + '--minimum', + str(self._network_segment_range.minimum), + '--maximum', + str(self._network_segment_range.maximum), self._network_segment_range.name, ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, []) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) def test_create_default_with_project_id(self): arglist = [ - '--project', self._network_segment_range.project_id, - '--network-type', 'vxlan', - '--minimum', str(self._network_segment_range.minimum), - '--maximum', str(self._network_segment_range.maximum), + '--project', + self._network_segment_range.project_id, + '--network-type', + 'vxlan', + '--minimum', + str(self._network_segment_range.minimum), + '--maximum', + str(self._network_segment_range.maximum), self._network_segment_range.name, ] verifylist = [ @@ -128,17 +135,21 @@ def test_create_default_with_project_id(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_create_shared_with_project_id(self): arglist = [ '--shared', - '--project', self._network_segment_range.project_id, - '--network-type', 'vxlan', - '--minimum', str(self._network_segment_range.minimum), - '--maximum', str(self._network_segment_range.maximum), + '--project', + self._network_segment_range.project_id, + '--network-type', + 'vxlan', + '--minimum', + str(self._network_segment_range.minimum), + '--maximum', + str(self._network_segment_range.maximum), self._network_segment_range.name, ] verifylist = [ @@ -151,17 +162,21 @@ def test_create_shared_with_project_id(self): ] parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_create_tunnel_with_physical_network(self): arglist = [ '--shared', - '--network-type', 'vxlan', - '--physical-network', self._network_segment_range.physical_network, - '--minimum', str(self._network_segment_range.minimum), - '--maximum', str(self._network_segment_range.maximum), + '--network-type', + 'vxlan', + '--physical-network', + self._network_segment_range.physical_network, + '--minimum', + str(self._network_segment_range.minimum), + '--maximum', + str(self._network_segment_range.maximum), self._network_segment_range.name, ] verifylist = [ @@ -174,15 +189,18 @@ def test_create_tunnel_with_physical_network(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_create_minimum_options(self): arglist = [ - '--network-type', 'vxlan', - '--minimum', str(self._network_segment_range.minimum), - '--maximum', str(self._network_segment_range.maximum), + '--network-type', + 'vxlan', + '--minimum', + str(self._network_segment_range.minimum), + '--maximum', + str(self._network_segment_range.maximum), self._network_segment_range.name, ] verifylist = [ @@ -195,13 +213,15 @@ def test_create_minimum_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_network_segment_range.assert_called_once_with(**{ - 'shared': True, - 'network_type': 'vxlan', - 'minimum': self._network_segment_range.minimum, - 'maximum': self._network_segment_range.maximum, - 'name': self._network_segment_range.name, - }) + self.network_client.create_network_segment_range.assert_called_once_with( + **{ + 'shared': True, + 'network_type': 'vxlan', + 'minimum': self._network_segment_range.minimum, + 'maximum': self._network_segment_range.maximum, + 'name': self._network_segment_range.name, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -209,10 +229,14 @@ def test_create_minimum_options(self): def test_create_private_minimum_options(self): arglist = [ '--private', - '--project', self._network_segment_range.project_id, - '--network-type', 'vxlan', - '--minimum', str(self._network_segment_range.minimum), - '--maximum', str(self._network_segment_range.maximum), + '--project', + self._network_segment_range.project_id, + '--network-type', + 'vxlan', + '--minimum', + str(self._network_segment_range.minimum), + '--maximum', + str(self._network_segment_range.maximum), self._network_segment_range.name, ] verifylist = [ @@ -227,14 +251,16 @@ def test_create_private_minimum_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_network_segment_range.assert_called_once_with(**{ - 'shared': False, - 'project_id': mock.ANY, - 'network_type': 'vxlan', - 'minimum': self._network_segment_range.minimum, - 'maximum': self._network_segment_range.maximum, - 'name': self._network_segment_range.name, - }) + self.network_client.create_network_segment_range.assert_called_once_with( + **{ + 'shared': False, + 'project_id': mock.ANY, + 'network_type': 'vxlan', + 'minimum': self._network_segment_range.minimum, + 
'maximum': self._network_segment_range.maximum, + 'name': self._network_segment_range.name, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -242,9 +268,12 @@ def test_create_private_minimum_options(self): def test_create_shared_minimum_options(self): arglist = [ '--shared', - '--network-type', 'vxlan', - '--minimum', str(self._network_segment_range.minimum), - '--maximum', str(self._network_segment_range.maximum), + '--network-type', + 'vxlan', + '--minimum', + str(self._network_segment_range.minimum), + '--maximum', + str(self._network_segment_range.maximum), self._network_segment_range.name, ] verifylist = [ @@ -258,13 +287,15 @@ def test_create_shared_minimum_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_network_segment_range.assert_called_once_with(**{ - 'shared': True, - 'network_type': 'vxlan', - 'minimum': self._network_segment_range.minimum, - 'maximum': self._network_segment_range.maximum, - 'name': self._network_segment_range.name, - }) + self.network_client.create_network_segment_range.assert_called_once_with( + **{ + 'shared': True, + 'network_type': 'vxlan', + 'minimum': self._network_segment_range.minimum, + 'maximum': self._network_segment_range.maximum, + 'name': self._network_segment_range.name, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -272,11 +303,16 @@ def test_create_shared_minimum_options(self): def test_create_all_options(self): arglist = [ '--private', - '--project', self._network_segment_range.project_id, - '--network-type', self._network_segment_range.network_type, - '--physical-network', self._network_segment_range.physical_network, - '--minimum', str(self._network_segment_range.minimum), - '--maximum', str(self._network_segment_range.maximum), + '--project', + self._network_segment_range.project_id, + '--network-type', + self._network_segment_range.network_type, + '--physical-network', + self._network_segment_range.physical_network, + '--minimum', + str(self._network_segment_range.minimum), + '--maximum', + str(self._network_segment_range.maximum), self._network_segment_range.name, ] verifylist = [ @@ -292,39 +328,38 @@ def test_create_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_network_segment_range.assert_called_once_with(**{ - 'shared': self._network_segment_range.shared, - 'project_id': mock.ANY, - 'network_type': self._network_segment_range.network_type, - 'physical_network': self._network_segment_range.physical_network, - 'minimum': self._network_segment_range.minimum, - 'maximum': self._network_segment_range.maximum, - 'name': self._network_segment_range.name, - }) + self.network_client.create_network_segment_range.assert_called_once_with( + **{ + 'shared': self._network_segment_range.shared, + 'project_id': mock.ANY, + 'network_type': self._network_segment_range.network_type, + 'physical_network': self._network_segment_range.physical_network, # noqa: E501 + 'minimum': self._network_segment_range.minimum, + 'maximum': self._network_segment_range.maximum, + 'name': self._network_segment_range.name, + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestDeleteNetworkSegmentRange(TestNetworkSegmentRange): - # The network segment ranges to delete. 
_network_segment_ranges = network_fakes.create_network_segment_ranges() def setUp(self): - super(TestDeleteNetworkSegmentRange, self).setUp() + super().setUp() + + self.network_client.delete_network_segment_range.return_value = None - self.network.find_extension = mock.Mock() - self.network.delete_network_segment_range = mock.Mock( - return_value=None) - self.network.find_network_segment_range = mock.Mock( - side_effect=self._network_segment_ranges + self.network_client.find_network_segment_range.side_effect = ( + self._network_segment_ranges ) # Get the command object to test self.cmd = network_segment_range.DeleteNetworkSegmentRange( - self.app, - self.namespace + self.app, None ) def test_delete(self): @@ -338,7 +373,7 @@ def test_delete(self): result = self.cmd.take_action(parsed_args) - self.network.delete_network_segment_range.assert_called_once_with( + self.network_client.delete_network_segment_range.assert_called_once_with( self._network_segment_ranges[0] ) self.assertIsNone(result) @@ -357,45 +392,52 @@ def test_delete_multiple(self): calls = [] for _network_segment_range in self._network_segment_ranges: calls.append(call(_network_segment_range)) - self.network.delete_network_segment_range.assert_has_calls(calls) + self.network_client.delete_network_segment_range.assert_has_calls( + calls + ) self.assertIsNone(result) def test_delete_multiple_with_exception(self): - arglist = [ - self._network_segment_ranges[0].id, - 'doesnotexist' - ] + arglist = [self._network_segment_ranges[0].id, 'doesnotexist'] verifylist = [ - ('network_segment_range', - [self._network_segment_ranges[0].id, 'doesnotexist']), + ( + 'network_segment_range', + [self._network_segment_ranges[0].id, 'doesnotexist'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - find_mock_result = [self._network_segment_ranges[0], - exceptions.CommandError] - self.network.find_network_segment_range = ( - mock.Mock(side_effect=find_mock_result) + find_mock_result = [ + self._network_segment_ranges[0], + exceptions.CommandError, + ] + self.network_client.find_network_segment_range.side_effect = ( + find_mock_result ) try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 network segment ranges failed to delete.', - str(e)) - - self.network.find_network_segment_range.assert_any_call( - self._network_segment_ranges[0].id, ignore_missing=False) - self.network.find_network_segment_range.assert_any_call( - 'doesnotexist', ignore_missing=False) - self.network.delete_network_segment_range.assert_called_once_with( + self.assertEqual( + '1 of 2 network segment ranges failed to delete.', str(e) + ) + + self.network_client.find_network_segment_range.assert_any_call( + self._network_segment_ranges[0].id, ignore_missing=False + ) + self.network_client.find_network_segment_range.assert_any_call( + 'doesnotexist', ignore_missing=False + ) + self.network_client.delete_network_segment_range.assert_called_once_with( self._network_segment_ranges[0] ) class TestListNetworkSegmentRange(TestNetworkSegmentRange): - _network_segment_ranges = ( - network_fakes.create_network_segment_ranges(count=3)) + _network_segment_ranges = network_fakes.create_network_segment_ranges( + count=3 + ) columns = ( 'ID', @@ -406,7 +448,7 @@ class TestListNetworkSegmentRange(TestNetworkSegmentRange): 'Network Type', 'Physical Network', 'Minimum ID', - 'Maximum ID' + 'Maximum ID', ) columns_long = columns + ( 'Used', @@ -415,44 +457,49 @@ class 
TestListNetworkSegmentRange(TestNetworkSegmentRange): data = [] for _network_segment_range in _network_segment_ranges: - data.append(( - _network_segment_range.id, - _network_segment_range.name, - _network_segment_range.default, - _network_segment_range.shared, - _network_segment_range.project_id, - _network_segment_range.network_type, - _network_segment_range.physical_network, - _network_segment_range.minimum, - _network_segment_range.maximum, - )) + data.append( + ( + _network_segment_range.id, + _network_segment_range.name, + _network_segment_range.default, + _network_segment_range.shared, + _network_segment_range.project_id, + _network_segment_range.network_type, + _network_segment_range.physical_network, + _network_segment_range.minimum, + _network_segment_range.maximum, + ) + ) data_long = [] for _network_segment_range in _network_segment_ranges: - data_long.append(( - _network_segment_range.id, - _network_segment_range.name, - _network_segment_range.default, - _network_segment_range.shared, - _network_segment_range.project_id, - _network_segment_range.network_type, - _network_segment_range.physical_network, - _network_segment_range.minimum, - _network_segment_range.maximum, - {'3312e4ba67864b2eb53f3f41432f8efc': ['104', '106']}, - ['100-103', '105'], - )) + data_long.append( + ( + _network_segment_range.id, + _network_segment_range.name, + _network_segment_range.default, + _network_segment_range.shared, + _network_segment_range.project_id, + _network_segment_range.network_type, + _network_segment_range.physical_network, + _network_segment_range.minimum, + _network_segment_range.maximum, + {'3312e4ba67864b2eb53f3f41432f8efc': ['104', '106']}, + ['100-103', '105'], + ) + ) def setUp(self): - super(TestListNetworkSegmentRange, self).setUp() + super().setUp() + + self.network_client.network_segment_ranges.return_value = ( + self._network_segment_ranges + ) # Get the command object to test self.cmd = network_segment_range.ListNetworkSegmentRange( - self.app, self.namespace) - - self.network.find_extension = mock.Mock() - self.network.network_segment_ranges = mock.Mock( - return_value=self._network_segment_ranges) + self.app, None + ) def test_list_no_option(self): arglist = [] @@ -467,7 +514,7 @@ def test_list_no_option(self): columns, data = self.cmd.take_action(parsed_args) - self.network.network_segment_ranges.assert_called_once_with() + self.network_client.network_segment_ranges.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -486,40 +533,45 @@ def test_list_long(self): columns, data = self.cmd.take_action(parsed_args) - self.network.network_segment_ranges.assert_called_once_with() + self.network_client.network_segment_ranges.assert_called_once_with() self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, list(data)) class TestSetNetworkSegmentRange(TestNetworkSegmentRange): - # The network segment range to set. _network_segment_range = network_fakes.create_one_network_segment_range() # The network segment range updated. 
minimum_updated = _network_segment_range.minimum - 5 maximum_updated = _network_segment_range.maximum + 5 - available_updated = (list(range(minimum_updated, 104)) + [105] + - list(range(107, maximum_updated + 1))) - _network_segment_range_updated = network_fakes.\ - create_one_network_segment_range( - attrs={'minimum': minimum_updated, - 'maximum': maximum_updated, - 'used': {104: '3312e4ba67864b2eb53f3f41432f8efc', - 106: '3312e4ba67864b2eb53f3f41432f8efc'}, - 'available': available_updated} + available_updated = ( + list(range(minimum_updated, 104)) + + [105] + + list(range(107, maximum_updated + 1)) + ) + _network_segment_range_updated = ( + network_fakes.create_one_network_segment_range( + attrs={ + 'minimum': minimum_updated, + 'maximum': maximum_updated, + 'used': { + 104: '3312e4ba67864b2eb53f3f41432f8efc', + 106: '3312e4ba67864b2eb53f3f41432f8efc', + }, + 'available': available_updated, + } ) + ) def setUp(self): - super(TestSetNetworkSegmentRange, self).setUp() + super().setUp() - self.network.find_extension = mock.Mock() - self.network.find_network_segment_range = mock.Mock( - return_value=self._network_segment_range + self.network_client.find_network_segment_range.return_value = ( + self._network_segment_range ) # Get the command object to test - self.cmd = network_segment_range.SetNetworkSegmentRange(self.app, - self.namespace) + self.cmd = network_segment_range.SetNetworkSegmentRange(self.app, None) def test_set_no_options(self): arglist = [ @@ -530,21 +582,25 @@ def test_set_no_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.network.update_network_segment_range = mock.Mock( - return_value=self._network_segment_range + self.network_client.update_network_segment_range.return_value = ( + self._network_segment_range ) + result = self.cmd.take_action(parsed_args) - self.network.update_network_segment_range.assert_called_once_with( + self.network_client.update_network_segment_range.assert_called_once_with( self._network_segment_range, **{} ) self.assertIsNone(result) def test_set_all_options(self): arglist = [ - '--name', 'new name', - '--minimum', str(self.minimum_updated), - '--maximum', str(self.maximum_updated), + '--name', + 'new name', + '--minimum', + str(self.minimum_updated), + '--maximum', + str(self.maximum_updated), self._network_segment_range.id, ] verifylist = [ @@ -555,9 +611,10 @@ def test_set_all_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.network.update_network_segment_range = mock.Mock( - return_value=self._network_segment_range_updated + self.network_client.update_network_segment_range.return_value = ( + self._network_segment_range_updated ) + result = self.cmd.take_action(parsed_args) attrs = { @@ -565,14 +622,13 @@ def test_set_all_options(self): 'minimum': self.minimum_updated, 'maximum': self.maximum_updated, } - self.network.update_network_segment_range.assert_called_once_with( + self.network_client.update_network_segment_range.assert_called_once_with( self._network_segment_range, **attrs ) self.assertIsNone(result) class TestShowNetworkSegmentRange(TestNetworkSegmentRange): - # The network segment range to show. 
_network_segment_range = network_fakes.create_one_network_segment_range() @@ -605,21 +661,22 @@ class TestShowNetworkSegmentRange(TestNetworkSegmentRange): ) def setUp(self): - super(TestShowNetworkSegmentRange, self).setUp() + super().setUp() - self.network.find_extension = mock.Mock() - self.network.find_network_segment_range = mock.Mock( - return_value=self._network_segment_range + self.network_client.find_network_segment_range.return_value = ( + self._network_segment_range ) # Get the command object to test self.cmd = network_segment_range.ShowNetworkSegmentRange( - self.app, self.namespace) + self.app, None + ) def test_show_no_options(self): # Missing required args should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_show_all_options(self): arglist = [ @@ -632,9 +689,8 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_network_segment_range.assert_called_once_with( - self._network_segment_range.id, - ignore_missing=False + self.network_client.find_network_segment_range.assert_called_once_with( + self._network_segment_range.id, ignore_missing=False ) self.assertEqual(self.columns, columns) diff --git a/openstackclient/tests/unit/network/v2/test_network_service_provider.py b/openstackclient/tests/unit/network/v2/test_network_service_provider.py index 5e4ddea6cf..f84bcd38d7 100644 --- a/openstackclient/tests/unit/network/v2/test_network_service_provider.py +++ b/openstackclient/tests/unit/network/v2/test_network_service_provider.py @@ -13,25 +13,24 @@ # License for the specific language governing permissions and limitations # under the License. 
-from unittest import mock -from openstackclient.network.v2 import network_service_provider \ - as service_provider +from openstackclient.network.v2 import ( + network_service_provider as service_provider, +) from openstackclient.tests.unit.network.v2 import fakes class TestNetworkServiceProvider(fakes.TestNetworkV2): - def setUp(self): - super(TestNetworkServiceProvider, self).setUp() - self.network = self.app.client_manager.network + super().setUp() class TestListNetworkServiceProvider(TestNetworkServiceProvider): - provider_list = \ + provider_list = ( fakes.FakeNetworkServiceProvider.create_network_service_providers( count=2 ) + ) columns = ( 'Service Type', @@ -42,21 +41,19 @@ class TestListNetworkServiceProvider(TestNetworkServiceProvider): data = [] for provider in provider_list: - data.append(( - provider.service_type, - provider.name, - provider.is_default, - )) + data.append( + ( + provider.service_type, + provider.name, + provider.is_default, + ) + ) def setUp(self): - super(TestListNetworkServiceProvider, self).setUp() - self.network.service_providers = mock.Mock( - return_value=self.provider_list - ) + super().setUp() + self.network_client.service_providers.return_value = self.provider_list - self.cmd = \ - service_provider.ListNetworkServiceProvider(self.app, - self.namespace) + self.cmd = service_provider.ListNetworkServiceProvider(self.app, None) def test_network_service_provider_list(self): arglist = [] @@ -66,6 +63,6 @@ def test_network_service_provider_list(self): columns, data = self.cmd.take_action(parsed_args) - self.network.service_providers.assert_called_with() + self.network_client.service_providers.assert_called_with() self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) diff --git a/openstackclient/tests/unit/network/v2/test_network_trunk.py b/openstackclient/tests/unit/network/v2/test_network_trunk.py index fae70fb018..1056c21c30 100644 --- a/openstackclient/tests/unit/network/v2/test_network_trunk.py +++ b/openstackclient/tests/unit/network/v2/test_network_trunk.py @@ -9,11 +9,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-# -import argparse import copy -from unittest import mock from unittest.mock import call from osc_lib.cli import format_columns @@ -23,22 +20,19 @@ from openstackclient.network.v2 import network_trunk from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3 from openstackclient.tests.unit.network.v2 import fakes as network_fakes -from openstackclient.tests.unit import utils as tests_utils +from openstackclient.tests.unit import utils as test_utils # Tests for Neutron trunks # class TestNetworkTrunk(network_fakes.TestNetworkV2): - def setUp(self): super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateNetworkTrunk(TestNetworkTrunk): @@ -46,20 +40,23 @@ class TestCreateNetworkTrunk(TestNetworkTrunk): domain = identity_fakes_v3.FakeDomain.create_one_domain() trunk_networks = network_fakes.create_networks(count=2) parent_port = network_fakes.create_one_port( - attrs={'project_id': project.id, - 'network_id': trunk_networks[0]['id']}) + attrs={'project_id': project.id, 'network_id': trunk_networks[0]['id']} + ) sub_port = network_fakes.create_one_port( - attrs={'project_id': project.id, - 'network_id': trunk_networks[1]['id']}) + attrs={'project_id': project.id, 'network_id': trunk_networks[1]['id']} + ) new_trunk = network_fakes.create_one_trunk( - attrs={'project_id': project.id, - 'port_id': parent_port['id'], - 'sub_ports': { - 'port_id': sub_port['id'], - 'segmentation_id': 42, - 'segmentation_type': 'vlan'} - }) + attrs={ + 'project_id': project.id, + 'port_id': parent_port['id'], + 'sub_ports': { + 'port_id': sub_port['id'], + 'segmentation_id': 42, + 'segmentation_type': 'vlan', + }, + } + ) columns = ( 'description', @@ -70,7 +67,7 @@ class TestCreateNetworkTrunk(TestNetworkTrunk): 'project_id', 'status', 'sub_ports', - 'tags' + 'tags', ) data = ( new_trunk.description, @@ -86,12 +83,15 @@ class TestCreateNetworkTrunk(TestNetworkTrunk): def setUp(self): super().setUp() - self.network.create_trunk = mock.Mock(return_value=self.new_trunk) - self.network.find_port = mock.Mock( - side_effect=[self.parent_port, self.sub_port]) + self.network_client.create_trunk.return_value = self.new_trunk + + self.network_client.find_port.side_effect = [ + self.parent_port, + self.sub_port, + ] # Get the command object to test - self.cmd = network_trunk.CreateNetworkTrunk(self.app, self.namespace) + self.cmd = network_trunk.CreateNetworkTrunk(self.app, None) self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain @@ -100,12 +100,18 @@ def test_create_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ - "--parent-port", self.new_trunk['port_id'], + "--parent-port", + self.new_trunk['port_id'], self.new_trunk['name'], ] verifylist = [ @@ -114,13 +120,15 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = 
(self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_trunk.assert_called_once_with(**{ - 'name': self.new_trunk['name'], - 'admin_state_up': self.new_trunk['admin_state_up'], - 'port_id': self.new_trunk['port_id'], - }) + self.network_client.create_trunk.assert_called_once_with( + **{ + 'name': self.new_trunk['name'], + 'admin_state_up': self.new_trunk['admin_state_up'], + 'port_id': self.new_trunk['port_id'], + } + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -129,37 +137,49 @@ def test_create_full_options(self): subport = self.new_trunk.sub_ports[0] arglist = [ "--disable", - "--description", self.new_trunk.description, - "--parent-port", self.new_trunk.port_id, - "--subport", 'port=%(port)s,segmentation-type=%(seg_type)s,' - 'segmentation-id=%(seg_id)s' % { - 'seg_id': subport['segmentation_id'], - 'seg_type': subport['segmentation_type'], - 'port': subport['port_id']}, + "--description", + self.new_trunk.description, + "--parent-port", + self.new_trunk.port_id, + "--subport", + 'port={port},segmentation-type={seg_type},' + 'segmentation-id={seg_id}'.format( + seg_id=subport['segmentation_id'], + seg_type=subport['segmentation_type'], + port=subport['port_id'], + ), self.new_trunk.name, ] verifylist = [ ('name', self.new_trunk.name), ('description', self.new_trunk.description), ('parent_port', self.new_trunk.port_id), - ('add_subports', [{ - 'port': subport['port_id'], - 'segmentation-id': str(subport['segmentation_id']), - 'segmentation-type': subport['segmentation_type']}]), + ( + 'add_subports', + [ + { + 'port': subport['port_id'], + 'segmentation-id': str(subport['segmentation_id']), + 'segmentation-type': subport['segmentation_type'], + } + ], + ), ('disable', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_trunk.assert_called_once_with(**{ - 'name': self.new_trunk.name, - 'description': self.new_trunk.description, - 'admin_state_up': False, - 'port_id': self.new_trunk.port_id, - 'sub_ports': [subport], - }) + self.network_client.create_trunk.assert_called_once_with( + **{ + 'name': self.new_trunk.name, + 'description': self.new_trunk.description, + 'admin_state_up': False, + 'port_id': self.new_trunk.port_id, + 'sub_ports': [subport], + } + ) self.assertEqual(self.columns, columns) data_with_desc = list(self.data) data_with_desc[0] = self.new_trunk['description'] @@ -169,27 +189,37 @@ def test_create_full_options(self): def test_create_trunk_with_subport_invalid_segmentation_id_fail(self): subport = self.new_trunk.sub_ports[0] arglist = [ - "--parent-port", self.new_trunk.port_id, - "--subport", "port=%(port)s,segmentation-type=%(seg_type)s," - "segmentation-id=boom" % { - 'seg_type': subport['segmentation_type'], - 'port': subport['port_id']}, + "--parent-port", + self.new_trunk.port_id, + "--subport", + "port={port},segmentation-type={seg_type}," + "segmentation-id=boom".format( + seg_type=subport['segmentation_type'], + port=subport['port_id'], + ), self.new_trunk.name, ] verifylist = [ ('name', self.new_trunk.name), ('parent_port', self.new_trunk.port_id), - ('add_subports', [{ - 'port': subport['port_id'], - 'segmentation-id': 'boom', - 'segmentation-type': subport['segmentation_type']}]), + ( + 'add_subports', + [ + { + 'port': subport['port_id'], + 'segmentation-id': 'boom', + 'segmentation-type': subport['segmentation_type'], + } + 
], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) with testtools.ExpectedException(exceptions.CommandError) as e: self.cmd.take_action(parsed_args) - self.assertEqual("Segmentation-id 'boom' is not an integer", - str(e)) + self.assertEqual( + "Segmentation-id 'boom' is not an integer", str(e) + ) def test_create_network_trunk_subports_without_optional_keys(self): subport = copy.copy(self.new_trunk.sub_ports[0]) @@ -197,26 +227,29 @@ def test_create_network_trunk_subports_without_optional_keys(self): subport.pop('segmentation_type') subport.pop('segmentation_id') arglist = [ - '--parent-port', self.new_trunk.port_id, - '--subport', 'port=%(port)s' % {'port': subport['port_id']}, + '--parent-port', + self.new_trunk.port_id, + '--subport', + 'port={port}'.format(port=subport['port_id']), self.new_trunk.name, ] verifylist = [ ('name', self.new_trunk.name), ('parent_port', self.new_trunk.port_id), - ('add_subports', [{ - 'port': subport['port_id']}]), + ('add_subports', [{'port': subport['port_id']}]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_trunk.assert_called_once_with(**{ - 'name': self.new_trunk.name, - 'admin_state_up': True, - 'port_id': self.new_trunk.port_id, - 'sub_ports': [subport], - }) + self.network_client.create_trunk.assert_called_once_with( + **{ + 'name': self.new_trunk.name, + 'admin_state_up': True, + 'port_id': self.new_trunk.port_id, + 'sub_ports': [subport], + } + ) self.assertEqual(self.columns, columns) data_with_desc = list(self.data) data_with_desc[0] = self.new_trunk['description'] @@ -226,22 +259,30 @@ def test_create_network_trunk_subports_without_optional_keys(self): def test_create_network_trunk_subports_without_required_key_fail(self): subport = self.new_trunk.sub_ports[0] arglist = [ - '--parent-port', self.new_trunk.port_id, - '--subport', 'segmentation-type=%(seg_type)s,' - 'segmentation-id=%(seg_id)s' % { - 'seg_id': subport['segmentation_id'], - 'seg_type': subport['segmentation_type']}, + '--parent-port', + self.new_trunk.port_id, + '--subport', + 'segmentation-type={seg_type},segmentation-id={seg_id}'.format( + seg_id=subport['segmentation_id'], + seg_type=subport['segmentation_type'], + ), self.new_trunk.name, ] verifylist = [ ('name', self.new_trunk.name), ('parent_port', self.new_trunk.port_id), - ('add_subports', [{ - 'segmentation_id': str(subport['segmentation_id']), - 'segmentation_type': subport['segmentation_type']}]), - ] - - with testtools.ExpectedException(argparse.ArgumentTypeError): + ( + 'add_subports', + [ + { + 'segmentation_id': str(subport['segmentation_id']), + 'segmentation_type': subport['segmentation_type'], + } + ], + ), + ] + + with testtools.ExpectedException(test_utils.ParserException): self.check_parser(self.cmd, arglist, verifylist) @@ -251,34 +292,42 @@ class TestDeleteNetworkTrunk(TestNetworkTrunk): domain = identity_fakes_v3.FakeDomain.create_one_domain() trunk_networks = network_fakes.create_networks(count=2) parent_port = network_fakes.create_one_port( - attrs={'project_id': project.id, - 'network_id': trunk_networks[0]['id']}) + attrs={'project_id': project.id, 'network_id': trunk_networks[0]['id']} + ) sub_port = network_fakes.create_one_port( - attrs={'project_id': project.id, - 'network_id': trunk_networks[1]['id']}) + attrs={'project_id': project.id, 'network_id': trunk_networks[1]['id']} + ) new_trunks = network_fakes.create_trunks( - 
attrs={'project_id': project.id, - 'port_id': parent_port['id'], - 'sub_ports': { - 'port_id': sub_port['id'], - 'segmentation_id': 42, - 'segmentation_type': 'vlan'} - }) + attrs={ + 'project_id': project.id, + 'port_id': parent_port['id'], + 'sub_ports': { + 'port_id': sub_port['id'], + 'segmentation_id': 42, + 'segmentation_type': 'vlan', + }, + } + ) def setUp(self): super().setUp() - self.network.find_trunk = mock.Mock( - side_effect=[self.new_trunks[0], self.new_trunks[1]]) - self.network.delete_trunk = mock.Mock(return_value=None) - self.network.find_port = mock.Mock( - side_effect=[self.parent_port, self.sub_port]) + self.network_client.find_trunk.side_effect = [ + self.new_trunks[0], + self.new_trunks[1], + ] + + self.network_client.delete_trunk.return_value = None + self.network_client.find_port.side_effect = [ + self.parent_port, + self.sub_port, + ] self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain # Get the command object to test - self.cmd = network_trunk.DeleteNetworkTrunk(self.app, self.namespace) + self.cmd = network_trunk.DeleteNetworkTrunk(self.app, None) def test_delete_trunkx(self): arglist = [ @@ -290,8 +339,9 @@ def test_delete_trunkx(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_trunk.assert_called_once_with( - self.new_trunks[0].id) + self.network_client.delete_trunk.assert_called_once_with( + self.new_trunks[0].id + ) self.assertIsNone(result) def test_delete_trunk_multiple(self): @@ -310,7 +360,7 @@ def test_delete_trunk_multiple(self): calls = [] for t in self.new_trunks: calls.append(call(t.id)) - self.network.delete_trunk.assert_has_calls(calls) + self.network_client.delete_trunk.assert_has_calls(calls) self.assertIsNone(result) def test_delete_trunk_multiple_with_exception(self): @@ -319,23 +369,24 @@ def test_delete_trunk_multiple_with_exception(self): 'unexist_trunk', ] verifylist = [ - ('trunk', - [self.new_trunks[0].name, 'unexist_trunk']), + ('trunk', [self.new_trunks[0].name, 'unexist_trunk']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.network.find_trunk = mock.Mock( - side_effect=[self.new_trunks[0], exceptions.CommandError]) + self.network_client.find_trunk.side_effect = [ + self.new_trunks[0], + exceptions.CommandError, + ] + with testtools.ExpectedException(exceptions.CommandError) as e: self.cmd.take_action(parsed_args) self.assertEqual('1 of 2 trunks failed to delete.', str(e)) - self.network.delete_trunk.assert_called_once_with( + self.network_client.delete_trunk.assert_called_once_with( self.new_trunks[0].id ) class TestShowNetworkTrunk(TestNetworkTrunk): - project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() # The trunk to set. 
@@ -349,7 +400,7 @@ class TestShowNetworkTrunk(TestNetworkTrunk): 'project_id', 'status', 'sub_ports', - 'tags' + 'tags', ) data = ( new_trunk.description, @@ -365,21 +416,26 @@ class TestShowNetworkTrunk(TestNetworkTrunk): def setUp(self): super().setUp() - self.network.find_trunk = mock.Mock(return_value=self.new_trunk) - self.network.get_trunk = mock.Mock(return_value=self.new_trunk) + self.network_client.find_trunk.return_value = self.new_trunk + self.network_client.get_trunk.return_value = self.new_trunk self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain # Get the command object to test - self.cmd = network_trunk.ShowNetworkTrunk(self.app, self.namespace) + self.cmd = network_trunk.ShowNetworkTrunk(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -392,7 +448,9 @@ def test_show_all_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.get_trunk.assert_called_once_with(self.new_trunk.id) + self.network_client.get_trunk.assert_called_once_with( + self.new_trunk.id + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) @@ -402,51 +460,42 @@ class TestListNetworkTrunk(TestNetworkTrunk): domain = identity_fakes_v3.FakeDomain.create_one_domain() # Create trunks to be listed. new_trunks = network_fakes.create_trunks( - {'created_at': '2001-01-01 00:00:00', - 'updated_at': '2001-01-01 00:00:00'}, count=3) - - columns = ( - 'ID', - 'Name', - 'Parent Port', - 'Description' - ) - columns_long = columns + ( - 'Status', - 'State', - 'Created At', - 'Updated At' + { + 'created_at': '2001-01-01 00:00:00', + 'updated_at': '2001-01-01 00:00:00', + }, + count=3, ) + + columns = ('ID', 'Name', 'Parent Port', 'Description') + columns_long = columns + ('Status', 'State', 'Created At', 'Updated At') data = [] for t in new_trunks: - data.append(( - t['id'], - t['name'], - t['port_id'], - t['description'] - )) + data.append((t['id'], t['name'], t['port_id'], t['description'])) data_long = [] for t in new_trunks: - data_long.append(( - t['id'], - t['name'], - t['port_id'], - t['description'], - t['status'], - network_trunk.AdminStateColumn(''), - '2001-01-01 00:00:00', - '2001-01-01 00:00:00', - )) + data_long.append( + ( + t['id'], + t['name'], + t['port_id'], + t['description'], + t['status'], + network_trunk.AdminStateColumn(''), + '2001-01-01 00:00:00', + '2001-01-01 00:00:00', + ) + ) def setUp(self): super().setUp() - self.network.trunks = mock.Mock(return_value=self.new_trunks) + self.network_client.trunks.return_value = self.new_trunks self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain # Get the command object to test - self.cmd = network_trunk.ListNetworkTrunk(self.app, self.namespace) + self.cmd = network_trunk.ListNetworkTrunk(self.app, None) def test_trunk_list_no_option(self): arglist = [] @@ -455,7 +504,7 @@ def test_trunk_list_no_option(self): columns, data = self.cmd.take_action(parsed_args) - self.network.trunks.assert_called_once_with() + self.network_client.trunks.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -470,31 +519,33 @@ def test_trunk_list_long(self): columns, data = 
self.cmd.take_action(parsed_args) - self.network.trunks.assert_called_once_with() + self.network_client.trunks.assert_called_once_with() self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, list(data)) class TestSetNetworkTrunk(TestNetworkTrunk): - project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() trunk_networks = network_fakes.create_networks(count=2) parent_port = network_fakes.create_one_port( - attrs={'project_id': project.id, - 'network_id': trunk_networks[0]['id']}) + attrs={'project_id': project.id, 'network_id': trunk_networks[0]['id']} + ) sub_port = network_fakes.create_one_port( - attrs={'project_id': project.id, - 'network_id': trunk_networks[1]['id']}) + attrs={'project_id': project.id, 'network_id': trunk_networks[1]['id']} + ) # Create trunks to be listed. _trunk = network_fakes.create_one_trunk( - attrs={'project_id': project.id, - 'port_id': parent_port['id'], - 'sub_ports': { - 'port_id': sub_port['id'], - 'segmentation_id': 42, - 'segmentation_type': 'vlan'} - }) + attrs={ + 'project_id': project.id, + 'port_id': parent_port['id'], + 'sub_ports': { + 'port_id': sub_port['id'], + 'segmentation_id': 42, + 'segmentation_type': 'vlan', + }, + } + ) columns = ( 'admin_state_up', 'id', @@ -517,21 +568,25 @@ class TestSetNetworkTrunk(TestNetworkTrunk): def setUp(self): super().setUp() - self.network.update_trunk = mock.Mock(return_value=self._trunk) - self.network.add_trunk_subports = mock.Mock(return_value=self._trunk) - self.network.find_trunk = mock.Mock(return_value=self._trunk) - self.network.find_port = mock.Mock( - side_effect=[self.sub_port, self.sub_port]) + self.network_client.update_trunk.return_value = self._trunk + self.network_client.add_trunk_subports.return_value = self._trunk + + self.network_client.find_trunk.return_value = self._trunk + self.network_client.find_port.side_effect = [ + self.sub_port, + self.sub_port, + ] self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain # Get the command object to test - self.cmd = network_trunk.SetNetworkTrunk(self.app, self.namespace) + self.cmd = network_trunk.SetNetworkTrunk(self.app, None) def _test_set_network_trunk_attr(self, attr, value): arglist = [ - '--%s' % attr, value, + f'--{attr}', + value, self._trunk[attr], ] verifylist = [ @@ -545,8 +600,9 @@ def _test_set_network_trunk_attr(self, attr, value): attrs = { attr: value, } - self.network.update_trunk.assert_called_once_with( - self._trunk, **attrs) + self.network_client.update_trunk.assert_called_once_with( + self._trunk, **attrs + ) self.assertIsNone(result) def test_set_network_trunk_name(self): @@ -571,8 +627,9 @@ def test_set_network_trunk_admin_state_up_disable(self): attrs = { 'admin_state_up': False, } - self.network.update_trunk.assert_called_once_with( - self._trunk, **attrs) + self.network_client.update_trunk.assert_called_once_with( + self._trunk, **attrs + ) self.assertIsNone(result) def test_set_network_trunk_admin_state_up_enable(self): @@ -591,45 +648,60 @@ def test_set_network_trunk_admin_state_up_enable(self): attrs = { 'admin_state_up': True, } - self.network.update_trunk.assert_called_once_with( - self._trunk, **attrs) + self.network_client.update_trunk.assert_called_once_with( + self._trunk, **attrs + ) self.assertIsNone(result) def test_set_network_trunk_nothing(self): - arglist = [self._trunk['name'], ] - verifylist = [('trunk', self._trunk['name']), ] + arglist = [ + self._trunk['name'], + ] + 
verifylist = [ + ('trunk', self._trunk['name']), + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = {} - self.network.update_trunk.assert_called_once_with( - self._trunk, **attrs) + self.network_client.update_trunk.assert_called_once_with( + self._trunk, **attrs + ) self.assertIsNone(result) def test_set_network_trunk_subports(self): subport = self._trunk['sub_ports'][0] arglist = [ - '--subport', 'port=%(port)s,segmentation-type=%(seg_type)s,' - 'segmentation-id=%(seg_id)s' % { - 'seg_id': subport['segmentation_id'], - 'seg_type': subport['segmentation_type'], - 'port': subport['port_id']}, + '--subport', + 'port={port},segmentation-type={seg_type},' + 'segmentation-id={seg_id}'.format( + seg_id=subport['segmentation_id'], + seg_type=subport['segmentation_type'], + port=subport['port_id'], + ), self._trunk['name'], ] verifylist = [ ('trunk', self._trunk['name']), - ('set_subports', [{ - 'port': subport['port_id'], - 'segmentation-id': str(subport['segmentation_id']), - 'segmentation-type': subport['segmentation_type']}]), + ( + 'set_subports', + [ + { + 'port': subport['port_id'], + 'segmentation-id': str(subport['segmentation_id']), + 'segmentation-type': subport['segmentation_type'], + } + ], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.add_trunk_subports.assert_called_once_with( - self._trunk, [subport]) + self.network_client.add_trunk_subports.assert_called_once_with( + self._trunk, [subport] + ) self.assertIsNone(result) def test_set_network_trunk_subports_without_optional_keys(self): @@ -638,46 +710,55 @@ def test_set_network_trunk_subports_without_optional_keys(self): subport.pop('segmentation_type') subport.pop('segmentation_id') arglist = [ - '--subport', 'port=%(port)s' % {'port': subport['port_id']}, + '--subport', + 'port={port}'.format(port=subport['port_id']), self._trunk['name'], ] verifylist = [ ('trunk', self._trunk['name']), - ('set_subports', [{ - 'port': subport['port_id']}]), + ('set_subports', [{'port': subport['port_id']}]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.add_trunk_subports.assert_called_once_with( - self._trunk, [subport]) + self.network_client.add_trunk_subports.assert_called_once_with( + self._trunk, [subport] + ) self.assertIsNone(result) def test_set_network_trunk_subports_without_required_key_fail(self): subport = self._trunk['sub_ports'][0] arglist = [ - '--subport', 'segmentation-type=%(seg_type)s,' - 'segmentation-id=%(seg_id)s' % { - 'seg_id': subport['segmentation_id'], - 'seg_type': subport['segmentation_type']}, + '--subport', + 'segmentation-type={seg_type},segmentation-id={seg_id}'.format( + seg_id=subport['segmentation_id'], + seg_type=subport['segmentation_type'], + ), self._trunk['name'], ] verifylist = [ ('trunk', self._trunk['name']), - ('set_subports', [{ - 'segmentation-id': str(subport['segmentation_id']), - 'segmentation-type': subport['segmentation_type']}]), - ] - - with testtools.ExpectedException(argparse.ArgumentTypeError): + ( + 'set_subports', + [ + { + 'segmentation-id': str(subport['segmentation_id']), + 'segmentation-type': subport['segmentation_type'], + } + ], + ), + ] + + with testtools.ExpectedException(test_utils.ParserException): self.check_parser(self.cmd, arglist, verifylist) - self.network.add_trunk_subports.assert_not_called() + 
self.network_client.add_trunk_subports.assert_not_called() def test_set_trunk_attrs_with_exception(self): arglist = [ - '--name', 'reallylongname', + '--name', + 'reallylongname', self._trunk['name'], ] verifylist = [ @@ -686,22 +767,24 @@ def test_set_trunk_attrs_with_exception(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.network.update_trunk = ( - mock.Mock(side_effect=exceptions.CommandError) - ) + self.network_client.update_trunk.side_effect = exceptions.CommandError + with testtools.ExpectedException(exceptions.CommandError) as e: self.cmd.take_action(parsed_args) self.assertEqual( - "Failed to set trunk '%s': " % self._trunk['name'], - str(e)) + "Failed to set trunk '{}': ".format(self._trunk['name']), + str(e), + ) attrs = {'name': 'reallylongname'} - self.network.update_trunk.assert_called_once_with( - self._trunk, **attrs) - self.network.add_trunk_subports.assert_not_called() + self.network_client.update_trunk.assert_called_once_with( + self._trunk, **attrs + ) + self.network_client.add_trunk_subports.assert_not_called() def test_set_trunk_add_subport_with_exception(self): arglist = [ - '--subport', 'port=invalid_subport', + '--subport', + 'port=invalid_subport', self._trunk['name'], ] verifylist = [ @@ -710,24 +793,29 @@ def test_set_trunk_add_subport_with_exception(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.network.add_trunk_subports = ( - mock.Mock(side_effect=exceptions.CommandError) + self.network_client.add_trunk_subports.side_effect = ( + exceptions.CommandError ) - self.network.find_port = (mock.Mock( - return_value={'id': 'invalid_subport'})) + + self.network_client.find_port.side_effect = [ + network_fakes.create_one_port({'id': 'invalid_subport'}) + ] + with testtools.ExpectedException(exceptions.CommandError) as e: self.cmd.take_action(parsed_args) self.assertEqual( - "Failed to add subports to trunk '%s': " % self._trunk['name'], - str(e)) - self.network.update_trunk.assert_called_once_with( - self._trunk) - self.network.add_trunk_subports.assert_called_once_with( - self._trunk, [{'port_id': 'invalid_subport'}]) + "Failed to add subports to trunk '{}': ".format( + self._trunk['name'] + ), + str(e), + ) + self.network_client.update_trunk.assert_called_once_with(self._trunk) + self.network_client.add_trunk_subports.assert_called_once_with( + self._trunk, [{'port_id': 'invalid_subport'}] + ) class TestListNetworkSubport(TestNetworkTrunk): - _trunk = network_fakes.create_one_trunk() _subports = _trunk['sub_ports'] @@ -738,25 +826,29 @@ class TestListNetworkSubport(TestNetworkTrunk): ) data = [] for s in _subports: - data.append(( - s['port_id'], - s['segmentation_type'], - s['segmentation_id'], - )) + data.append( + ( + s['port_id'], + s['segmentation_type'], + s['segmentation_id'], + ) + ) def setUp(self): super().setUp() - self.network.find_trunk = mock.Mock(return_value=self._trunk) - self.network.get_trunk_subports = mock.Mock( - return_value={network_trunk.SUB_PORTS: self._subports}) + self.network_client.find_trunk.return_value = self._trunk + self.network_client.get_trunk_subports.return_value = { + network_trunk.SUB_PORTS: self._subports + } # Get the command object to test - self.cmd = network_trunk.ListNetworkSubport(self.app, self.namespace) + self.cmd = network_trunk.ListNetworkSubport(self.app, None) def test_subport_list(self): arglist = [ - '--trunk', self._trunk['name'], + '--trunk', + self._trunk['name'], ] verifylist = [ ('trunk', self._trunk['name']), @@ -764,7 +856,9 @@ def 
test_subport_list(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.get_trunk_subports.assert_called_once_with(self._trunk) + self.network_client.get_trunk_subports.assert_called_once_with( + self._trunk + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -774,19 +868,22 @@ class TestUnsetNetworkTrunk(TestNetworkTrunk): domain = identity_fakes_v3.FakeDomain.create_one_domain() trunk_networks = network_fakes.create_networks(count=2) parent_port = network_fakes.create_one_port( - attrs={'project_id': project.id, - 'network_id': trunk_networks[0]['id']}) + attrs={'project_id': project.id, 'network_id': trunk_networks[0]['id']} + ) sub_port = network_fakes.create_one_port( - attrs={'project_id': project.id, - 'network_id': trunk_networks[1]['id']}) + attrs={'project_id': project.id, 'network_id': trunk_networks[1]['id']} + ) _trunk = network_fakes.create_one_trunk( - attrs={'project_id': project.id, - 'port_id': parent_port['id'], - 'sub_ports': { - 'port_id': sub_port['id'], - 'segmentation_id': 42, - 'segmentation_type': 'vlan'} - }) + attrs={ + 'project_id': project.id, + 'port_id': parent_port['id'], + 'sub_ports': { + 'port_id': sub_port['id'], + 'segmentation_id': 42, + 'segmentation_type': 'vlan', + }, + } + ) columns = ( 'admin_state_up', @@ -810,18 +907,22 @@ class TestUnsetNetworkTrunk(TestNetworkTrunk): def setUp(self): super().setUp() - self.network.find_trunk = mock.Mock(return_value=self._trunk) - self.network.find_port = mock.Mock( - side_effect=[self.sub_port, self.sub_port]) - self.network.delete_trunk_subports = mock.Mock(return_value=None) + self.network_client.find_trunk.return_value = self._trunk + self.network_client.find_port.side_effect = [ + self.sub_port, + self.sub_port, + ] + + self.network_client.delete_trunk_subports.return_value = None # Get the command object to test - self.cmd = network_trunk.UnsetNetworkTrunk(self.app, self.namespace) + self.cmd = network_trunk.UnsetNetworkTrunk(self.app, None) def test_unset_network_trunk_subport(self): subport = self._trunk['sub_ports'][0] arglist = [ - "--subport", subport['port_id'], + "--subport", + subport['port_id'], self._trunk['name'], ] @@ -834,9 +935,8 @@ def test_unset_network_trunk_subport(self): result = self.cmd.take_action(parsed_args) - self.network.delete_trunk_subports.assert_called_once_with( - self._trunk, - [{'port_id': subport['port_id']}] + self.network_client.delete_trunk_subports.assert_called_once_with( + self._trunk, [{'port_id': subport['port_id']}] ) self.assertIsNone(result) @@ -847,5 +947,10 @@ def test_unset_subport_no_arguments_fail(self): verifylist = [ ('trunk', self._trunk['name']), ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) diff --git a/openstackclient/tests/unit/network/v2/test_port.py b/openstackclient/tests/unit/network/v2/test_port.py index 04412c5a84..11a8711c7b 100644 --- a/openstackclient/tests/unit/network/v2/test_port.py +++ b/openstackclient/tests/unit/network/v2/test_port.py @@ -9,36 +9,36 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-# -import argparse from unittest import mock from unittest.mock import call +import uuid from osc_lib.cli import format_columns from osc_lib import exceptions -from osc_lib import utils from openstackclient.network.v2 import port from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit.network.v2 import fakes as network_fakes -from openstackclient.tests.unit import utils as tests_utils +from openstackclient.tests.unit import utils as test_utils -LIST_FIELDS_TO_RETRIEVE = ('id', 'name', 'mac_address', 'fixed_ips', 'status') -LIST_FIELDS_TO_RETRIEVE_LONG = ('security_group_ids', 'device_owner', 'tags') +LIST_FIELDS_TO_RETRIEVE = ['id', 'name', 'mac_address', 'fixed_ips', 'status'] +LIST_FIELDS_TO_RETRIEVE_LONG = [ + 'security_groups', + 'device_owner', + 'tags', + 'trunk_details', +] class TestPort(network_fakes.TestNetworkV2): - def setUp(self): - super(TestPort, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects @staticmethod def _get_common_cols_data(fake_port): @@ -61,6 +61,8 @@ def _get_common_cols_data(fake_port): 'dns_name', 'extra_dhcp_opts', 'fixed_ips', + 'hardware_offload_type', + 'hints', 'id', 'ip_allocation', 'mac_address', @@ -77,6 +79,7 @@ def _get_common_cols_data(fake_port): 'security_group_ids', 'status', 'tags', + 'trusted', 'trunk_details', 'updated_at', ) @@ -100,6 +103,8 @@ def _get_common_cols_data(fake_port): fake_port.dns_name, format_columns.ListDictColumn(fake_port.extra_dhcp_opts), format_columns.ListDictColumn(fake_port.fixed_ips), + fake_port.hardware_offload_type, + fake_port.hints, fake_port.id, fake_port.ip_allocation, fake_port.mac_address, @@ -116,6 +121,7 @@ def _get_common_cols_data(fake_port): format_columns.ListColumn(fake_port.security_group_ids), fake_port.status, format_columns.ListColumn(fake_port.tags), + fake_port.trusted, fake_port.trunk_details, fake_port.updated_at, ) @@ -124,72 +130,89 @@ def _get_common_cols_data(fake_port): class TestCreatePort(TestPort): - _port = network_fakes.create_one_port() columns, data = TestPort._get_common_cols_data(_port) def setUp(self): - super(TestCreatePort, self).setUp() - - self.network.create_port = mock.Mock(return_value=self._port) - self.network.set_tags = mock.Mock(return_value=None) - fake_net = network_fakes.create_one_network({ - 'id': self._port.network_id, - }) - self.network.find_network = mock.Mock(return_value=fake_net) + super().setUp() + + self.network_client.create_port.return_value = self._port + self.network_client.set_tags.return_value = None + fake_net = network_fakes.create_one_network( + { + 'id': self._port.network_id, + } + ) + self.network_client.find_network.return_value = fake_net self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet() - self.network.find_subnet = mock.Mock(return_value=self.fake_subnet) - self.network.find_extension = mock.Mock(return_value=[]) + self.network_client.find_subnet.return_value = self.fake_subnet + + self.network_client.find_extension.return_value = [] # Get the command object to test - self.cmd = port.CreatePort(self.app, self.namespace) + self.cmd = port.CreatePort(self.app, None) def test_create_default_options(self): arglist = [ - '--network', self._port.network_id, + '--network', + 
self._port.network_id, 'test-port', ] verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('enable', True), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'name': 'test-port', - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'name': 'test-port', + } + ) + self.assertFalse(self.network_client.set_tags.called) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_full_options(self): arglist = [ - '--mac-address', 'aa:aa:aa:aa:aa:aa', - '--fixed-ip', 'subnet=%s,ip-address=10.0.0.2' - % self.fake_subnet.id, - '--description', self._port.description, - '--device', 'deviceid', - '--device-owner', 'fakeowner', + '--mac-address', + 'aa:aa:aa:aa:aa:aa', + '--fixed-ip', + f'subnet={self.fake_subnet.id},ip-address=10.0.0.2', + '--description', + self._port.description, + '--device', + 'deviceid', + '--device-owner', + 'fakeowner', '--disable', - '--vnic-type', 'macvtap', - '--binding-profile', 'foo=bar', - '--binding-profile', 'foo2=bar2', - '--network', self._port.network_id, - '--dns-domain', 'example.org', - '--dns-name', '8.8.8.8', + '--vnic-type', + 'macvtap', + '--binding-profile', + 'foo=bar', + '--binding-profile', + 'foo2=bar2', + '--network', + self._port.network_id, + '--dns-domain', + 'example.org', + '--dns-name', + '8.8.8.8', 'test-port', - ] verifylist = [ ('mac_address', 'aa:aa:aa:aa:aa:aa'), ( 'fixed_ip', - [{'subnet': self.fake_subnet.id, 'ip-address': '10.0.0.2'}] + [{'subnet': self.fake_subnet.id, 'ip-address': '10.0.0.2'}], ), ('description', self._port.description), ('device', 'deviceid'), @@ -204,56 +227,73 @@ def test_create_full_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'mac_address': 'aa:aa:aa:aa:aa:aa', - 'fixed_ips': [{'subnet_id': self.fake_subnet.id, - 'ip_address': '10.0.0.2'}], - 'description': self._port.description, - 'device_id': 'deviceid', - 'device_owner': 'fakeowner', - 'admin_state_up': False, - 'binding:vnic_type': 'macvtap', - 'binding:profile': {'foo': 'bar', 'foo2': 'bar2'}, - 'network_id': self._port.network_id, - 'dns_domain': 'example.org', - 'dns_name': '8.8.8.8', - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'mac_address': 'aa:aa:aa:aa:aa:aa', + 'fixed_ips': [ + { + 'subnet_id': self.fake_subnet.id, + 'ip_address': '10.0.0.2', + } + ], + 'description': self._port.description, + 'device_id': 'deviceid', + 'device_owner': 'fakeowner', + 'admin_state_up': False, + 'binding:vnic_type': 'macvtap', + 'binding:profile': {'foo': 'bar', 'foo2': 'bar2'}, + 'network_id': self._port.network_id, + 'dns_domain': 'example.org', + 'dns_name': '8.8.8.8', + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_invalid_json_binding_profile(self): arglist 
= [ - '--network', self._port.network_id, - '--binding-profile', '{"parent_name":"fake_parent"', + '--network', + self._port.network_id, + '--binding-profile', + '{"parent_name":"fake_parent"', 'test-port', ] - self.assertRaises(argparse.ArgumentTypeError, - self.check_parser, - self.cmd, - arglist, - None) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + None, + ) def test_create_invalid_key_value_binding_profile(self): arglist = [ - '--network', self._port.network_id, - '--binding-profile', 'key', + '--network', + self._port.network_id, + '--binding-profile', + 'key', 'test-port', ] - self.assertRaises(argparse.ArgumentTypeError, - self.check_parser, - self.cmd, - arglist, - None) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + None, + ) def test_create_json_binding_profile(self): arglist = [ - '--network', self._port.network_id, - '--binding-profile', '{"parent_name":"fake_parent"}', - '--binding-profile', '{"tag":42}', + '--network', + self._port.network_id, + '--binding-profile', + '{"parent_name":"fake_parent"}', + '--binding-profile', + '{"tag":42}', 'test-port', ] verifylist = [ @@ -264,132 +304,162 @@ def test_create_json_binding_profile(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'binding:profile': {'parent_name': 'fake_parent', 'tag': 42}, - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'binding:profile': {'parent_name': 'fake_parent', 'tag': 42}, + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_security_group(self): - secgroup = network_fakes.FakeSecurityGroup.create_one_security_group() - self.network.find_security_group = mock.Mock(return_value=secgroup) + secgroup = network_fakes.create_one_security_group() + self.network_client.find_security_group.return_value = secgroup + arglist = [ - '--network', self._port.network_id, - '--security-group', secgroup.id, + '--network', + self._port.network_id, + '--security-group', + secgroup.id, 'test-port', ] verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('enable', True), - ('security_group', [secgroup.id]), + ('security_groups', [secgroup.id]), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'security_group_ids': [secgroup.id], - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'security_group_ids': [secgroup.id], + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_port_with_dns_name(self): arglist = [ - '--network', self._port.network_id, - '--dns-name', '8.8.8.8', + '--network', + 
self._port.network_id, + '--dns-name', + '8.8.8.8', 'test-port', ] verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('enable', True), ('dns_name', '8.8.8.8'), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'dns_name': '8.8.8.8', - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'dns_name': '8.8.8.8', + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_security_groups(self): - sg_1 = network_fakes.FakeSecurityGroup.create_one_security_group() - sg_2 = network_fakes.FakeSecurityGroup.create_one_security_group() - self.network.find_security_group = mock.Mock(side_effect=[sg_1, sg_2]) + sg_1 = network_fakes.create_one_security_group() + sg_2 = network_fakes.create_one_security_group() + self.network_client.find_security_group.side_effect = [sg_1, sg_2] + arglist = [ - '--network', self._port.network_id, - '--security-group', sg_1.id, - '--security-group', sg_2.id, + '--network', + self._port.network_id, + '--security-group', + sg_1.id, + '--security-group', + sg_2.id, 'test-port', ] verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('enable', True), - ('security_group', [sg_1.id, sg_2.id]), + ('security_groups', [sg_1.id, sg_2.id]), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'security_group_ids': [sg_1.id, sg_2.id], - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'security_group_ids': [sg_1.id, sg_2.id], + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_no_security_groups(self): arglist = [ - '--network', self._port.network_id, + '--network', + self._port.network_id, '--no-security-group', 'test-port', ] verifylist = [ ('network', self._port.network_id), ('enable', True), - ('no_security_group', True), + ('security_groups', []), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'security_group_ids': [], - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'security_group_ids': [], + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_no_fixed_ips(self): arglist = [ - 
'--network', self._port.network_id, + '--network', + self._port.network_id, '--no-fixed-ip', 'test-port', ] @@ -401,55 +471,73 @@ def test_create_with_no_fixed_ips(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'fixed_ips': [], - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'fixed_ips': [], + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_port_with_allowed_address_pair_ipaddr(self): - pairs = [{'ip_address': '192.168.1.123'}, - {'ip_address': '192.168.1.45'}] + pairs = [ + {'ip_address': '192.168.1.123'}, + {'ip_address': '192.168.1.45'}, + ] arglist = [ - '--network', self._port.network_id, - '--allowed-address', 'ip-address=192.168.1.123', - '--allowed-address', 'ip-address=192.168.1.45', + '--network', + self._port.network_id, + '--allowed-address', + 'ip-address=192.168.1.123', + '--allowed-address', + 'ip-address=192.168.1.45', 'test-port', ] verifylist = [ ('network', self._port.network_id), ('enable', True), - ('allowed_address_pairs', [{'ip-address': '192.168.1.123'}, - {'ip-address': '192.168.1.45'}]), + ( + 'allowed_address_pairs', + [ + {'ip-address': '192.168.1.123'}, + {'ip-address': '192.168.1.45'}, + ], + ), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'allowed_address_pairs': pairs, - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'allowed_address_pairs': pairs, + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_port_with_allowed_address_pair(self): - pairs = [{'ip_address': '192.168.1.123', - 'mac_address': 'aa:aa:aa:aa:aa:aa'}, - {'ip_address': '192.168.1.45', - 'mac_address': 'aa:aa:aa:aa:aa:b1'}] + pairs = [ + { + 'ip_address': '192.168.1.123', + 'mac_address': 'aa:aa:aa:aa:aa:aa', + }, + {'ip_address': '192.168.1.45', 'mac_address': 'aa:aa:aa:aa:aa:b1'}, + ] arglist = [ - '--network', self._port.network_id, + '--network', + self._port.network_id, '--allowed-address', 'ip-address=192.168.1.123,mac-address=aa:aa:aa:aa:aa:aa', '--allowed-address', @@ -459,62 +547,85 @@ def test_create_port_with_allowed_address_pair(self): verifylist = [ ('network', self._port.network_id), ('enable', True), - ('allowed_address_pairs', [{'ip-address': '192.168.1.123', - 'mac-address': 'aa:aa:aa:aa:aa:aa'}, - {'ip-address': '192.168.1.45', - 'mac-address': 'aa:aa:aa:aa:aa:b1'}]), + ( + 'allowed_address_pairs', + [ + { + 'ip-address': '192.168.1.123', + 'mac-address': 'aa:aa:aa:aa:aa:aa', + }, + { + 'ip-address': '192.168.1.45', + 'mac-address': 'aa:aa:aa:aa:aa:b1', + }, + ], + ), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = 
(self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'allowed_address_pairs': pairs, - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'allowed_address_pairs': pairs, + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_port_with_qos(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy + arglist = [ - '--network', self._port.network_id, - '--qos-policy', qos_policy.id, + '--network', + self._port.network_id, + '--qos-policy', + qos_policy.id, 'test-port', ] verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('enable', True), ('qos_policy', qos_policy.id), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'qos_policy_id': qos_policy.id, - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'qos_policy_id': qos_policy.id, + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_port_security_enabled(self): arglist = [ - '--network', self._port.network_id, + '--network', + self._port.network_id, '--enable-port-security', 'test-port', ] verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('enable', True), ('enable_port_security', True), ('name', 'test-port'), @@ -524,21 +635,27 @@ def test_create_port_security_enabled(self): self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'port_security_enabled': True, - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'port_security_enabled': True, + 'name': 'test-port', + } + ) def test_create_port_security_disabled(self): arglist = [ - '--network', self._port.network_id, + '--network', + self._port.network_id, '--disable-port-security', 'test-port', ] verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('enable', True), ('disable_port_security', True), ('name', 'test-port'), @@ -548,16 +665,19 @@ def test_create_port_security_disabled(self): self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'port_security_enabled': False, - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'port_security_enabled': 
False, + 'name': 'test-port', + } + ) def _test_create_with_tag(self, add_tags=True, add_tags_in_post=True): arglist = [ - '--network', self._port.network_id, + '--network', + self._port.network_id, 'test-port', ] if add_tags: @@ -565,7 +685,10 @@ def _test_create_with_tag(self, add_tags=True, add_tags_in_post=True): else: arglist += ['--no-tag'] verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('enable', True), ('name', 'test-port'), ] @@ -574,10 +697,10 @@ def _test_create_with_tag(self, add_tags=True, add_tags_in_post=True): else: verifylist.append(('no_tag', True)) - self.network.find_extension = mock.Mock(return_value=add_tags_in_post) + self.network_client.find_extension.return_value = add_tags_in_post parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) args = { 'admin_state_up': True, @@ -589,31 +712,34 @@ def _test_create_with_tag(self, add_tags=True, add_tags_in_post=True): args['tags'] = sorted(['red', 'blue']) else: args['tags'] = [] - self.network.create_port.assert_called_once() + self.network_client.create_port.assert_called_once() # Now we need to verify if arguments to call create_port are as # expected, # But we can't simply use assert_called_once_with() method because # duplicates from 'tags' are removed with # list(set(parsed_args.tags)) and that doesn't guarantee the order of # the tags list which is used to call create_port(). - create_port_call_kwargs = self.network.create_port.call_args[1] + create_port_call_kwargs = ( + self.network_client.create_port.call_args[1] + ) create_port_call_kwargs['tags'] = sorted( - create_port_call_kwargs['tags']) + create_port_call_kwargs['tags'] + ) self.assertDictEqual(args, create_port_call_kwargs) else: - self.network.create_port.assert_called_once_with( + self.network_client.create_port.assert_called_once_with( admin_state_up=True, network_id=self._port.network_id, - name='test-port' + name='test-port', ) if add_tags: - self.network.set_tags.assert_called_once_with( - self._port, - tests_utils.CompareBySet(['red', 'blue'])) + self.network_client.set_tags.assert_called_once_with( + self._port, test_utils.CompareBySet(['red', 'blue']) + ) else: - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.set_tags.called) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_tags(self): @@ -630,7 +756,8 @@ def test_create_with_no_tag_using_put(self): def _test_create_with_uplink_status_propagation(self, enable=True): arglist = [ - '--network', self._port.network_id, + '--network', + self._port.network_id, 'test-port', ] if enable: @@ -638,7 +765,10 @@ def _test_create_with_uplink_status_propagation(self, enable=True): else: arglist += ['--disable-uplink-status-propagation'] verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('name', 'test-port'), ] if enable: @@ -647,16 +777,18 @@ def _test_create_with_uplink_status_propagation(self, enable=True): verifylist.append(('disable_uplink_status_propagation', True)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, -
'propagate_uplink_status': enable, - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'propagate_uplink_status': enable, + 'name': 'test-port', + } + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_uplink_status_propagation_enabled(self): @@ -666,33 +798,52 @@ def test_create_with_uplink_status_propagation_disabled(self): self._test_create_with_uplink_status_propagation(enable=False) def test_create_port_with_extra_dhcp_option(self): - extra_dhcp_options = [{'opt_name': 'classless-static-route', - 'opt_value': '169.254.169.254/32,22.2.0.2,' - '0.0.0.0/0,22.2.0.1', - 'ip_version': '4'}, - {'opt_name': 'dns-server', - 'opt_value': '240C::6666', - 'ip_version': '6'}] - arglist = [ - '--network', self._port.network_id, - '--extra-dhcp-option', 'name=classless-static-route,' - 'value=169.254.169.254/32,22.2.0.2,' - '0.0.0.0/0,22.2.0.1,' - 'ip-version=4', - '--extra-dhcp-option', 'name=dns-server,value=240C::6666,' - 'ip-version=6', + extra_dhcp_options = [ + { + 'opt_name': 'classless-static-route', + 'opt_value': '169.254.169.254/32,22.2.0.2,0.0.0.0/0,22.2.0.1', + 'ip_version': '4', + }, + { + 'opt_name': 'dns-server', + 'opt_value': '240C::6666', + 'ip_version': '6', + }, + ] + arglist = [ + '--network', + self._port.network_id, + '--extra-dhcp-option', + 'name=classless-static-route,' + 'value=169.254.169.254/32,22.2.0.2,' + '0.0.0.0/0,22.2.0.1,' + 'ip-version=4', + '--extra-dhcp-option', + 'name=dns-server,value=240C::6666,ip-version=6', 'test-port', ] verifylist = [ - ('network', self._port.network_id,), - ('extra_dhcp_options', [{'name': 'classless-static-route', - 'value': '169.254.169.254/32,22.2.0.2,' - '0.0.0.0/0,22.2.0.1', - 'ip-version': '4'}, - {'name': 'dns-server', - 'value': '240C::6666', - 'ip-version': '6'}]), + ( + 'network', + self._port.network_id, + ), + ( + 'extra_dhcp_options', + [ + { + 'name': 'classless-static-route', + 'value': '169.254.169.254/32,22.2.0.2,' + '0.0.0.0/0,22.2.0.1', + 'ip-version': '4', + }, + { + 'name': 'dns-server', + 'value': '240C::6666', + 'ip-version': '6', + }, + ], + ), ('name', 'test-port'), ] @@ -700,32 +851,38 @@ def test_create_port_with_extra_dhcp_option(self): self.cmd.take_action(parsed_args) - self.network.create_port.assert_called_once_with(**{ - 'admin_state_up': True, - 'network_id': self._port.network_id, - 'extra_dhcp_opts': extra_dhcp_options, - 'name': 'test-port', - }) + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'extra_dhcp_opts': extra_dhcp_options, + 'name': 'test-port', + } + ) def _test_create_with_numa_affinity_policy(self, policy=None): arglist = [ - '--network', self._port.network_id, + '--network', + self._port.network_id, 'test-port', ] if policy: - arglist += ['--numa-policy-%s' % policy] + arglist += [f'--numa-policy-{policy}'] numa_affinity_policy = None if not policy else policy verifylist = [ - ('network', self._port.network_id,), + ( + 'network', + self._port.network_id, + ), ('name', 'test-port'), ] if policy: - verifylist.append(('numa_policy_%s' % policy, True)) + verifylist.append((f'numa_policy_{policy}', True)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) create_args = { 
'admin_state_up': True, @@ -734,9 +891,9 @@ def _test_create_with_numa_affinity_policy(self, policy=None): } if numa_affinity_policy: create_args['numa_affinity_policy'] = numa_affinity_policy - self.network.create_port.assert_called_once_with(**create_args) + self.network_client.create_port.assert_called_once_with(**create_args) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_numa_affinity_policy_required(self): @@ -753,20 +910,28 @@ def test_create_with_numa_affinity_policy_null(self): def test_create_with_device_profile(self): arglist = [ - '--network', self._port.network_id, - '--device-profile', 'cyborg_device_profile_1', + '--network', + self._port.network_id, + '--device-profile', + 'cyborg_device_profile_1', 'test-port', ] verifylist = [ - ('network', self._port.network_id,), - ('device_profile', self._port.device_profile,), + ( + 'network', + self._port.network_id, + ), + ( + 'device_profile', + self._port.device_profile, + ), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) create_args = { 'admin_state_up': True, @@ -774,24 +939,237 @@ def test_create_with_device_profile(self): 'name': 'test-port', 'device_profile': 'cyborg_device_profile_1', } - self.network.create_port.assert_called_once_with(**create_args) + self.network_client.create_port.assert_called_once_with(**create_args) + self.assertCountEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_create_hints_invalid_json(self): + arglist = [ + '--network', + self._port.network_id, + '--hint', + 'invalid json', + 'test-port', + ] + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + None, + ) + + def test_create_hints_invalid_alias(self): + arglist = [ + '--network', + self._port.network_id, + '--hint', + 'invalid-alias=value', + 'test-port', + ] + verifylist = [ + ('network', self._port.network_id), + ('enable', True), + ('hint', {'invalid-alias': 'value'}), + ('name', 'test-port'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + def test_create_hints_invalid_value(self): + arglist = [ + '--network', + self._port.network_id, + '--hint', + 'ovs-tx-steering=invalid-value', + 'test-port', + ] + verifylist = [ + ('network', self._port.network_id), + ('enable', True), + ('hint', {'ovs-tx-steering': 'invalid-value'}), + ('name', 'test-port'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + def test_create_hints_valid_alias_value(self): + arglist = [ + '--network', + self._port.network_id, + '--hint', + 'ovs-tx-steering=hash', + 'test-port', + ] + verifylist = [ + ('network', self._port.network_id), + ('enable', True), + ('hint', {'ovs-tx-steering': 'hash'}), + ('name', 'test-port'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'hints': { + 'openvswitch': {'other_config': {'tx-steering': 'hash'}} + }, + 'name': 'test-port', + } + ) + + 
self.assertCountEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_create_hints_valid_json(self): + arglist = [ + '--network', + self._port.network_id, + '--hint', + '{"openvswitch": {"other_config": {"tx-steering": "hash"}}}', + 'test-port', + ] + verifylist = [ + ('network', self._port.network_id), + ('enable', True), + ( + 'hint', + {"openvswitch": {"other_config": {"tx-steering": "hash"}}}, + ), + ('name', 'test-port'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.network_client.create_port.assert_called_once_with( + **{ + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'hints': { + 'openvswitch': {'other_config': {'tx-steering': 'hash'}} + }, + 'name': 'test-port', + } + ) + + self.assertCountEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def _test_create_with_hardware_offload_type(self, hwol_type=None): + arglist = [ + '--network', + self._port.network_id, + 'test-port', + ] + if hwol_type: + arglist += ['--hardware-offload-type', hwol_type] + + hardware_offload_type = None if not hwol_type else hwol_type + verifylist = [ + ( + 'network', + self._port.network_id, + ), + ('name', 'test-port'), + ] + if hwol_type: + verifylist.append(('hardware_offload_type', hwol_type)) + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + create_args = { + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'name': 'test-port', + } + if hwol_type: + create_args['hardware_offload_type'] = hardware_offload_type + self.network_client.create_port.assert_called_once_with(**create_args) + self.assertEqual(set(self.columns), set(columns)) self.assertCountEqual(self.data, data) + def test_create_with_hardware_offload_type_switchdev(self): + self._test_create_with_hardware_offload_type(hwol_type='switchdev') -class TestDeletePort(TestPort): + def test_create_with_hardware_offload_type_null(self): + self._test_create_with_hardware_offload_type() + + def _test_create_with_trusted_field(self, trusted): + arglist = [ + '--network', + self._port.network_id, + 'test-port', + ] + if trusted: + arglist += ['--trusted'] + else: + arglist += ['--not-trusted'] + + verifylist = [ + ( + 'network', + self._port.network_id, + ), + ('name', 'test-port'), + ] + if trusted: + verifylist.append(('trusted', True)) + else: + verifylist.append(('trusted', False)) + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + create_args = { + 'admin_state_up': True, + 'network_id': self._port.network_id, + 'name': 'test-port', + } + create_args['trusted'] = trusted + self.network_client.create_port.assert_called_once_with(**create_args) + self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.data, data) + + def test_create_with_trusted_true(self): + self._test_create_with_trusted_field(True) + + def test_create_with_trusted_false(self): + self._test_create_with_trusted_field(False) + + +class TestDeletePort(TestPort): # Ports to delete. 
_ports = network_fakes.create_ports(count=2) def setUp(self): - super(TestDeletePort, self).setUp() + super().setUp() - self.network.delete_port = mock.Mock(return_value=None) - self.network.find_port = network_fakes.get_ports( - ports=self._ports) + self.network_client.delete_port.return_value = None + self.network_client.find_port = network_fakes.get_ports( + ports=self._ports + ) # Get the command object to test - self.cmd = port.DeletePort(self.app, self.namespace) + self.cmd = port.DeletePort(self.app, None) def test_port_delete(self): arglist = [ @@ -803,9 +1181,10 @@ def test_port_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.find_port.assert_called_once_with( - self._ports[0].name, ignore_missing=False) - self.network.delete_port.assert_called_once_with(self._ports[0]) + self.network_client.find_port.assert_called_once_with( + self._ports[0].name, ignore_missing=False + ) + self.network_client.delete_port.assert_called_once_with(self._ports[0]) self.assertIsNone(result) def test_multi_ports_delete(self): @@ -824,7 +1203,7 @@ def test_multi_ports_delete(self): calls = [] for p in self._ports: calls.append(call(p)) - self.network.delete_port.assert_has_calls(calls) + self.network_client.delete_port.assert_has_calls(calls) self.assertIsNone(result) def test_multi_ports_delete_with_exception(self): @@ -833,15 +1212,12 @@ def test_multi_ports_delete_with_exception(self): 'unexist_port', ] verifylist = [ - ('port', - [self._ports[0].name, 'unexist_port']), + ('port', [self._ports[0].name, 'unexist_port']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._ports[0], exceptions.CommandError] - self.network.find_port = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_port.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -849,28 +1225,57 @@ def test_multi_ports_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 ports failed to delete.', str(e)) - self.network.find_port.assert_any_call( - self._ports[0].name, ignore_missing=False) - self.network.find_port.assert_any_call( - 'unexist_port', ignore_missing=False) - self.network.delete_port.assert_called_once_with( - self._ports[0] + self.network_client.find_port.assert_any_call( + self._ports[0].name, ignore_missing=False ) + self.network_client.find_port.assert_any_call( + 'unexist_port', ignore_missing=False + ) + self.network_client.delete_port.assert_called_once_with(self._ports[0]) -class TestListPort(TestPort): - - _ports = network_fakes.create_ports(count=3) +class TestListPort(compute_fakes.FakeClientMixin, TestPort): + _project = identity_fakes.FakeProject.create_one_project() + _networks = network_fakes.create_networks(count=3) + _sport1 = network_fakes.create_one_port( + attrs={'project_id': _project.id, 'network_id': _networks[1]['id']} + ) + _sport2 = network_fakes.create_one_port( + attrs={'project_id': _project.id, 'network_id': _networks[2]['id']} + ) + _trunk_details = { + 'trunk_id': str(uuid.uuid4()), + 'sub_ports': [ + { + 'segmentation_id': 100, + 'segmentation_type': 'vlan', + 'port_id': _sport1.id, + }, + { + 'segmentation_id': 102, + 'segmentation_type': 'vlan', + 'port_id': _sport2.id, + }, + ], + } + _pport = network_fakes.create_one_port( + attrs={ + 'project_id': _project.id, + 'network_id': _networks[0]['id'], + 'trunk_details': _trunk_details, + } + ) + _ports = (_pport, _sport1, _sport2) - columns 
= ( + columns = [ 'ID', 'Name', 'MAC Address', 'Fixed IP Addresses', 'Status', - ) + ] - columns_long = ( + columns_long = [ 'ID', 'Name', 'MAC Address', @@ -879,46 +1284,56 @@ class TestListPort(TestPort): 'Security Groups', 'Device Owner', 'Tags', - ) + 'Trunk subports', + ] data = [] for prt in _ports: - data.append(( - prt.id, - prt.name, - prt.mac_address, - format_columns.ListDictColumn(prt.fixed_ips), - prt.status, - )) + data.append( + ( + prt.id, + prt.name, + prt.mac_address, + format_columns.ListDictColumn(prt.fixed_ips), + prt.status, + ) + ) data_long = [] for prt in _ports: - data_long.append(( - prt.id, - prt.name, - prt.mac_address, - format_columns.ListDictColumn(prt.fixed_ips), - prt.status, - format_columns.ListColumn(prt.security_group_ids), - prt.device_owner, - format_columns.ListColumn(prt.tags), - )) + data_long.append( + ( + prt.id, + prt.name, + prt.mac_address, + format_columns.ListDictColumn(prt.fixed_ips), + prt.status, + format_columns.ListColumn(prt.security_group_ids), + prt.device_owner, + format_columns.ListColumn(prt.tags), + port.SubPortColumn(prt.trunk_details), + ) + ) def setUp(self): - super(TestListPort, self).setUp() + super().setUp() + + self.network_client.ports.return_value = self._ports + fake_router = network_fakes.create_one_router( + { + 'id': 'fake-router-id', + } + ) + fake_network = network_fakes.create_one_network( + { + 'id': 'fake-network-id', + } + ) + self.network_client.find_router.return_value = fake_router + self.network_client.find_network.return_value = fake_network # Get the command object to test - self.cmd = port.ListPort(self.app, self.namespace) - self.network.ports = mock.Mock(return_value=self._ports) - fake_router = network_fakes.FakeRouter.create_one_router({ - 'id': 'fake-router-id', - }) - fake_network = network_fakes.create_one_network({ - 'id': 'fake-network-id', - }) - self.network.find_router = mock.Mock(return_value=fake_router) - self.network.find_network = mock.Mock(return_value=fake_network) - self.app.client_manager.compute = mock.Mock() + self.cmd = port.ListPort(self.app, None) def test_port_list_no_options(self): arglist = [] @@ -928,38 +1343,40 @@ def test_port_list_no_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with( - fields=LIST_FIELDS_TO_RETRIEVE) + self.network_client.ports.assert_called_once_with( + fields=LIST_FIELDS_TO_RETRIEVE + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_router_opt(self): arglist = [ - '--router', 'fake-router-name', + '--router', + 'fake-router-name', ] - verifylist = [ - ('router', 'fake-router-name') - ] + verifylist = [('router', 'fake-router-name')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'device_id': 'fake-router-id', - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'device_id': 'fake-router-id', + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) - @mock.patch.object(utils, 'find_resource') - def test_port_list_with_server_option(self, mock_find): - fake_server = compute_fakes.FakeServer.create_one_server() - mock_find.return_value = fake_server + def test_port_list_with_server_option(self): + fake_server = compute_fakes.create_one_server() + self.compute_client.find_server.return_value = fake_server 
arglist = [ - '--server', 'fake-server-name', + '--server', + 'fake-server-name', ] verifylist = [ ('server', 'fake-server-name'), @@ -967,161 +1384,177 @@ def test_port_list_with_server_option(self, mock_find): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with( - device_id=fake_server.id, - fields=LIST_FIELDS_TO_RETRIEVE) - mock_find.assert_called_once_with(mock.ANY, 'fake-server-name') + self.network_client.ports.assert_called_once_with( + device_id=fake_server.id, fields=LIST_FIELDS_TO_RETRIEVE + ) + self.compute_client.find_server.assert_called_once_with( + mock.ANY, 'fake-server-name' + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_device_id_opt(self): arglist = [ - '--device-id', self._ports[0].device_id, + '--device-id', + self._ports[0].device_id, ] - verifylist = [ - ('device_id', self._ports[0].device_id) - ] + verifylist = [('device_id', self._ports[0].device_id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'device_id': self._ports[0].device_id, - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'device_id': self._ports[0].device_id, + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_device_owner_opt(self): arglist = [ - '--device-owner', self._ports[0].device_owner, + '--device-owner', + self._ports[0].device_owner, ] - verifylist = [ - ('device_owner', self._ports[0].device_owner) - ] + verifylist = [('device_owner', self._ports[0].device_owner)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'device_owner': self._ports[0].device_owner, - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'device_owner': self._ports[0].device_owner, + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_all_opt(self): arglist = [ - '--device-owner', self._ports[0].device_owner, - '--router', 'fake-router-name', - '--network', 'fake-network-name', - '--mac-address', self._ports[0].mac_address, + '--device-owner', + self._ports[0].device_owner, + '--router', + 'fake-router-name', + '--network', + 'fake-network-name', + '--mac-address', + self._ports[0].mac_address, ] verifylist = [ ('device_owner', self._ports[0].device_owner), ('router', 'fake-router-name'), ('network', 'fake-network-name'), - ('mac_address', self._ports[0].mac_address) + ('mac_address', self._ports[0].mac_address), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'device_owner': self._ports[0].device_owner, - 'device_id': 'fake-router-id', - 'network_id': 'fake-network-id', - 'mac_address': self._ports[0].mac_address, - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'device_owner': self._ports[0].device_owner, + 'device_id': 'fake-router-id', + 'network_id': 'fake-network-id', + 'mac_address': self._ports[0].mac_address, + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + )
self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_mac_address_opt(self): arglist = [ - '--mac-address', self._ports[0].mac_address, + '--mac-address', + self._ports[0].mac_address, ] - verifylist = [ - ('mac_address', self._ports[0].mac_address) - ] + verifylist = [('mac_address', self._ports[0].mac_address)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'mac_address': self._ports[0].mac_address, - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'mac_address': self._ports[0].mac_address, + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_fixed_ip_opt_ip_address(self): ip_address = self._ports[0].fixed_ips[0]['ip_address'] arglist = [ - '--fixed-ip', "ip-address=%s" % ip_address, - ] - verifylist = [ - ('fixed_ip', [{'ip-address': ip_address}]) + '--fixed-ip', + f"ip-address={ip_address}", ] + verifylist = [('fixed_ip', [{'ip-address': ip_address}])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'fixed_ips': ['ip_address=%s' % ip_address], - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'fixed_ips': [f'ip_address={ip_address}'], + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_fixed_ip_opt_ip_address_substr(self): ip_address_ss = self._ports[0].fixed_ips[0]['ip_address'][:-1] arglist = [ - '--fixed-ip', "ip-substring=%s" % ip_address_ss, - ] - verifylist = [ - ('fixed_ip', [{'ip-substring': ip_address_ss}]) + '--fixed-ip', + f"ip-substring={ip_address_ss}", ] + verifylist = [('fixed_ip', [{'ip-substring': ip_address_ss}])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'fixed_ips': ['ip_address_substr=%s' % ip_address_ss], - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'fixed_ips': [f'ip_address_substr={ip_address_ss}'], + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_fixed_ip_opt_subnet_id(self): subnet_id = self._ports[0].fixed_ips[0]['subnet_id'] arglist = [ - '--fixed-ip', "subnet=%s" % subnet_id, - ] - verifylist = [ - ('fixed_ip', [{'subnet': subnet_id}]) + '--fixed-ip', + f"subnet={subnet_id}", ] + verifylist = [('fixed_ip', [{'subnet': subnet_id}])] self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet( - {'id': subnet_id}) - self.network.find_subnet = mock.Mock(return_value=self.fake_subnet) + {'id': subnet_id} + ) + self.network_client.find_subnet.return_value = self.fake_subnet + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'fixed_ips': ['subnet_id=%s' % subnet_id], - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'fixed_ips': [f'subnet_id={subnet_id}'], + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) self.assertEqual(self.columns, columns) 
self.assertCountEqual(self.data, list(data)) @@ -1129,25 +1562,30 @@ def test_port_list_fixed_ip_opts(self): subnet_id = self._ports[0].fixed_ips[0]['subnet_id'] ip_address = self._ports[0].fixed_ips[0]['ip_address'] arglist = [ - '--fixed-ip', "subnet=%s,ip-address=%s" % (subnet_id, - ip_address) + '--fixed-ip', + f"subnet={subnet_id},ip-address={ip_address}", ] verifylist = [ - ('fixed_ip', [{'subnet': subnet_id, - 'ip-address': ip_address}]) + ('fixed_ip', [{'subnet': subnet_id, 'ip-address': ip_address}]) ] self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet( - {'id': subnet_id}) - self.network.find_subnet = mock.Mock(return_value=self.fake_subnet) + {'id': subnet_id} + ) + self.network_client.find_subnet.return_value = self.fake_subnet + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'fixed_ips': ['subnet_id=%s' % subnet_id, - 'ip_address=%s' % ip_address], - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'fixed_ips': [ + f'subnet_id={subnet_id}', + f'ip_address={ip_address}', + ], + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -1155,27 +1593,35 @@ def test_port_list_fixed_ips(self): subnet_id = self._ports[0].fixed_ips[0]['subnet_id'] ip_address = self._ports[0].fixed_ips[0]['ip_address'] arglist = [ - '--fixed-ip', "subnet=%s" % subnet_id, - '--fixed-ip', "ip-address=%s" % ip_address, + '--fixed-ip', + f"subnet={subnet_id}", + '--fixed-ip', + f"ip-address={ip_address}", ] verifylist = [ - ('fixed_ip', [{'subnet': subnet_id}, - {'ip-address': ip_address}]) + ('fixed_ip', [{'subnet': subnet_id}, {'ip-address': ip_address}]) ] - self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet({ - 'id': subnet_id, - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) - self.network.find_subnet = mock.Mock(return_value=self.fake_subnet) + self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet( + { + 'id': subnet_id, + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) + self.network_client.find_subnet.return_value = self.fake_subnet + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with(**{ - 'fixed_ips': ['subnet_id=%s' % subnet_id, - 'ip_address=%s' % ip_address], - 'fields': LIST_FIELDS_TO_RETRIEVE, - }) + self.network_client.ports.assert_called_once_with( + **{ + 'fixed_ips': [ + f'subnet_id={subnet_id}', + f'ip_address={ip_address}', + ], + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -1192,14 +1638,16 @@ def test_list_port_with_long(self): columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with( - fields=LIST_FIELDS_TO_RETRIEVE + LIST_FIELDS_TO_RETRIEVE_LONG) + self.network_client.ports.assert_called_once_with( + fields=LIST_FIELDS_TO_RETRIEVE + LIST_FIELDS_TO_RETRIEVE_LONG + ) self.assertEqual(self.columns_long, columns) self.assertCountEqual(self.data_long, list(data)) def test_port_list_host(self): arglist = [ - '--host', 'foobar', + '--host', + 'foobar', ] verifylist = [ ('host', 'foobar'), @@ -1212,7 +1660,7 @@ def test_port_list_host(self): 'fields': LIST_FIELDS_TO_RETRIEVE, } - self.network.ports.assert_called_once_with(**filters) + self.network_client.ports.assert_called_once_with(**filters) 
self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -1220,7 +1668,8 @@ def test_port_list_project(self): project = identity_fakes.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -1233,7 +1682,7 @@ def test_port_list_project(self): 'fields': LIST_FIELDS_TO_RETRIEVE, } - self.network.ports.assert_called_once_with(**filters) + self.network_client.ports.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -1241,8 +1690,10 @@ def test_port_list_project_domain(self): project = identity_fakes.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -1256,14 +1707,15 @@ def test_port_list_project_domain(self): 'fields': LIST_FIELDS_TO_RETRIEVE, } - self.network.ports.assert_called_once_with(**filters) + self.network_client.ports.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_name(self): test_name = "fakename" arglist = [ - '--name', test_name, + '--name', + test_name, ] verifylist = [ ('name', test_name), @@ -1276,16 +1728,20 @@ def test_port_list_name(self): 'fields': LIST_FIELDS_TO_RETRIEVE, } - self.network.ports.assert_called_once_with(**filters) + self.network_client.ports.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_list_with_tag_options(self): arglist = [ - '--tags', 'red,blue', - '--any-tags', 'red,green', - '--not-tags', 'orange,yellow', - '--not-any-tags', 'black,white', + '--tags', + 'red,blue', + '--any-tags', + 'red,green', + '--not-tags', + 'orange,yellow', + '--not-any-tags', + 'black,white', ] verifylist = [ ('tags', ['red', 'blue']), @@ -1296,20 +1752,24 @@ def test_list_with_tag_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.ports.assert_called_once_with( - **{'tags': 'red,blue', - 'any_tags': 'red,green', - 'not_tags': 'orange,yellow', - 'not_any_tags': 'black,white', - 'fields': LIST_FIELDS_TO_RETRIEVE} + self.network_client.ports.assert_called_once_with( + **{ + 'tags': 'red,blue', + 'any_tags': 'red,green', + 'not_tags': 'orange,yellow', + 'not_any_tags': 'black,white', + 'fields': LIST_FIELDS_TO_RETRIEVE, + } ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_port_list_security_group(self): arglist = [ - '--security-group', 'sg-id1', - '--security-group', 'sg-id2', + '--security-group', + 'sg-id1', + '--security-group', + 'sg-id2', ] verifylist = [ ('security_groups', ['sg-id1', 'sg-id2']), @@ -1318,29 +1778,52 @@ def test_port_list_security_group(self): columns, data = self.cmd.take_action(parsed_args) filters = { - 'security_groups': ['sg-id1', 'sg-id2'], + 'security_group_ids': ['sg-id1', 'sg-id2'], 'fields': LIST_FIELDS_TO_RETRIEVE, } - self.network.ports.assert_called_once_with(**filters) + self.network_client.ports.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) + def test_port_list_status(self): + arglist = [ + '--status', + 
'ACTIVE', + ] + verifylist = [ + ('status', 'ACTIVE'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) -class TestSetPort(TestPort): + columns, data = self.cmd.take_action(parsed_args) + filters = { + 'status': 'ACTIVE', + 'fields': LIST_FIELDS_TO_RETRIEVE, + } + + self.network_client.ports.assert_called_once_with(**filters) + self.assertEqual(self.columns, columns) + self.assertEqual( + self.data, + list(data), + ) + +class TestSetPort(TestPort): _port = network_fakes.create_one_port({'tags': ['green', 'red']}) def setUp(self): - super(TestSetPort, self).setUp() + super().setUp() self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet() - self.network.find_subnet = mock.Mock(return_value=self.fake_subnet) - self.network.find_port = mock.Mock(return_value=self._port) - self.network.update_port = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.find_subnet.return_value = self.fake_subnet + + self.network_client.find_port.return_value = self._port + self.network_client.update_port.return_value = None + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = port.SetPort(self.app, self.namespace) + self.cmd = port.SetPort(self.app, None) def test_set_port_defaults(self): arglist = [ @@ -1352,16 +1835,18 @@ def test_set_port_defaults(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_port.called) - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.update_port.called) + self.assertFalse(self.network_client.set_tags.called) self.assertIsNone(result) def test_set_port_fixed_ip(self): _testport = network_fakes.create_one_port( - {'fixed_ips': [{'ip_address': '0.0.0.1'}]}) - self.network.find_port = mock.Mock(return_value=_testport) + {'fixed_ips': [{'ip_address': '0.0.0.1'}]} + ) + self.network_client.find_port.return_value = _testport arglist = [ - '--fixed-ip', 'ip-address=10.0.0.12', + '--fixed-ip', + 'ip-address=10.0.0.12', _testport.name, ] verifylist = [ @@ -1377,21 +1862,25 @@ def test_set_port_fixed_ip(self): {'ip_address': '10.0.0.12'}, ], } - self.network.update_port.assert_called_once_with(_testport, **attrs) + self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_set_port_fixed_ip_clear(self): _testport = network_fakes.create_one_port( - {'fixed_ips': [{'ip_address': '0.0.0.1'}]}) - self.network.find_port = mock.Mock(return_value=_testport) + {'fixed_ips': [{'ip_address': '0.0.0.1'}]} + ) + self.network_client.find_port.return_value = _testport arglist = [ - '--fixed-ip', 'ip-address=10.0.0.12', + '--fixed-ip', + 'ip-address=10.0.0.12', '--no-fixed-ip', _testport.name, ] verifylist = [ ('fixed_ip', [{'ip-address': '10.0.0.12'}]), - ('no_fixed_ip', True) + ('no_fixed_ip', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1401,12 +1890,15 @@ def test_set_port_fixed_ip_clear(self): {'ip_address': '10.0.0.12'}, ], } - self.network.update_port.assert_called_once_with(_testport, **attrs) + self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_set_port_dns_name(self): arglist = [ - '--dns-name', '8.8.8.8', + '--dns-name', + '8.8.8.8', self._port.name, ] verifylist = [ @@ -1420,37 +1912,44 @@ def test_set_port_dns_name(self): attrs = { 'dns_name': '8.8.8.8', } - 
self.network.update_port.assert_called_once_with(self._port, **attrs) + self.network_client.update_port.assert_called_once_with( + self._port, **attrs + ) self.assertIsNone(result) def test_set_port_overwrite_binding_profile(self): _testport = network_fakes.create_one_port( - {'binding_profile': {'lok_i': 'visi_on'}}) - self.network.find_port = mock.Mock(return_value=_testport) + {'binding_profile': {'lok_i': 'visi_on'}} + ) + self.network_client.find_port.return_value = _testport arglist = [ - '--binding-profile', 'lok_i=than_os', + '--binding-profile', + 'lok_i=than_os', '--no-binding-profile', _testport.name, ] verifylist = [ ('binding_profile', {'lok_i': 'than_os'}), - ('no_binding_profile', True) + ('no_binding_profile', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = { - 'binding:profile': - {'lok_i': 'than_os'}, + 'binding:profile': {'lok_i': 'than_os'}, } - self.network.update_port.assert_called_once_with(_testport, **attrs) + self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_overwrite_mac_address(self): _testport = network_fakes.create_one_port( - {'mac_address': '11:22:33:44:55:66'}) - self.network.find_port = mock.Mock(return_value=_testport) + {'mac_address': '11:22:33:44:55:66'} + ) + self.network_client.find_port.return_value = _testport arglist = [ - '--mac-address', '66:55:44:33:22:11', + '--mac-address', + '66:55:44:33:22:11', _testport.name, ] verifylist = [ @@ -1461,7 +1960,9 @@ def test_overwrite_mac_address(self): attrs = { 'mac_address': '66:55:44:33:22:11', } - self.network.update_port.assert_called_once_with(_testport, **attrs) + self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_set_port_this(self): @@ -1486,17 +1987,24 @@ def test_set_port_this(self): 'binding:profile': {}, 'fixed_ips': [], } - self.network.update_port.assert_called_once_with(self._port, **attrs) + self.network_client.update_port.assert_called_once_with( + self._port, **attrs + ) self.assertIsNone(result) def test_set_port_that(self): arglist = [ - '--description', 'newDescription', + '--description', + 'newDescription', '--enable', - '--vnic-type', 'macvtap', - '--binding-profile', 'foo=bar', - '--host', 'binding-host-id-xxxx', - '--name', 'newName', + '--vnic-type', + 'macvtap', + '--binding-profile', + 'foo=bar', + '--host', + 'binding-host-id-xxxx', + '--name', + 'newName', self._port.name, ] verifylist = [ @@ -1520,35 +2028,45 @@ def test_set_port_that(self): 'description': 'newDescription', 'name': 'newName', } - self.network.update_port.assert_called_once_with(self._port, **attrs) + self.network_client.update_port.assert_called_once_with( + self._port, **attrs + ) self.assertIsNone(result) def test_set_port_invalid_json_binding_profile(self): arglist = [ - '--binding-profile', '{"parent_name"}', + '--binding-profile', + '{"parent_name"}', 'test-port', ] - self.assertRaises(argparse.ArgumentTypeError, - self.check_parser, - self.cmd, - arglist, - None) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + None, + ) def test_set_port_invalid_key_value_binding_profile(self): arglist = [ - '--binding-profile', 'key', + '--binding-profile', + 'key', 'test-port', ] - self.assertRaises(argparse.ArgumentTypeError, - self.check_parser, - self.cmd, - arglist, - None) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + 
arglist, + None, + ) def test_set_port_mixed_binding_profile(self): arglist = [ - '--binding-profile', 'foo=bar', - '--binding-profile', '{"foo2": "bar2"}', + '--binding-profile', + 'foo=bar', + '--binding-profile', + '{"foo2": "bar2"}', self._port.name, ] verifylist = [ @@ -1562,18 +2080,21 @@ def test_set_port_mixed_binding_profile(self): attrs = { 'binding:profile': {'foo': 'bar', 'foo2': 'bar2'}, } - self.network.update_port.assert_called_once_with(self._port, **attrs) + self.network_client.update_port.assert_called_once_with( + self._port, **attrs + ) self.assertIsNone(result) def test_set_port_security_group(self): - sg = network_fakes.FakeSecurityGroup.create_one_security_group() - self.network.find_security_group = mock.Mock(return_value=sg) + sg = network_fakes.create_one_security_group() + self.network_client.find_security_group.return_value = sg arglist = [ - '--security-group', sg.id, + '--security-group', + sg.id, self._port.name, ] verifylist = [ - ('security_group', [sg.id]), + ('security_groups', [sg.id]), ('port', self._port.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1582,24 +2103,30 @@ def test_set_port_security_group(self): attrs = { 'security_group_ids': [sg.id], } - self.network.update_port.assert_called_once_with(self._port, **attrs) + self.network_client.update_port.assert_called_once_with( + self._port, **attrs + ) self.assertIsNone(result) def test_set_port_security_group_append(self): - sg_1 = network_fakes.FakeSecurityGroup.create_one_security_group() - sg_2 = network_fakes.FakeSecurityGroup.create_one_security_group() - sg_3 = network_fakes.FakeSecurityGroup.create_one_security_group() - self.network.find_security_group = mock.Mock(side_effect=[sg_2, sg_3]) + sg_1 = network_fakes.create_one_security_group() + sg_2 = network_fakes.create_one_security_group() + sg_3 = network_fakes.create_one_security_group() + self.network_client.find_security_group.side_effect = [sg_2, sg_3] + _testport = network_fakes.create_one_port( - {'security_group_ids': [sg_1.id]}) - self.network.find_port = mock.Mock(return_value=_testport) + {'security_group_ids': [sg_1.id]} + ) + self.network_client.find_port.return_value = _testport arglist = [ - '--security-group', sg_2.id, - '--security-group', sg_3.id, + '--security-group', + sg_2.id, + '--security-group', + sg_3.id, _testport.name, ] verifylist = [ - ('security_group', [sg_2.id, sg_3.id]), + ('security_groups', [sg_2.id, sg_3.id]), ('port', _testport.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1608,7 +2135,9 @@ def test_set_port_security_group_append(self): attrs = { 'security_group_ids': [sg_1.id, sg_2.id, sg_3.id], } - self.network.update_port.assert_called_once_with(_testport, **attrs) + self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_set_port_security_group_clear(self): @@ -1626,24 +2155,28 @@ def test_set_port_security_group_clear(self): attrs = { 'security_group_ids': [], } - self.network.update_port.assert_called_once_with(self._port, **attrs) + self.network_client.update_port.assert_called_once_with( + self._port, **attrs + ) self.assertIsNone(result) def test_set_port_security_group_replace(self): - sg1 = network_fakes.FakeSecurityGroup.create_one_security_group() - sg2 = network_fakes.FakeSecurityGroup.create_one_security_group() + sg1 = network_fakes.create_one_security_group() + sg2 = network_fakes.create_one_security_group() _testport = network_fakes.create_one_port( - 
{'security_group_ids': [sg1.id]}) - self.network.find_port = mock.Mock(return_value=_testport) - self.network.find_security_group = mock.Mock(return_value=sg2) + {'security_group_ids': [sg1.id]} + ) + self.network_client.find_port.return_value = _testport + self.network_client.find_security_group.return_value = sg2 arglist = [ - '--security-group', sg2.id, + '--security-group', + sg2.id, '--no-security-group', _testport.name, ] verifylist = [ - ('security_group', [sg2.id]), - ('no_security_group', True) + ('security_groups', [sg2.id]), + ('no_security_group', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1651,12 +2184,15 @@ def test_set_port_security_group_replace(self): attrs = { 'security_group_ids': [sg2.id], } - self.network.update_port.assert_called_once_with(_testport, **attrs) + self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_set_port_allowed_address_pair(self): arglist = [ - '--allowed-address', 'ip-address=192.168.1.123', + '--allowed-address', + 'ip-address=192.168.1.123', self._port.name, ] verifylist = [ @@ -1670,15 +2206,19 @@ def test_set_port_allowed_address_pair(self): attrs = { 'allowed_address_pairs': [{'ip_address': '192.168.1.123'}], } - self.network.update_port.assert_called_once_with(self._port, **attrs) + self.network_client.update_port.assert_called_once_with( + self._port, **attrs + ) self.assertIsNone(result) def test_set_port_append_allowed_address_pair(self): _testport = network_fakes.create_one_port( - {'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]}) - self.network.find_port = mock.Mock(return_value=_testport) + {'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]} + ) + self.network_client.find_port.return_value = _testport arglist = [ - '--allowed-address', 'ip-address=192.168.1.45', + '--allowed-address', + 'ip-address=192.168.1.45', _testport.name, ] verifylist = [ @@ -1690,18 +2230,24 @@ def test_set_port_append_allowed_address_pair(self): result = self.cmd.take_action(parsed_args) attrs = { - 'allowed_address_pairs': [{'ip_address': '192.168.1.123'}, - {'ip_address': '192.168.1.45'}], + 'allowed_address_pairs': [ + {'ip_address': '192.168.1.123'}, + {'ip_address': '192.168.1.45'}, + ], } - self.network.update_port.assert_called_once_with(_testport, **attrs) + self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_set_port_overwrite_allowed_address_pair(self): _testport = network_fakes.create_one_port( - {'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]}) - self.network.find_port = mock.Mock(return_value=_testport) + {'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]} + ) + self.network_client.find_port.return_value = _testport arglist = [ - '--allowed-address', 'ip-address=192.168.1.45', + '--allowed-address', + 'ip-address=192.168.1.45', '--no-allowed-address', _testport.name, ] @@ -1717,7 +2263,9 @@ def test_set_port_overwrite_allowed_address_pair(self): attrs = { 'allowed_address_pairs': [{'ip_address': '192.168.1.45'}], } - self.network.update_port.assert_called_once_with(_testport, **attrs) + self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_set_port_no_allowed_address_pairs(self): @@ -1736,17 +2284,19 @@ def test_set_port_no_allowed_address_pairs(self): attrs = { 'allowed_address_pairs': [], } - self.network.update_port.assert_called_once_with(self._port, **attrs) + 
self.network_client.update_port.assert_called_once_with( + self._port, **attrs + ) self.assertIsNone(result) def test_set_port_extra_dhcp_option(self): arglist = [ - '--extra-dhcp-option', 'name=foo,value=bar', + '--extra-dhcp-option', + 'name=foo,value=bar', self._port.name, ] verifylist = [ - ('extra_dhcp_options', [{'name': 'foo', - 'value': 'bar'}]), + ('extra_dhcp_options', [{'name': 'foo', 'value': 'bar'}]), ('port', self._port.name), ] @@ -1754,10 +2304,11 @@ def test_set_port_extra_dhcp_option(self): result = self.cmd.take_action(parsed_args) attrs = { - 'extra_dhcp_opts': [{'opt_name': 'foo', - 'opt_value': 'bar'}], + 'extra_dhcp_opts': [{'opt_name': 'foo', 'opt_value': 'bar'}], } - self.network.update_port.assert_called_once_with(self._port, **attrs) + self.network_client.update_port.assert_called_once_with( + self._port, **attrs + ) self.assertIsNone(result) def test_set_port_security_enabled(self): @@ -1767,16 +2318,22 @@ def test_set_port_security_enabled(self): ] verifylist = [ ('enable_port_security', True), - ('port', self._port.id,) + ( + 'port', + self._port.id, + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.update_port.assert_called_once_with(self._port, **{ - 'port_security_enabled': True, - }) + self.network_client.update_port.assert_called_once_with( + self._port, + **{ + 'port_security_enabled': True, + }, + ) def test_set_port_security_disabled(self): arglist = [ @@ -1785,25 +2342,32 @@ def test_set_port_security_disabled(self): ] verifylist = [ ('disable_port_security', True), - ('port', self._port.id,) + ( + 'port', + self._port.id, + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.update_port.assert_called_once_with(self._port, **{ - 'port_security_enabled': False, - }) + self.network_client.update_port.assert_called_once_with( + self._port, + **{ + 'port_security_enabled': False, + }, + ) def test_set_port_with_qos(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) - _testport = network_fakes.create_one_port( - {'qos_policy_id': None}) - self.network.find_port = mock.Mock(return_value=_testport) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy + + _testport = network_fakes.create_one_port({'qos_policy_id': None}) + self.network_client.find_port.return_value = _testport arglist = [ - '--qos-policy', qos_policy.id, + '--qos-policy', + qos_policy.id, _testport.name, ] verifylist = [ @@ -1817,15 +2381,17 @@ def test_set_port_with_qos(self): attrs = { 'qos_policy_id': qos_policy.id, } - self.network.update_port.assert_called_once_with(_testport, **attrs) + self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_set_port_data_plane_status(self): - _testport = network_fakes.create_one_port( - {'data_plane_status': None}) - self.network.find_port = mock.Mock(return_value=_testport) + _testport = network_fakes.create_one_port({'data_plane_status': None}) + self.network_client.find_port.return_value = _testport arglist = [ - '--data-plane-status', 'ACTIVE', + '--data-plane-status', + 'ACTIVE', _testport.name, ] verifylist = [ @@ -1840,19 +2406,24 @@ def test_set_port_data_plane_status(self): 'data_plane_status': 'ACTIVE', } - self.network.update_port.assert_called_once_with(_testport, **attrs) + 
self.network_client.update_port.assert_called_once_with( + _testport, **attrs + ) self.assertIsNone(result) def test_set_port_invalid_data_plane_status_value(self): arglist = [ - '--data-plane-status', 'Spider-Man', + '--data-plane-status', + 'Spider-Man', 'test-port', ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, - self.cmd, - arglist, - None) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + None, + ) def _test_set_tags(self, with_tags=True): if with_tags: @@ -1864,16 +2435,15 @@ def _test_set_tags(self, with_tags=True): verifylist = [('no_tag', True)] expected_args = [] arglist.append(self._port.name) - verifylist.append( - ('port', self._port.name)) + verifylist.append(('port', self._port.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_port.called) - self.network.set_tags.assert_called_once_with( - self._port, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_port.called) + self.network_client.set_tags.assert_called_once_with( + self._port, test_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_set_with_tags(self): @@ -1884,20 +2454,24 @@ def test_set_with_no_tag(self): def _test_create_with_numa_affinity_policy(self, policy): arglist = [ - '--numa-policy-%s' % policy, + f'--numa-policy-{policy}', self._port.id, ] verifylist = [ - ('numa_policy_%s' % policy, True), - ('port', self._port.id,) + (f'numa_policy_{policy}', True), + ( + 'port', + self._port.id, + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - self.network.update_port.assert_called_once_with( - self._port, **{'numa_affinity_policy': policy}) + self.network_client.update_port.assert_called_once_with( + self._port, **{'numa_affinity_policy': policy} + ) def test_create_with_numa_affinity_policy_required(self): self._test_create_with_numa_affinity_policy('required') @@ -1908,27 +2482,202 @@ def test_create_with_numa_affinity_policy_preferred(self): def test_create_with_numa_affinity_policy_legacy(self): self._test_create_with_numa_affinity_policy('legacy') + def test_set_hints_invalid_json(self): + arglist = [ + '--network', + self._port.network_id, + '--hint', + 'invalid json', + 'test-port', + ] + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + None, + ) + + def test_set_hints_invalid_alias(self): + arglist = [ + '--hint', + 'invalid-alias=value', + 'test-port', + ] + verifylist = [ + ('hint', {'invalid-alias': 'value'}), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) -class TestShowPort(TestPort): + def test_set_hints_invalid_value(self): + arglist = [ + '--hint', + 'ovs-tx-steering=invalid-value', + 'test-port', + ] + verifylist = [ + ('hint', {'ovs-tx-steering': 'invalid-value'}), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + def test_set_hints_valid_alias_value(self): + testport = network_fakes.create_one_port() + self.network_client.find_port.return_value = testport + self.network_client.find_extension.return_value = [ + 'port-hints', + 'port-hint-ovs-tx-steering', + ] + + arglist = [ + '--hint', + 'ovs-tx-steering=hash', + testport.name, + ] + 
verifylist = [ + ('hint', {'ovs-tx-steering': 'hash'}), + ('port', testport.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.network_client.update_port.assert_called_once_with( + testport, + **{ + 'hints': { + 'openvswitch': {'other_config': {'tx-steering': 'hash'}} + } + }, + ) + self.assertIsNone(result) + + def test_set_hints_valid_json(self): + testport = network_fakes.create_one_port() + self.network_client.find_port.return_value = testport + self.network_client.find_extension.return_value = [ + 'port-hints', + 'port-hint-ovs-tx-steering', + ] + + arglist = [ + '--hint', + '{"openvswitch": {"other_config": {"tx-steering": "hash"}}}', + testport.name, + ] + verifylist = [ + ( + 'hint', + {"openvswitch": {"other_config": {"tx-steering": "hash"}}}, + ), + ('port', testport.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.network_client.update_port.assert_called_once_with( + testport, + **{ + 'hints': { + 'openvswitch': {'other_config': {'tx-steering': 'hash'}} + } + }, + ) + self.assertIsNone(result) + + def _test_set_trusted_field(self, trusted): + arglist = [self._port.id] + if trusted: + arglist += ['--trusted'] + else: + arglist += ['--not-trusted'] + + verifylist = [ + ('port', self._port.id), + ] + if trusted: + verifylist.append(('trusted', True)) + else: + verifylist.append(('trusted', False)) + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.network_client.update_port.assert_called_once_with( + self._port, **{'trusted': trusted} + ) + self.assertIsNone(result) + + def test_set_trusted_true(self): + self._test_set_trusted_field(True) + + def test_set_trusted_false(self): + self._test_set_trusted_field(False) + + def _test_set_uplink_status_propagation(self, uspropagation): + arglist = [self._port.id] + if uspropagation: + arglist += ['--enable-uplink-status-propagation'] + else: + arglist += ['--disable-uplink-status-propagation'] + + verifylist = [ + ('port', self._port.id), + ] + if uspropagation: + verifylist.append(('enable_uplink_status_propagation', True)) + else: + verifylist.append(('enable_uplink_status_propagation', False)) + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.network_client.update_port.assert_called_once_with( + self._port, **{'propagate_uplink_status': uspropagation} + ) + self.assertIsNone(result) + + def test_set_uplink_status_propagation_true(self): + self._test_set_uplink_status_propagation(True) + + def test_set_uplink_status_propagation_false(self): + self._test_set_uplink_status_propagation(False) + +class TestShowPort(TestPort): # The port to show. 
_port = network_fakes.create_one_port() columns, data = TestPort._get_common_cols_data(_port) def setUp(self): - super(TestShowPort, self).setUp() + super().setUp() - self.network.find_port = mock.Mock(return_value=self._port) + self.network_client.find_port.return_value = self._port # Get the command object to test - self.cmd = port.ShowPort(self.app, self.namespace) + self.cmd = port.ShowPort(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -1941,70 +2690,97 @@ def test_show_all_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.find_port.assert_called_once_with( - self._port.name, ignore_missing=False) + self.network_client.find_port.assert_called_once_with( + self._port.name, ignore_missing=False + ) - self.assertEqual(set(self.columns), set(columns)) + self.assertCountEqual(self.columns, columns) self.assertCountEqual(self.data, data) class TestUnsetPort(TestPort): - def setUp(self): - super(TestUnsetPort, self).setUp() + super().setUp() self._testport = network_fakes.create_one_port( - {'fixed_ips': [{'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152', - 'ip_address': '0.0.0.1'}, - {'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152', - 'ip_address': '1.0.0.0'}], - 'binding:profile': {'batman': 'Joker', 'Superman': 'LexLuthor'}, - 'tags': ['green', 'red'], }) + { + 'fixed_ips': [ + { + 'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152', + 'ip_address': '0.0.0.1', + }, + { + 'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152', + 'ip_address': '1.0.0.0', + }, + ], + 'binding:profile': { + 'batman': 'Joker', + 'Superman': 'LexLuthor', + }, + 'tags': ['green', 'red'], + } + ) self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet( - {'id': '042eb10a-3a18-4658-ab-cf47c8d03152'}) - self.network.find_subnet = mock.Mock(return_value=self.fake_subnet) - self.network.find_port = mock.Mock(return_value=self._testport) - self.network.update_port = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + {'id': '042eb10a-3a18-4658-ab-cf47c8d03152'} + ) + self.network_client.find_subnet.return_value = self.fake_subnet + + self.network_client.find_port.return_value = self._testport + self.network_client.update_port.return_value = None + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = port.UnsetPort(self.app, self.namespace) + self.cmd = port.UnsetPort(self.app, None) def test_unset_port_parameters(self): arglist = [ '--fixed-ip', 'subnet=042eb10a-3a18-4658-ab-cf47c8d03152,ip-address=1.0.0.0', - '--binding-profile', 'Superman', + '--binding-profile', + 'Superman', '--qos-policy', '--host', self._testport.name, ] verifylist = [ - ('fixed_ip', [{ - 'subnet': '042eb10a-3a18-4658-ab-cf47c8d03152', - 'ip-address': '1.0.0.0'}]), + ( + 'fixed_ip', + [ + { + 'subnet': '042eb10a-3a18-4658-ab-cf47c8d03152', + 'ip-address': '1.0.0.0', + } + ], + ), ('binding_profile', ['Superman']), ('qos_policy', True), - ('host', True) + ('host', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = { - 'fixed_ips': [{ - 'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152', - 'ip_address': '0.0.0.1'}], + 'fixed_ips': [ + { + 'subnet_id': 
'042eb10a-3a18-4658-ab-cf47c8d03152', + 'ip_address': '0.0.0.1', + } + ], 'binding:profile': {'batman': 'Joker'}, 'qos_policy_id': None, - 'binding:host_id': None + 'binding:host_id': None, } - self.network.update_port.assert_called_once_with( - self._testport, **attrs) + self.network_client.update_port.assert_called_once_with( + self._testport, **attrs + ) self.assertIsNone(result) def test_unset_port_fixed_ip_not_existent(self): arglist = [ - '--fixed-ip', 'ip-address=1.0.0.1', - '--binding-profile', 'Superman', + '--fixed-ip', + 'ip-address=1.0.0.1', + '--binding-profile', + 'Superman', self._testport.name, ] verifylist = [ @@ -2013,14 +2789,16 @@ def test_unset_port_fixed_ip_not_existent(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_port_binding_profile_not_existent(self): arglist = [ - '--fixed-ip', 'ip-address=1.0.0.0', - '--binding-profile', 'Neo', + '--fixed-ip', + 'ip-address=1.0.0.0', + '--binding-profile', + 'Neo', self._testport.name, ] verifylist = [ @@ -2029,60 +2807,67 @@ def test_unset_port_binding_profile_not_existent(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_security_group(self): - _fake_sg1 = network_fakes.FakeSecurityGroup.create_one_security_group() - _fake_sg2 = network_fakes.FakeSecurityGroup.create_one_security_group() + _fake_sg1 = network_fakes.create_one_security_group() + _fake_sg2 = network_fakes.create_one_security_group() _fake_port = network_fakes.create_one_port( - {'security_group_ids': [_fake_sg1.id, _fake_sg2.id]}) - self.network.find_port = mock.Mock(return_value=_fake_port) - self.network.find_security_group = mock.Mock(return_value=_fake_sg2) + {'security_group_ids': [_fake_sg1.id, _fake_sg2.id]} + ) + self.network_client.find_port.return_value = _fake_port + self.network_client.find_security_group.return_value = _fake_sg2 + arglist = [ - '--security-group', _fake_sg2.id, + '--security-group', + _fake_sg2.id, _fake_port.name, ] verifylist = [ - ('security_group_ids', [_fake_sg2.id]), + ('security_groups', [_fake_sg2.id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - attrs = { - 'security_group_ids': [_fake_sg1.id] - } - self.network.update_port.assert_called_once_with( - _fake_port, **attrs) + attrs = {'security_group_ids': [_fake_sg1.id]} + self.network_client.update_port.assert_called_once_with( + _fake_port, **attrs + ) self.assertIsNone(result) def test_unset_port_security_group_not_existent(self): - _fake_sg1 = network_fakes.FakeSecurityGroup.create_one_security_group() - _fake_sg2 = network_fakes.FakeSecurityGroup.create_one_security_group() + _fake_sg1 = network_fakes.create_one_security_group() + _fake_sg2 = network_fakes.create_one_security_group() _fake_port = network_fakes.create_one_port( - {'security_group_ids': [_fake_sg1.id]}) - self.network.find_security_group = mock.Mock(return_value=_fake_sg2) + {'security_group_ids': [_fake_sg1.id]} + ) + self.network_client.find_security_group.return_value = _fake_sg2 + arglist = [ - '--security-group', _fake_sg2.id, + '--security-group', + _fake_sg2.id, _fake_port.name, ] verifylist = [ - ('security_group_ids', 
[_fake_sg2.id]), + ('security_groups', [_fake_sg2.id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_port_allowed_address_pair(self): _fake_port = network_fakes.create_one_port( - {'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]}) - self.network.find_port = mock.Mock(return_value=_fake_port) + {'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]} + ) + self.network_client.find_port.return_value = _fake_port arglist = [ - '--allowed-address', 'ip-address=192.168.1.123', + '--allowed-address', + 'ip-address=192.168.1.123', _fake_port.name, ] verifylist = [ @@ -2096,15 +2881,19 @@ def test_unset_port_allowed_address_pair(self): 'allowed_address_pairs': [], } - self.network.update_port.assert_called_once_with(_fake_port, **attrs) + self.network_client.update_port.assert_called_once_with( + _fake_port, **attrs + ) self.assertIsNone(result) def test_unset_port_allowed_address_pair_not_existent(self): _fake_port = network_fakes.create_one_port( - {'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]}) - self.network.find_port = mock.Mock(return_value=_fake_port) + {'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]} + ) + self.network_client.find_port.return_value = _fake_port arglist = [ - '--allowed-address', 'ip-address=192.168.1.45', + '--allowed-address', + 'ip-address=192.168.1.45', _fake_port.name, ] verifylist = [ @@ -2112,14 +2901,15 @@ def test_unset_port_allowed_address_pair_not_existent(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_port_data_plane_status(self): _fake_port = network_fakes.create_one_port( - {'data_plane_status': 'ACTIVE'}) - self.network.find_port = mock.Mock(return_value=_fake_port) + {'data_plane_status': 'ACTIVE'} + ) + self.network_client.find_port.return_value = _fake_port arglist = [ '--data-plane-status', _fake_port.name, @@ -2136,7 +2926,9 @@ def test_unset_port_data_plane_status(self): 'data_plane_status': None, } - self.network.update_port.assert_called_once_with(_fake_port, **attrs) + self.network_client.update_port.assert_called_once_with( + _fake_port, **attrs + ) self.assertIsNone(result) def _test_unset_tags(self, with_tags=True): @@ -2149,16 +2941,15 @@ def _test_unset_tags(self, with_tags=True): verifylist = [('all_tag', True)] expected_args = [] arglist.append(self._testport.name) - verifylist.append( - ('port', self._testport.name)) + verifylist.append(('port', self._testport.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_port.called) - self.network.set_tags.assert_called_once_with( - self._testport, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_port.called) + self.network_client.set_tags.assert_called_once_with( + self._testport, test_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_unset_with_tags(self): @@ -2169,8 +2960,9 @@ def test_unset_with_all_tag(self): def test_unset_numa_affinity_policy(self): _fake_port = network_fakes.create_one_port( - {'numa_affinity_policy': 'required'}) - self.network.find_port = 
mock.Mock(return_value=_fake_port) + {'numa_affinity_policy': 'required'} + ) + self.network_client.find_port.return_value = _fake_port arglist = [ '--numa-policy', _fake_port.name, @@ -2187,5 +2979,67 @@ def test_unset_numa_affinity_policy(self): 'numa_affinity_policy': None, } - self.network.update_port.assert_called_once_with(_fake_port, **attrs) + self.network_client.update_port.assert_called_once_with( + _fake_port, **attrs + ) + self.assertIsNone(result) + + def test_unset_hints(self): + testport = network_fakes.create_one_port() + self.network_client.find_port.return_value = testport + arglist = [ + '--hints', + testport.name, + ] + verifylist = [ + ('hints', True), + ('port', testport.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.network_client.update_port.assert_called_once_with( + testport, + **{'hints': None}, + ) + self.assertIsNone(result) + + def test_unset_device(self): + testport = network_fakes.create_one_port() + self.network_client.find_port.return_value = testport + arglist = [ + '--device', + testport.name, + ] + verifylist = [ + ('device', True), + ('port', testport.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.network_client.update_port.assert_called_once_with( + testport, + **{'device_id': ''}, + ) + self.assertIsNone(result) + + def test_unset_device_owner(self): + testport = network_fakes.create_one_port() + self.network_client.find_port.return_value = testport + arglist = [ + '--device-owner', + testport.name, + ] + verifylist = [ + ('device_owner', True), + ('port', testport.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.network_client.update_port.assert_called_once_with( + testport, + **{'device_owner': ''}, + ) self.assertIsNone(result) diff --git a/openstackclient/tests/unit/network/v2/test_router.py b/openstackclient/tests/unit/network/v2/test_router.py index fb9673cd71..6ebb7809eb 100644 --- a/openstackclient/tests/unit/network/v2/test_router.py +++ b/openstackclient/tests/unit/network/v2/test_router.py @@ -24,35 +24,37 @@ class TestRouter(network_fakes.TestNetworkV2): - def setUp(self): - super(TestRouter, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects class TestAddPortToRouter(TestRouter): - '''Add port to Router ''' + '''Add port to Router''' _port = network_fakes.create_one_port() - _router = network_fakes.FakeRouter.create_one_router( - attrs={'port': _port.id}) + _router = network_fakes.create_one_router(attrs={'port': _port.id}) def setUp(self): - super(TestAddPortToRouter, self).setUp() - self.network.add_interface_to_router = mock.Mock() - self.cmd = router.AddPortToRouter(self.app, self.namespace) - self.network.find_router = mock.Mock(return_value=self._router) - self.network.find_port = mock.Mock(return_value=self._port) + super().setUp() + + self.network_client.find_router.return_value = self._router + self.network_client.find_port.return_value = self._port + + self.cmd = router.AddPortToRouter(self.app, None) def test_add_port_no_option(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + 
tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_add_port_required_options(self): arglist = [ @@ -67,31 +69,40 @@ def test_add_port_required_options(self): result = self.cmd.take_action(parsed_args) - self.network.add_interface_to_router.assert_called_with( - self._router, **{'port_id': self._router.port, }) + self.network_client.add_interface_to_router.assert_called_with( + self._router, + **{ + 'port_id': self._router.port, + }, + ) self.assertIsNone(result) class TestAddSubnetToRouter(TestRouter): - '''Add subnet to Router ''' + '''Add subnet to Router''' _subnet = network_fakes.FakeSubnet.create_one_subnet() - _router = network_fakes.FakeRouter.create_one_router( - attrs={'subnet': _subnet.id}) + _router = network_fakes.create_one_router(attrs={'subnet': _subnet.id}) def setUp(self): - super(TestAddSubnetToRouter, self).setUp() - self.network.add_interface_to_router = mock.Mock() - self.cmd = router.AddSubnetToRouter(self.app, self.namespace) - self.network.find_router = mock.Mock(return_value=self._router) - self.network.find_subnet = mock.Mock(return_value=self._subnet) + super().setUp() + + self.network_client.find_router.return_value = self._router + self.network_client.find_subnet.return_value = self._subnet + + self.cmd = router.AddSubnetToRouter(self.app, None) def test_add_subnet_no_option(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_add_subnet_required_options(self): arglist = [ @@ -105,64 +116,83 @@ def test_add_subnet_required_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.add_interface_to_router.assert_called_with( - self._router, **{'subnet_id': self._router.subnet}) + self.network_client.add_interface_to_router.assert_called_with( + self._router, **{'subnet_id': self._router.subnet} + ) self.assertIsNone(result) class TestCreateRouter(TestRouter): - # The new router created. 
- new_router = network_fakes.FakeRouter.create_one_router() + new_router = network_fakes.create_one_router() + _extensions = {'fake': network_fakes.create_one_extension()} columns = ( 'admin_state_up', 'availability_zone_hints', 'availability_zones', + 'created_at', 'description', 'distributed', + 'enable_ndp_proxy', 'external_gateway_info', + 'flavor_id', 'ha', 'id', 'name', 'project_id', + 'revision_number', 'routes', 'status', 'tags', + 'updated_at', ) data = ( - router.AdminStateColumn(new_router.admin_state_up), + router.AdminStateColumn(new_router.is_admin_state_up), format_columns.ListColumn(new_router.availability_zone_hints), format_columns.ListColumn(new_router.availability_zones), + new_router.created_at, new_router.description, - new_router.distributed, + new_router.is_distributed, + new_router.enable_ndp_proxy, router.RouterInfoColumn(new_router.external_gateway_info), - new_router.ha, + new_router.flavor_id, + new_router.is_ha, new_router.id, new_router.name, new_router.project_id, + new_router.revision_number, router.RoutesColumn(new_router.routes), new_router.status, format_columns.ListColumn(new_router.tags), + new_router.updated_at, ) def setUp(self): - super(TestCreateRouter, self).setUp() + super().setUp() - self.network.create_router = mock.Mock(return_value=self.new_router) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.create_router.return_value = self.new_router + self.network_client.set_tags.return_value = None + self.network_client.find_extension.side_effect = ( + lambda name, ignore_missing=True: self._extensions.get(name) + ) # Get the command object to test - self.cmd = router.CreateRouter(self.app, self.namespace) + self.cmd = router.CreateRouter(self.app, None) def test_create_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) - self.assertFalse(self.network.set_tags.called) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + self.assertFalse(self.network_client.set_tags.called) def test_create_default_options(self): arglist = [ @@ -176,50 +206,56 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_router.assert_called_once_with(**{ - 'admin_state_up': True, - 'name': self.new_router.name, - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + } + ) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_gateway(self): _network = network_fakes.create_one_network() _subnet = network_fakes.FakeSubnet.create_one_subnet() - self.network.find_network = mock.Mock(return_value=_network) - self.network.find_subnet = mock.Mock(return_value=_subnet) + self.network_client.find_network.return_value = _network + self.network_client.find_subnet.return_value = _subnet arglist = [ self.new_router.name, - '--external-gateway', _network.name, + '--external-gateway', + _network.name, '--enable-snat', - '--fixed-ip', 'ip-address=2001:db8::1' + '--fixed-ip', + 'ip-address=2001:db8::1', ] verifylist = [ ('name', self.new_router.name), ('enable', True), ('distributed', False), ('ha', False), - 
('external_gateway', _network.name), + ('external_gateways', [_network.name]), ('enable_snat', True), - ('fixed_ip', [{'ip-address': '2001:db8::1'}]), + ('fixed_ips', [{'ip-address': '2001:db8::1'}]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_router.assert_called_once_with(**{ - 'admin_state_up': True, - 'name': self.new_router.name, - 'external_gateway_info': { - 'network_id': _network.id, - 'enable_snat': True, - 'external_fixed_ips': [{'ip_address': '2001:db8::1'}], - }, - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + 'external_gateway_info': { + 'network_id': _network.id, + 'enable_snat': True, + 'external_fixed_ips': [{'ip_address': '2001:db8::1'}], + }, + } + ) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -237,13 +273,15 @@ def _test_create_with_ha_options(self, option, ha): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_router.assert_called_once_with(**{ - 'admin_state_up': True, - 'name': self.new_router.name, - 'ha': ha, - }) + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + 'ha': ha, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -266,13 +304,15 @@ def _test_create_with_distributed_options(self, option, distributed): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_router.assert_called_once_with(**{ - 'admin_state_up': True, - 'name': self.new_router.name, - 'distributed': distributed, - }) + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + 'distributed': distributed, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -285,24 +325,28 @@ def test_create_with_centralized_option(self): def test_create_with_AZ_hints(self): arglist = [ self.new_router.name, - '--availability-zone-hint', 'fake-az', - '--availability-zone-hint', 'fake-az2', + '--availability-zone-hint', + 'fake-az', + '--availability-zone-hint', + 'fake-az2', ] verifylist = [ ('name', self.new_router.name), ('availability_zone_hints', ['fake-az', 'fake-az2']), ('enable', True), ('distributed', False), - ('ha', False) + ('ha', False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) - self.network.create_router.assert_called_once_with(**{ - 'admin_state_up': True, - 'name': self.new_router.name, - 'availability_zone_hints': ['fake-az', 'fake-az2'], - }) + columns, data = self.cmd.take_action(parsed_args) + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + 'availability_zone_hints': ['fake-az', 'fake-az2'], + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -325,18 +369,17 @@ def _test_create_with_tag(self, add_tags=True): verifylist.append(('no_tag', 
True)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_router.assert_called_once_with( - name=self.new_router.name, - admin_state_up=True + self.network_client.create_router.assert_called_once_with( + name=self.new_router.name, admin_state_up=True ) if add_tags: - self.network.set_tags.assert_called_once_with( - self.new_router, - tests_utils.CompareBySet(['red', 'blue'])) + self.network_client.set_tags.assert_called_once_with( + self.new_router, tests_utils.CompareBySet(['red', 'blue']) + ) else: - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -346,22 +389,249 @@ def test_create_with_tags(self): def test_create_with_no_tag(self): self._test_create_with_tag(add_tags=False) + def test_create_with_flavor_id_id(self): + _flavor = network_fakes.create_one_network_flavor() + self.network_client.find_flavor.return_value = _flavor + arglist = [ + self.new_router.name, + '--flavor-id', + _flavor.id, + ] + verifylist = [ + ('name', self.new_router.name), + ('enable', True), + ('distributed', False), + ('ha', False), + ('flavor_id', _flavor.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + 'flavor_id': _flavor.id, + } + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) -class TestDeleteRouter(TestRouter): + def test_create_with_flavor_id_name(self): + _flavor = network_fakes.create_one_network_flavor() + self.network_client.find_flavor.return_value = _flavor + arglist = [self.new_router.name, '--flavor-id', _flavor.name] + verifylist = [ + ('name', self.new_router.name), + ('enable', True), + ('distributed', False), + ('ha', False), + ('flavor_id', _flavor.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + 'flavor_id': _flavor.id, + } + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_create_with_flavor_id(self): + _flavor = network_fakes.create_one_network_flavor() + self.network_client.find_flavor.return_value = _flavor + arglist = [ + self.new_router.name, + '--flavor', + _flavor.id, + ] + verifylist = [ + ('name', self.new_router.name), + ('enable', True), + ('distributed', False), + ('ha', False), + ('flavor', _flavor.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + 'flavor_id': _flavor.id, + } + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + def test_create_with_flavor_name(self): + _flavor = network_fakes.create_one_network_flavor() + self.network_client.find_flavor.return_value = _flavor + arglist = [self.new_router.name, '--flavor', _flavor.name] + verifylist = [ + ('name', self.new_router.name), + ('enable', True), + ('distributed', False), + ('ha', False), + ('flavor', _flavor.name), + ] + 
parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + 'flavor_id': _flavor.id, + } + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_create_with_enable_default_route_bfd(self): + self._extensions = { + 'external-gateway-multihoming': network_fakes.create_one_extension( + attrs={'name': 'external-gateway-multihoming'} + ), + } + + arglist = [self.new_router.name, '--enable-default-route-bfd'] + verifylist = [ + ('name', self.new_router.name), + ('enable_default_route_bfd', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.network_client.create_router.assert_called_once_with( + name=self.new_router.name, + admin_state_up=True, + enable_default_route_bfd=True, + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_create_with_enable_default_route_bfd_no_extension(self): + arglist = [self.new_router.name, '--enable-default-route-bfd'] + verifylist = [ + ('name', self.new_router.name), + ('enable_default_route_bfd', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + def test_create_with_enable_default_route_ecmp(self): + self._extensions = { + 'external-gateway-multihoming': network_fakes.create_one_extension( + attrs={'name': 'external-gateway-multihoming'} + ), + } + + arglist = [self.new_router.name, '--enable-default-route-ecmp'] + verifylist = [ + ('name', self.new_router.name), + ('enable_default_route_ecmp', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.network_client.create_router.assert_called_once_with( + name=self.new_router.name, + admin_state_up=True, + enable_default_route_ecmp=True, + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_create_with_enable_default_route_ecmp_no_extension(self): + arglist = [self.new_router.name, '--enable-default-route-ecmp'] + verifylist = [ + ('name', self.new_router.name), + ('enable_default_route_ecmp', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + def test_create_with_qos_policy(self): + _network = network_fakes.create_one_network() + self.network_client.find_network.return_value = _network + _qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = _qos_policy + + arglist = [ + self.new_router.name, + '--external-gateway', + _network.id, + '--qos-policy', + _qos_policy.id, + ] + verifylist = [ + ('name', self.new_router.name), + ('enable', True), + ('distributed', False), + ('ha', False), + ('qos_policy', _qos_policy.id), + ('external_gateways', [_network.id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + gw_info = {'network_id': _network.id, 'qos_policy_id': _qos_policy.id} + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self.new_router.name, + **{'external_gateway_info': gw_info}, + } + ) + self.assertEqual(self.columns, columns) 
+ self.assertCountEqual(self.data, data) + + def test_create_with_qos_policy_no_external_gateway(self): + _qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = _qos_policy + + arglist = [ + self.new_router.name, + '--qos-policy', + _qos_policy.id, + ] + verifylist = [ + ('name', self.new_router.name), + ('enable', True), + ('distributed', False), + ('ha', False), + ('qos_policy', _qos_policy.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + +class TestDeleteRouter(TestRouter): # The routers to delete. - _routers = network_fakes.FakeRouter.create_routers(count=2) + _routers = network_fakes.create_routers(count=2) def setUp(self): - super(TestDeleteRouter, self).setUp() + super().setUp() - self.network.delete_router = mock.Mock(return_value=None) + self.network_client.delete_router.return_value = None - self.network.find_router = ( - network_fakes.FakeRouter.get_routers(self._routers)) + self.network_client.find_router = network_fakes.get_routers( + self._routers + ) # Get the command object to test - self.cmd = router.DeleteRouter(self.app, self.namespace) + self.cmd = router.DeleteRouter(self.app, None) def test_router_delete(self): arglist = [ @@ -373,7 +643,9 @@ def test_router_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_router.assert_called_once_with(self._routers[0]) + self.network_client.delete_router.assert_called_once_with( + self._routers[0] + ) self.assertIsNone(result) def test_multi_routers_delete(self): @@ -392,7 +664,7 @@ def test_multi_routers_delete(self): calls = [] for r in self._routers: calls.append(call(r)) - self.network.delete_router.assert_has_calls(calls) + self.network_client.delete_router.assert_has_calls(calls) self.assertIsNone(result) def test_multi_routers_delete_with_exception(self): @@ -401,15 +673,12 @@ def test_multi_routers_delete_with_exception(self): 'unexist_router', ] verifylist = [ - ('router', - [self._routers[0].name, 'unexist_router']), + ('router', [self._routers[0].name, 'unexist_router']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._routers[0], exceptions.CommandError] - self.network.find_router = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_router.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -417,20 +686,21 @@ def test_multi_routers_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 routers failed to delete.', str(e)) - self.network.find_router.assert_any_call( - self._routers[0].name, ignore_missing=False) - self.network.find_router.assert_any_call( - 'unexist_router', ignore_missing=False) - self.network.delete_router.assert_called_once_with( + self.network_client.find_router.assert_any_call( + self._routers[0].name, ignore_missing=False + ) + self.network_client.find_router.assert_any_call( + 'unexist_router', ignore_missing=False + ) + self.network_client.delete_router.assert_called_once_with( self._routers[0] ) class TestListRouter(TestRouter): - # The routers going to be listed up. 
- routers = network_fakes.FakeRouter.create_routers(count=3) - _extensions = network_fakes.FakeExtension.create_one_extension() + routers = network_fakes.create_routers(count=3) + extensions = network_fakes.create_one_extension() columns = ( 'ID', @@ -455,23 +725,27 @@ class TestListRouter(TestRouter): data = [] for r in routers: - data.append(( - r.id, - r.name, - r.status, - router.AdminStateColumn(r.admin_state_up), - r.project_id, - r.distributed, - r.ha, - )) + data.append( + ( + r.id, + r.name, + r.status, + router.AdminStateColumn(r.is_admin_state_up), + r.project_id, + r.is_distributed, + r.is_ha, + ) + ) router_agent_data = [] for r in routers: - router_agent_data.append(( - r.id, - r.name, - r.external_gateway_info, - )) + router_agent_data.append( + ( + r.id, + r.name, + r.external_gateway_info, + ) + ) agents_columns = ( 'ID', @@ -483,7 +757,8 @@ class TestListRouter(TestRouter): for i in range(0, len(routers)): r = routers[i] data_long.append( - data[i] + ( + data[i] + + ( router.RoutesColumn(r.routes), router.RouterInfoColumn(r.external_gateway_info), format_columns.ListColumn(r.availability_zones), @@ -494,7 +769,8 @@ class TestListRouter(TestRouter): for i in range(0, len(routers)): r = routers[i] data_long_no_az.append( - data[i] + ( + data[i] + + ( router.RoutesColumn(r.routes), router.RouterInfoColumn(r.external_gateway_info), format_columns.ListColumn(r.tags), @@ -502,19 +778,21 @@ class TestListRouter(TestRouter): ) def setUp(self): - super(TestListRouter, self).setUp() + super().setUp() # Get the command object to test - self.cmd = router.ListRouter(self.app, self.namespace) + self.cmd = router.ListRouter(self.app, None) + + self.network_client.agent_hosted_routers.return_value = self.routers + + self.network_client.routers.return_value = self.routers + self.network_client.find_extension.return_value = self.extensions + + self.network_client.find_router.return_value = self.routers[0] - self.network.agent_hosted_routers = mock.Mock( - return_value=self.routers) - self.network.routers = mock.Mock(return_value=self.routers) - self.network.find_extension = mock.Mock(return_value=self._extensions) - self.network.find_router = mock.Mock(return_value=self.routers[0]) self._testagent = network_fakes.create_one_network_agent() - self.network.get_agent = mock.Mock(return_value=self._testagent) - self.network.get_router = mock.Mock(return_value=self.routers[0]) + self.network_client.get_agent.return_value = self._testagent + self.network_client.get_router.return_value = self.routers[0] def test_router_list_no_options(self): arglist = [] @@ -528,15 +806,14 @@ def test_router_list_no_options(self): # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) - self.network.routers.assert_called_once_with() + self.network_client.routers.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_router_list_no_ha_no_distributed(self): - _routers = network_fakes.FakeRouter.create_routers({ - 'ha': None, - 'distributed': None}, - count=3) + _routers = network_fakes.create_routers( + {'ha': None, 'distributed': None}, count=3 + ) arglist = [] verifylist = [ @@ -545,7 +822,8 @@ def test_router_list_no_ha_no_distributed(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) with mock.patch.object( - self.network, "routers", return_value=_routers): + self.network_client, "routers", return_value=_routers + ): columns, data = self.cmd.take_action(parsed_args) self.assertNotIn("is_distributed", columns) @@ -565,7 +843,7 @@ def test_router_list_long(self): # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.network.routers.assert_called_once_with() + self.network_client.routers.assert_called_once_with() self.assertEqual(self.columns_long, columns) self.assertCountEqual(self.data_long, list(data)) @@ -579,21 +857,22 @@ def test_router_list_long_no_az(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) # to mock, that no availability zone - self.network.find_extension = mock.Mock(return_value=None) + self.network_client.find_extension.return_value = None # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - self.network.routers.assert_called_once_with() + self.network_client.routers.assert_called_once_with() self.assertEqual(self.columns_long_no_az, columns) self.assertCountEqual(self.data_long_no_az, list(data)) def test_list_name(self): test_name = "fakename" arglist = [ - '--name', test_name, + '--name', + test_name, ] verifylist = [ ('long', False), @@ -602,7 +881,7 @@ def test_list_name(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.routers.assert_called_once_with( + self.network_client.routers.assert_called_once_with( **{'name': test_name} ) self.assertEqual(self.columns, columns) @@ -619,7 +898,7 @@ def test_router_list_enable(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.routers.assert_called_once_with( + self.network_client.routers.assert_called_once_with( **{'admin_state_up': True, 'is_admin_state_up': True} ) self.assertEqual(self.columns, columns) @@ -629,14 +908,11 @@ def test_router_list_disable(self): arglist = [ '--disable', ] - verifylist = [ - ('long', False), - ('disable', True) - ] + verifylist = [('long', False), ('disable', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.routers.assert_called_once_with( + self.network_client.routers.assert_called_once_with( **{'admin_state_up': False, 'is_admin_state_up': False} ) @@ -647,7 +923,8 @@ def test_router_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -657,7 +934,7 @@ def 
test_router_list_project(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.routers.assert_called_once_with(**filters) + self.network_client.routers.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -665,8 +942,10 @@ def test_router_list_project_domain(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -677,7 +956,7 @@ def test_router_list_project_domain(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.routers.assert_called_once_with(**filters) + self.network_client.routers.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -688,33 +967,44 @@ def test_router_list_agents_no_args(self): verifylist = [] # Missing required router ID should bail here - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_router_list_agents(self): arglist = [ - '--agent', self._testagent.id, + '--agent', + self._testagent.id, ] verifylist = [ ('agent', self._testagent.id), ] - attrs = {self._testagent.id, } + attrs = { + self._testagent.id, + } parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.agent_hosted_routers( - *attrs) + self.network_client.agent_hosted_routers(*attrs) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_list_with_tag_options(self): arglist = [ - '--tags', 'red,blue', - '--any-tags', 'red,green', - '--not-tags', 'orange,yellow', - '--not-any-tags', 'black,white', + '--tags', + 'red,blue', + '--any-tags', + 'red,green', + '--not-tags', + 'orange,yellow', + '--not-any-tags', + 'black,white', ] verifylist = [ ('tags', ['red', 'blue']), @@ -725,36 +1015,43 @@ def test_list_with_tag_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.routers.assert_called_once_with( - **{'tags': 'red,blue', - 'any_tags': 'red,green', - 'not_tags': 'orange,yellow', - 'not_any_tags': 'black,white'} + self.network_client.routers.assert_called_once_with( + **{ + 'tags': 'red,blue', + 'any_tags': 'red,green', + 'not_tags': 'orange,yellow', + 'not_any_tags': 'black,white', + } ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) class TestRemovePortFromRouter(TestRouter): - '''Remove port from a Router ''' + '''Remove port from a Router''' _port = network_fakes.create_one_port() - _router = network_fakes.FakeRouter.create_one_router( - attrs={'port': _port.id}) + _router = network_fakes.create_one_router(attrs={'port': _port.id}) def setUp(self): - super(TestRemovePortFromRouter, self).setUp() - self.network.remove_interface_from_router = mock.Mock() - self.cmd = router.RemovePortFromRouter(self.app, self.namespace) - self.network.find_router = mock.Mock(return_value=self._router) - self.network.find_port = mock.Mock(return_value=self._port) + super().setUp() + + 
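# A brief, hedged aside on the mock pattern this patch applies throughout,
# e.g. replacing `self.network.find_router = mock.Mock(return_value=...)`
# with `self.network_client.find_router.return_value = ...`: attributes of a
# Mock are auto-created on first access, so assigning return_value directly is
# equivalent to binding a fresh mock and keeps the call assertions working.
# Standalone illustration; the names below are illustrative, not SDK calls.
from unittest import mock

client = mock.Mock()
client.find_router.return_value = 'fake-router'
assert client.find_router('name-or-id') == 'fake-router'
client.find_router.assert_called_once_with('name-or-id')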
self.network_client.find_router.return_value = self._router + self.network_client.find_port.return_value = self._port + + self.cmd = router.RemovePortFromRouter(self.app, None) def test_remove_port_no_option(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_remove_port_required_options(self): arglist = [ @@ -769,31 +1066,37 @@ def test_remove_port_required_options(self): result = self.cmd.take_action(parsed_args) - self.network.remove_interface_from_router.assert_called_with( - self._router, **{'port_id': self._router.port}) + self.network_client.remove_interface_from_router.assert_called_with( + self._router, **{'port_id': self._router.port} + ) self.assertIsNone(result) class TestRemoveSubnetFromRouter(TestRouter): - '''Remove subnet from Router ''' + '''Remove subnet from Router''' _subnet = network_fakes.FakeSubnet.create_one_subnet() - _router = network_fakes.FakeRouter.create_one_router( - attrs={'subnet': _subnet.id}) + _router = network_fakes.create_one_router(attrs={'subnet': _subnet.id}) def setUp(self): - super(TestRemoveSubnetFromRouter, self).setUp() - self.network.remove_interface_from_router = mock.Mock() - self.cmd = router.RemoveSubnetFromRouter(self.app, self.namespace) - self.network.find_router = mock.Mock(return_value=self._router) - self.network.find_subnet = mock.Mock(return_value=self._subnet) + super().setUp() + + self.network_client.find_router.return_value = self._router + self.network_client.find_subnet.return_value = self._subnet + + self.cmd = router.RemoveSubnetFromRouter(self.app, None) def test_remove_subnet_no_option(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_remove_subnet_required_options(self): arglist = [ @@ -807,21 +1110,23 @@ def test_remove_subnet_required_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.remove_interface_from_router.assert_called_with( - self._router, **{'subnet_id': self._router.subnet}) + self.network_client.remove_interface_from_router.assert_called_with( + self._router, **{'subnet_id': self._router.subnet} + ) self.assertIsNone(result) class TestAddExtraRoutesToRouter(TestRouter): - - _router = network_fakes.FakeRouter.create_one_router() + _router = network_fakes.create_one_router() def setUp(self): - super(TestAddExtraRoutesToRouter, self).setUp() - self.network.add_extra_routes_to_router = mock.Mock( - return_value=self._router) - self.cmd = router.AddExtraRoutesToRouter(self.app, self.namespace) - self.network.find_router = mock.Mock(return_value=self._router) + super().setUp() + self.network_client.add_extra_routes_to_router.return_value = ( + self._router + ) + + self.cmd = router.AddExtraRoutesToRouter(self.app, None) + self.network_client.find_router.return_value = self._router def test_add_no_extra_route(self): arglist = [ @@ -834,14 +1139,16 @@ def test_add_no_extra_route(self): result = self.cmd.take_action(parsed_args) - self.network.add_extra_routes_to_router.assert_called_with( - self._router, body={'router': {'routes': []}}) + self.network_client.add_extra_routes_to_router.assert_called_with( + self._router, 
body={'router': {'routes': []}} + ) self.assertEqual(2, len(result)) def test_add_one_extra_route(self): arglist = [ self._router.id, - '--route', 'destination=dst1,gateway=gw1', + '--route', + 'destination=dst1,gateway=gw1', ] verifylist = [ ('router', self._router.id), @@ -851,47 +1158,65 @@ def test_add_one_extra_route(self): result = self.cmd.take_action(parsed_args) - self.network.add_extra_routes_to_router.assert_called_with( - self._router, body={'router': {'routes': [ - {'destination': 'dst1', 'nexthop': 'gw1'}, - ]}}) + self.network_client.add_extra_routes_to_router.assert_called_with( + self._router, + body={ + 'router': { + 'routes': [ + {'destination': 'dst1', 'nexthop': 'gw1'}, + ] + } + }, + ) self.assertEqual(2, len(result)) def test_add_multiple_extra_routes(self): arglist = [ self._router.id, - '--route', 'destination=dst1,gateway=gw1', - '--route', 'destination=dst2,gateway=gw2', + '--route', + 'destination=dst1,gateway=gw1', + '--route', + 'destination=dst2,gateway=gw2', ] verifylist = [ ('router', self._router.id), - ('routes', [ - {'destination': 'dst1', 'gateway': 'gw1'}, - {'destination': 'dst2', 'gateway': 'gw2'}, - ]), + ( + 'routes', + [ + {'destination': 'dst1', 'gateway': 'gw1'}, + {'destination': 'dst2', 'gateway': 'gw2'}, + ], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.add_extra_routes_to_router.assert_called_with( - self._router, body={'router': {'routes': [ - {'destination': 'dst1', 'nexthop': 'gw1'}, - {'destination': 'dst2', 'nexthop': 'gw2'}, - ]}}) + self.network_client.add_extra_routes_to_router.assert_called_with( + self._router, + body={ + 'router': { + 'routes': [ + {'destination': 'dst1', 'nexthop': 'gw1'}, + {'destination': 'dst2', 'nexthop': 'gw2'}, + ] + } + }, + ) self.assertEqual(2, len(result)) class TestRemoveExtraRoutesFromRouter(TestRouter): - - _router = network_fakes.FakeRouter.create_one_router() + _router = network_fakes.create_one_router() def setUp(self): - super(TestRemoveExtraRoutesFromRouter, self).setUp() - self.network.remove_extra_routes_from_router = mock.Mock( - return_value=self._router) - self.cmd = router.RemoveExtraRoutesFromRouter(self.app, self.namespace) - self.network.find_router = mock.Mock(return_value=self._router) + super().setUp() + self.network_client.remove_extra_routes_from_router.return_value = ( + self._router + ) + + self.cmd = router.RemoveExtraRoutesFromRouter(self.app, None) + self.network_client.find_router.return_value = self._router def test_remove_no_extra_route(self): arglist = [ @@ -904,14 +1229,16 @@ def test_remove_no_extra_route(self): result = self.cmd.take_action(parsed_args) - self.network.remove_extra_routes_from_router.assert_called_with( - self._router, body={'router': {'routes': []}}) + self.network_client.remove_extra_routes_from_router.assert_called_with( + self._router, body={'router': {'routes': []}} + ) self.assertEqual(2, len(result)) def test_remove_one_extra_route(self): arglist = [ self._router.id, - '--route', 'destination=dst1,gateway=gw1', + '--route', + 'destination=dst1,gateway=gw1', ] verifylist = [ ('router', self._router.id), @@ -921,67 +1248,90 @@ def test_remove_one_extra_route(self): result = self.cmd.take_action(parsed_args) - self.network.remove_extra_routes_from_router.assert_called_with( - self._router, body={'router': {'routes': [ - {'destination': 'dst1', 'nexthop': 'gw1'}, - ]}}) + self.network_client.remove_extra_routes_from_router.assert_called_with( + self._router, + body={ + 
'router': { + 'routes': [ + {'destination': 'dst1', 'nexthop': 'gw1'}, + ] + } + }, + ) self.assertEqual(2, len(result)) def test_remove_multiple_extra_routes(self): arglist = [ self._router.id, - '--route', 'destination=dst1,gateway=gw1', - '--route', 'destination=dst2,gateway=gw2', + '--route', + 'destination=dst1,gateway=gw1', + '--route', + 'destination=dst2,gateway=gw2', ] verifylist = [ ('router', self._router.id), - ('routes', [ - {'destination': 'dst1', 'gateway': 'gw1'}, - {'destination': 'dst2', 'gateway': 'gw2'}, - ]), + ( + 'routes', + [ + {'destination': 'dst1', 'gateway': 'gw1'}, + {'destination': 'dst2', 'gateway': 'gw2'}, + ], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.remove_extra_routes_from_router.assert_called_with( - self._router, body={'router': {'routes': [ - {'destination': 'dst1', 'nexthop': 'gw1'}, - {'destination': 'dst2', 'nexthop': 'gw2'}, - ]}}) + self.network_client.remove_extra_routes_from_router.assert_called_with( + self._router, + body={ + 'router': { + 'routes': [ + {'destination': 'dst1', 'nexthop': 'gw1'}, + {'destination': 'dst2', 'nexthop': 'gw2'}, + ] + } + }, + ) self.assertEqual(2, len(result)) class TestSetRouter(TestRouter): - # The router to set. _default_route = {'destination': '10.20.20.0/24', 'nexthop': '10.20.30.1'} _network = network_fakes.create_one_network() - _subnet = network_fakes.FakeSubnet.create_one_subnet() - _router = network_fakes.FakeRouter.create_one_router( - attrs={'routes': [_default_route], - 'tags': ['green', 'red']} + _subnet = network_fakes.FakeSubnet.create_one_subnet( + attrs={'network_id': _network.id} + ) + _router = network_fakes.create_one_router( + attrs={'routes': [_default_route], 'tags': ['green', 'red']} ) + _extensions = {'fake': network_fakes.create_one_extension()} def setUp(self): - super(TestSetRouter, self).setUp() - self.network.router_add_gateway = mock.Mock() - self.network.update_router = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) - self.network.find_router = mock.Mock(return_value=self._router) - self.network.find_network = mock.Mock(return_value=self._network) - self.network.find_subnet = mock.Mock(return_value=self._subnet) + super().setUp() + self.network_client.update_router.return_value = None + self.network_client.set_tags.return_value = None + self.network_client.find_router.return_value = self._router + self.network_client.find_network.return_value = self._network + + self.network_client.find_subnet.return_value = self._subnet + self.network_client.find_extension.side_effect = ( + lambda name, ignore_missing=True: self._extensions.get(name) + ) # Get the command object to test - self.cmd = router.SetRouter(self.app, self.namespace) + self.cmd = router.SetRouter(self.app, None) def test_set_this(self): arglist = [ self._router.name, '--enable', '--distributed', - '--name', 'noob', + '--name', + 'noob', '--no-ha', - '--description', 'router', + '--description', + 'router', ] verifylist = [ ('router', self._router.name), @@ -1002,8 +1352,9 @@ def test_set_this(self): 'ha': False, 'description': 'router', } - self.network.update_router.assert_called_once_with( - self._router, **attrs) + self.network_client.update_router.assert_called_once_with( + self._router, **attrs + ) self.assertIsNone(result) def test_set_that(self): @@ -1028,8 +1379,9 @@ def test_set_that(self): 'distributed': False, 'ha': True, } - self.network.update_router.assert_called_once_with( - 
self._router, **attrs) + self.network_client.update_router.assert_called_once_with( + self._router, **attrs + ) self.assertIsNone(result) def test_set_distributed_centralized(self): @@ -1044,29 +1396,35 @@ def test_set_distributed_centralized(self): ('distributed', False), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_set_route(self): arglist = [ self._router.name, - '--route', 'destination=10.20.30.0/24,gateway=10.20.30.1', + '--route', + 'destination=10.20.30.0/24,gateway=10.20.30.1', ] verifylist = [ ('router', self._router.name), - ('routes', [{'destination': '10.20.30.0/24', - 'gateway': '10.20.30.1'}]), + ( + 'routes', + [{'destination': '10.20.30.0/24', 'gateway': '10.20.30.1'}], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - routes = [{'destination': '10.20.30.0/24', - 'nexthop': '10.20.30.1'}] - attrs = { - 'routes': routes + self._router.routes - } - self.network.update_router.assert_called_once_with( - self._router, **attrs) + routes = [{'destination': '10.20.30.0/24', 'nexthop': '10.20.30.1'}] + attrs = {'routes': routes + self._router.routes} + self.network_client.update_router.assert_called_once_with( + self._router, **attrs + ) self.assertIsNone(result) def test_set_no_route(self): @@ -1085,34 +1443,40 @@ def test_set_no_route(self): attrs = { 'routes': [], } - self.network.update_router.assert_called_once_with( - self._router, **attrs) + self.network_client.update_router.assert_called_once_with( + self._router, **attrs + ) self.assertIsNone(result) def test_set_route_overwrite_route(self): - _testrouter = network_fakes.FakeRouter.create_one_router( - {'routes': [{"destination": "10.0.0.2", - "nexthop": "1.1.1.1"}]}) - self.network.find_router = mock.Mock(return_value=_testrouter) + _testrouter = network_fakes.create_one_router( + {'routes': [{"destination": "10.0.0.2", "nexthop": "1.1.1.1"}]} + ) + self.network_client.find_router.return_value = _testrouter arglist = [ _testrouter.name, - '--route', 'destination=10.20.30.0/24,gateway=10.20.30.1', + '--route', + 'destination=10.20.30.0/24,gateway=10.20.30.1', '--no-route', ] verifylist = [ ('router', _testrouter.name), - ('routes', [{'destination': '10.20.30.0/24', - 'gateway': '10.20.30.1'}]), + ( + 'routes', + [{'destination': '10.20.30.0/24', 'gateway': '10.20.30.1'}], + ), ('no_route', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = { - 'routes': [{'destination': '10.20.30.0/24', - 'nexthop': '10.20.30.1'}] + 'routes': [ + {'destination': '10.20.30.0/24', 'nexthop': '10.20.30.1'} + ] } - self.network.update_router.assert_called_once_with( - _testrouter, **attrs) + self.network_client.update_router.assert_called_once_with( + _testrouter, **attrs + ) self.assertIsNone(result) def test_set_nothing(self): @@ -1126,112 +1490,145 @@ def test_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_router.called) - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.update_router.called) + self.assertFalse(self.network_client.set_tags.called) self.assertIsNone(result) def test_wrong_gateway_params(self): arglist = [ - "--fixed-ip", "subnet='abc'", + "--fixed-ip", + 
"subnet='abc'", self._router.id, ] verifylist = [ - ('fixed_ip', [{'subnet': "'abc'"}]), + ('fixed_ips', [{'subnet': "'abc'"}]), ('router', self._router.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_set_gateway_network_only(self): arglist = [ - "--external-gateway", self._network.id, + "--external-gateway", + self._network.id, self._router.id, ] verifylist = [ - ('external_gateway', self._network.id), + ('external_gateways', [self._network.id]), ('router', self._router.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_router.assert_called_with( - self._router, **{'external_gateway_info': { - 'network_id': self._network.id}}) + self.network_client.update_router.assert_called_with( + self._router, + **{'external_gateway_info': {'network_id': self._network.id}}, + ) self.assertIsNone(result) def test_set_gateway_options_subnet_only(self): arglist = [ - "--external-gateway", self._network.id, - "--fixed-ip", "subnet='abc'", + "--external-gateway", + self._network.id, + "--fixed-ip", + "subnet='abc'", self._router.id, '--enable-snat', ] verifylist = [ ('router', self._router.id), - ('external_gateway', self._network.id), - ('fixed_ip', [{'subnet': "'abc'"}]), + ('external_gateways', [self._network.id]), + ('fixed_ips', [{'subnet': "'abc'"}]), ('enable_snat', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_router.assert_called_with( - self._router, **{'external_gateway_info': { - 'network_id': self._network.id, - 'external_fixed_ips': [{ - 'subnet_id': self._subnet.id, }], - 'enable_snat': True, }}) + self.network_client.update_router.assert_called_with( + self._router, + **{ + 'external_gateway_info': { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + } + ], + 'enable_snat': True, + } + }, + ) self.assertIsNone(result) def test_set_gateway_option_ipaddress_only(self): arglist = [ - "--external-gateway", self._network.id, - "--fixed-ip", "ip-address=10.0.1.1", + "--external-gateway", + self._network.id, + "--fixed-ip", + "ip-address=10.0.1.1", self._router.id, '--enable-snat', ] verifylist = [ ('router', self._router.id), - ('external_gateway', self._network.id), - ('fixed_ip', [{'ip-address': "10.0.1.1"}]), + ('external_gateways', [self._network.id]), + ('fixed_ips', [{'ip-address': "10.0.1.1"}]), ('enable_snat', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_router.assert_called_with( - self._router, **{'external_gateway_info': { - 'network_id': self._network.id, - 'external_fixed_ips': [{ - 'ip_address': "10.0.1.1", }], - 'enable_snat': True, }}) + self.network_client.update_router.assert_called_with( + self._router, + **{ + 'external_gateway_info': { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'ip_address': "10.0.1.1", + } + ], + 'enable_snat': True, + } + }, + ) self.assertIsNone(result) def test_set_gateway_options_subnet_ipaddress(self): arglist = [ - "--external-gateway", self._network.id, - "--fixed-ip", "subnet='abc',ip-address=10.0.1.1", + "--external-gateway", + self._network.id, + "--fixed-ip", + "subnet='abc',ip-address=10.0.1.1", self._router.id, 
'--enable-snat', ] verifylist = [ ('router', self._router.id), - ('external_gateway', self._network.id), - ('fixed_ip', [{'subnet': "'abc'", - 'ip-address': "10.0.1.1"}]), + ('external_gateways', [self._network.id]), + ('fixed_ips', [{'subnet': "'abc'", 'ip-address': "10.0.1.1"}]), ('enable_snat', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_router.assert_called_with( - self._router, **{'external_gateway_info': { - 'network_id': self._network.id, - 'external_fixed_ips': [{ - 'subnet_id': self._subnet.id, - 'ip_address': "10.0.1.1", }], - 'enable_snat': True, }}) + self.network_client.update_router.assert_called_with( + self._router, + **{ + 'external_gateway_info': { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + 'ip_address': "10.0.1.1", + } + ], + 'enable_snat': True, + } + }, + ) self.assertIsNone(result) def _test_set_tags(self, with_tags=True): @@ -1244,16 +1641,15 @@ def _test_set_tags(self, with_tags=True): verifylist = [('no_tag', True)] expected_args = [] arglist.append(self._router.name) - verifylist.append( - ('router', self._router.name)) + verifylist.append(('router', self._router.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_router.called) - self.network.set_tags.assert_called_once_with( - self._router, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_router.called) + self.network_client.set_tags.assert_called_once_with( + self._router, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_set_with_tags(self): @@ -1263,73 +1659,97 @@ def test_set_with_no_tag(self): self._test_set_tags(with_tags=False) def test_set_gateway_ip_qos(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy + arglist = [ - "--external-gateway", self._network.id, - "--qos-policy", qos_policy.id, + "--external-gateway", + self._network.id, + "--qos-policy", + qos_policy.id, self._router.id, ] verifylist = [ ('router', self._router.id), - ('external_gateway', self._network.id), + ('external_gateways', [self._network.id]), ('qos_policy', qos_policy.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_router.assert_called_with( - self._router, **{'external_gateway_info': { - 'network_id': self._network.id, - 'qos_policy_id': qos_policy.id, }}) + self.network_client.update_router.assert_called_with( + self._router, + **{ + 'external_gateway_info': { + 'network_id': self._network.id, + 'qos_policy_id': qos_policy.id, + } + }, + ) self.assertIsNone(result) def test_unset_gateway_ip_qos(self): arglist = [ - "--external-gateway", self._network.id, + "--external-gateway", + self._network.id, "--no-qos-policy", self._router.id, ] verifylist = [ ('router', self._router.id), - ('external_gateway', self._network.id), + ('external_gateways', [self._network.id]), ('no_qos_policy', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_router.assert_called_with( - self._router, **{'external_gateway_info': { - 'network_id': self._network.id, - 
'qos_policy_id': None, }}) + self.network_client.update_router.assert_called_with( + self._router, + **{ + 'external_gateway_info': { + 'network_id': self._network.id, + 'qos_policy_id': None, + } + }, + ) self.assertIsNone(result) def test_set_unset_gateway_ip_qos(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy + arglist = [ - "--external-gateway", self._network.id, - "--qos-policy", qos_policy.id, + "--external-gateway", + self._network.id, + "--qos-policy", + qos_policy.id, "--no-qos-policy", self._router.id, ] verifylist = [ ('router', self._router.id), - ('external_gateway', self._network.id), + ('external_gateways', [self._network.id]), ('qos_policy', qos_policy.id), ('no_qos_policy', True), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_set_gateway_ip_qos_no_gateway(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) - router = network_fakes.FakeRouter.create_one_router() - self.network.find_router = mock.Mock(return_value=router) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy + + router = network_fakes.create_one_router() + self.network_client.find_router.return_value = router arglist = [ - "--qos-policy", qos_policy.id, + "--qos-policy", + qos_policy.id, router.id, ] verifylist = [ @@ -1338,14 +1758,16 @@ def test_set_gateway_ip_qos_no_gateway(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_gateway_ip_qos_no_gateway(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) - router = network_fakes.FakeRouter.create_one_router() - self.network.find_router = mock.Mock(return_value=router) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy + + router = network_fakes.create_one_router() + self.network_client.find_router.return_value = router arglist = [ "--no-qos-policy", router.id, @@ -1355,72 +1777,92 @@ def test_unset_gateway_ip_qos_no_gateway(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) class TestShowRouter(TestRouter): - # The router to set. 
- _router = network_fakes.FakeRouter.create_one_router() - _port = network_fakes.create_one_port({ - 'device_owner': 'network:router_interface', - 'device_id': _router.id - }) - setattr(_router, - 'interfaces_info', - [{'port_id': _port.id, - 'ip_address': _port.fixed_ips[0]['ip_address'], - 'subnet_id': _port.fixed_ips[0]['subnet_id']}]) + _router = network_fakes.create_one_router() + _port = network_fakes.create_one_port( + {'device_owner': 'network:router_interface', 'device_id': _router.id} + ) + setattr( + _router, + 'interfaces_info', + [ + { + 'port_id': _port.id, + 'ip_address': _port.fixed_ips[0]['ip_address'], + 'subnet_id': _port.fixed_ips[0]['subnet_id'], + } + ], + ) columns = ( 'admin_state_up', 'availability_zone_hints', 'availability_zones', + 'created_at', 'description', 'distributed', + 'enable_ndp_proxy', 'external_gateway_info', + 'flavor_id', 'ha', 'id', 'interfaces_info', 'name', 'project_id', + 'revision_number', 'routes', 'status', 'tags', + 'updated_at', ) data = ( - router.AdminStateColumn(_router.admin_state_up), + router.AdminStateColumn(_router.is_admin_state_up), format_columns.ListColumn(_router.availability_zone_hints), format_columns.ListColumn(_router.availability_zones), + _router.created_at, _router.description, - _router.distributed, + _router.is_distributed, + _router.enable_ndp_proxy, router.RouterInfoColumn(_router.external_gateway_info), - _router.ha, + _router.flavor_id, + _router.is_ha, _router.id, router.RouterInfoColumn(_router.interfaces_info), _router.name, _router.project_id, + _router.revision_number, router.RoutesColumn(_router.routes), _router.status, format_columns.ListColumn(_router.tags), + _router.updated_at, ) def setUp(self): - super(TestShowRouter, self).setUp() + super().setUp() - self.network.find_router = mock.Mock(return_value=self._router) - self.network.ports = mock.Mock(return_value=[self._port]) + self.network_client.find_router.return_value = self._router + self.network_client.ports.return_value = [self._port] # Get the command object to test - self.cmd = router.ShowRouter(self.app, self.namespace) + self.cmd = router.ShowRouter(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -1433,18 +1875,19 @@ def test_show_all_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.find_router.assert_called_once_with( - self._router.name, ignore_missing=False) - self.network.ports.assert_called_with(**{ - 'device_id': self._router.id - }) + self.network_client.find_router.assert_called_once_with( + self._router.name, ignore_missing=False + ) + self.network_client.ports.assert_called_with( + **{'device_id': self._router.id} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_show_no_ha_no_distributed(self): - _router = network_fakes.FakeRouter.create_one_router({ - 'ha': None, - 'distributed': None}) + _router = network_fakes.create_one_router( + {'ha': None, 'distributed': None} + ) arglist = [ _router.name, @@ -1455,14 +1898,15 @@ def test_show_no_ha_no_distributed(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) with mock.patch.object( - self.network, "find_router", return_value=_router): + self.network_client, "find_router", return_value=_router + ): columns, data 
= self.cmd.take_action(parsed_args) self.assertNotIn("is_distributed", columns) self.assertNotIn("is_ha", columns) def test_show_no_extra_route_extension(self): - _router = network_fakes.FakeRouter.create_one_router({'routes': None}) + _router = network_fakes.create_one_router({'routes': None}) arglist = [ _router.name, @@ -1473,7 +1917,8 @@ def test_show_no_extra_route_extension(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) with mock.patch.object( - self.network, "find_router", return_value=_router): + self.network_client, "find_router", return_value=_router + ): columns, data = self.cmd.take_action(parsed_args) self.assertIn("routes", columns) @@ -1481,75 +1926,126 @@ def test_show_no_extra_route_extension(self): class TestUnsetRouter(TestRouter): - def setUp(self): - super(TestUnsetRouter, self).setUp() + super().setUp() self.fake_network = network_fakes.create_one_network() - self.fake_qos_policy = ( - network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()) - self._testrouter = network_fakes.FakeRouter.create_one_router( - {'routes': [{"destination": "192.168.101.1/24", - "nexthop": "172.24.4.3"}, - {"destination": "192.168.101.2/24", - "nexthop": "172.24.4.3"}], - 'tags': ['green', 'red'], - 'external_gateway_info': { - 'network_id': self.fake_network.id, - 'qos_policy_id': self.fake_qos_policy.id - }}) + self.fake_qos_policy = network_fakes.create_one_qos_policy() + self._testrouter = network_fakes.create_one_router( + { + 'routes': [ + { + "destination": "192.168.101.1/24", + "nexthop": "172.24.4.3", + }, + { + "destination": "192.168.101.2/24", + "nexthop": "172.24.4.3", + }, + ], + 'tags': ['green', 'red'], + 'external_gateway_info': { + 'network_id': self.fake_network.id, + 'qos_policy_id': self.fake_qos_policy.id, + }, + } + ) self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet() - self.network.find_router = mock.Mock(return_value=self._testrouter) - self.network.update_router = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.find_router.return_value = self._testrouter + + self.network_client.update_router.return_value = None + self.network_client.set_tags.return_value = None + self._extensions = {'fake': network_fakes.create_one_extension()} + self.network_client.find_extension.side_effect = ( + lambda name, ignore_missing=True: self._extensions.get(name) + ) + self.network_client.remove_external_gateways.return_value = None + # Get the command object to test - self.cmd = router.UnsetRouter(self.app, self.namespace) + self.cmd = router.UnsetRouter(self.app, None) def test_unset_router_params(self): arglist = [ - '--route', 'destination=192.168.101.1/24,gateway=172.24.4.3', + '--route', + 'destination=192.168.101.1/24,gateway=172.24.4.3', self._testrouter.name, ] verifylist = [ - ('routes', [ - {"destination": "192.168.101.1/24", "gateway": "172.24.4.3"}]), + ( + 'routes', + [{"destination": "192.168.101.1/24", "gateway": "172.24.4.3"}], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = { - 'routes': [{"destination": "192.168.101.2/24", - "nexthop": "172.24.4.3"}], + 'routes': [ + {"destination": "192.168.101.2/24", "nexthop": "172.24.4.3"} + ], } - self.network.update_router.assert_called_once_with( - self._testrouter, **attrs) + self.network_client.update_router.assert_called_once_with( + self._testrouter, **attrs + ) self.assertIsNone(result) def test_unset_router_wrong_routes(self): arglist = [ - '--route', 
'destination=192.168.101.1/24,gateway=172.24.4.2', + '--route', + 'destination=192.168.101.1/24,gateway=172.24.4.2', self._testrouter.name, ] verifylist = [ - ('routes', [ - {"destination": "192.168.101.1/24", "gateway": "172.24.4.2"}]), + ( + 'routes', + [{"destination": "192.168.101.1/24", "gateway": "172.24.4.2"}], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_router_external_gateway(self): arglist = [ '--external-gateway', self._testrouter.name, ] - verifylist = [('external_gateway', True)] + verifylist = [('external_gateways', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = {'external_gateway_info': {}} - self.network.update_router.assert_called_once_with( - self._testrouter, **attrs) + self.network_client.update_router.assert_called_once_with( + self._testrouter, **attrs + ) + self.assertIsNone(result) + + def test_unset_router_external_gateway_multiple_supported(self): + # Add the relevant extension in order to test the alternate behavior. + self._extensions = { + 'external-gateway-multihoming': network_fakes.create_one_extension( + attrs={'name': 'external-gateway-multihoming'} + ) + } + arglist = [ + '--external-gateway', + self._testrouter.name, + ] + verifylist = [('external_gateways', True)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + # The removal of all gateways should be requested using the multiple + # gateways API. + self.network_client.remove_external_gateways.assert_called_once_with( + self._testrouter, body={'router': {'external_gateways': {}}} + ) + # The compatibility API will also be called in order to potentially + # unset other parameters along with external_gateway_info which + # should already be empty at that point anyway. 
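# Hedged sketch of the flow the comments above describe, mirroring only the
# calls asserted in this test rather than quoting router.py: with the
# 'external-gateway-multihoming' extension present, clearing the gateway goes
# through remove_external_gateways() with an empty external_gateways mapping,
# and update_router() is still invoked so any other unset attributes travel in
# the same request path. The function name is illustrative only.
def _unset_all_gateways(network_client, router_obj, attrs):
    if network_client.find_extension(
        'external-gateway-multihoming', ignore_missing=True
    ):
        network_client.remove_external_gateways(
            router_obj, body={'router': {'external_gateways': {}}}
        )
    attrs['external_gateway_info'] = {}
    network_client.update_router(router_obj, **attrs)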
+ self.network_client.update_router.assert_called_once_with( + self._testrouter, **{'external_gateway_info': {}} + ) self.assertIsNone(result) def _test_unset_tags(self, with_tags=True): @@ -1562,16 +2058,15 @@ def _test_unset_tags(self, with_tags=True): verifylist = [('all_tag', True)] expected_args = [] arglist.append(self._testrouter.name) - verifylist.append( - ('router', self._testrouter.name)) + verifylist.append(('router', self._testrouter.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_router.called) - self.network.set_tags.assert_called_once_with( - self._testrouter, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_router.called) + self.network_client.set_tags.assert_called_once_with( + self._testrouter, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_unset_with_tags(self): @@ -1585,22 +2080,26 @@ def test_unset_router_qos_policy(self): '--qos-policy', self._testrouter.name, ] - verifylist = [ - ('qos_policy', True) - ] + verifylist = [('qos_policy', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - attrs = {'external_gateway_info': {"network_id": self.fake_network.id, - "qos_policy_id": None}} - self.network.update_router.assert_called_once_with( - self._testrouter, **attrs) + attrs = { + 'external_gateway_info': { + "network_id": self.fake_network.id, + "qos_policy_id": None, + } + } + self.network_client.update_router.assert_called_once_with( + self._testrouter, **attrs + ) self.assertIsNone(result) def test_unset_gateway_ip_qos_no_network(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) - router = network_fakes.FakeRouter.create_one_router() - self.network.find_router = mock.Mock(return_value=router) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy + + router = network_fakes.create_one_router() + self.network_client.find_router.return_value = router arglist = [ "--qos-policy", router.id, @@ -1610,15 +2109,18 @@ def test_unset_gateway_ip_qos_no_network(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_gateway_ip_qos_no_qos(self): - qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy() - self.network.find_qos_policy = mock.Mock(return_value=qos_policy) - router = network_fakes.FakeRouter.create_one_router( - {"external_gateway_info": {"network_id": "fake-id"}}) - self.network.find_router = mock.Mock(return_value=router) + qos_policy = network_fakes.create_one_qos_policy() + self.network_client.find_qos_policy.return_value = qos_policy + + router = network_fakes.create_one_router( + {"external_gateway_info": {"network_id": "fake-id"}} + ) + self.network_client.find_router.return_value = router arglist = [ "--qos-policy", router.id, @@ -1628,5 +2130,545 @@ def test_unset_gateway_ip_qos_no_qos(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + +class 
TestGatewayOps(TestRouter): + def setUp(self): + super().setUp() + self._networks = [] + self._network = network_fakes.create_one_network() + self._networks.append(self._network) + + self._router = network_fakes.create_one_router( + { + 'external_gateway_info': { + 'network_id': self._network.id, + }, + } + ) + self._subnet = network_fakes.FakeSubnet.create_one_subnet( + attrs={'network_id': self._network.id} + ) + self._extensions = { + 'external-gateway-multihoming': network_fakes.create_one_extension( + attrs={'name': 'external-gateway-multihoming'} + ) + } + self.network_client.find_extension.side_effect = ( + lambda name, ignore_missing=True: self._extensions.get(name) + ) + self.network_client.find_router.return_value = self._router + + def _find_network(name_or_id, ignore_missing): + for network in self._networks: + if name_or_id in (network.id, network.name): + return network + if ignore_missing: + return None + raise Exception('Test resource not found') + + self.network_client.find_network.side_effect = _find_network + + self.network_client.find_subnet.return_value = self._subnet + self.network_client.add_external_gateways.return_value = None + + self.network_client.remove_external_gateways.return_value = None + + +class TestCreateMultipleGateways(TestGatewayOps): + _columns = ( + 'admin_state_up', + 'availability_zone_hints', + 'availability_zones', + 'created_at', + 'description', + 'distributed', + 'enable_ndp_proxy', + 'external_gateway_info', + 'flavor_id', + 'ha', + 'id', + 'name', + 'project_id', + 'revision_number', + 'routes', + 'status', + 'tags', + 'updated_at', + ) + + def setUp(self): + super().setUp() + self._second_network = network_fakes.create_one_network() + self._networks.append(self._second_network) + + self.network_client.create_router.return_value = self._router + + self.network_client.update_router.return_value = None + self.network_client.update_external_gateways.return_value = None + + self._data = ( + router.AdminStateColumn(self._router.is_admin_state_up), + format_columns.ListColumn(self._router.availability_zone_hints), + format_columns.ListColumn(self._router.availability_zones), + self._router.created_at, + self._router.description, + self._router.is_distributed, + self._router.enable_ndp_proxy, + router.RouterInfoColumn(self._router.external_gateway_info), + self._router.flavor_id, + self._router.is_ha, + self._router.id, + self._router.name, + self._router.project_id, + self._router.revision_number, + router.RoutesColumn(self._router.routes), + self._router.status, + format_columns.ListColumn(self._router.tags), + self._router.updated_at, + ) + self.cmd = router.CreateRouter(self.app, None) + + def test_create_one_gateway(self): + arglist = [ + "--external-gateway", + self._network.id, + self._router.name, + ] + verifylist = [ + ('name', self._router.name), + ('external_gateways', [self._network.id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.network_client.update_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 'external_gateways': [ + { + 'network_id': self._network.id, + } + ] + } + }, + ) + self.assertEqual(self._columns, columns) + self.assertCountEqual(self._data, data) + + def test_create_multiple_gateways(self): + arglist = [ + self._router.name, + "--external-gateway", + self._network.id, + "--external-gateway", + self._network.id, + "--external-gateway", + self._second_network.id, + '--fixed-ip', + 
f'subnet={self._subnet.id},ip-address=10.0.1.1', + '--fixed-ip', + f'subnet={self._subnet.id},ip-address=10.0.1.2', + ] + verifylist = [ + ('name', self._router.name), + ( + 'external_gateways', + [self._network.id, self._network.id, self._second_network.id], + ), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + # The router will not have a gateway after the create call, but it + # will be added after the update call. + self.network_client.create_router.assert_called_once_with( + **{ + 'admin_state_up': True, + 'name': self._router.name, + } + ) + self.network_client.update_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 'external_gateways': [ + { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.1', + } + ], + }, + { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.2', + } + ], + }, + { + 'network_id': self._second_network.id, + }, + ] + } + }, + ) + self.assertEqual(self._columns, columns) + self.assertCountEqual(self._data, data) + + +class TestUpdateMultipleGateways(TestGatewayOps): + def setUp(self): + super().setUp() + self._second_network = network_fakes.create_one_network() + self._networks.append(self._second_network) + + self.network_client.update_router.return_value = None + self.network_client.update_external_gateways.return_value = None + + self.cmd = router.SetRouter(self.app, None) + + def test_update_one_gateway(self): + arglist = [ + "--external-gateway", + self._network.id, + "--no-qos-policy", + self._router.name, + ] + verifylist = [ + ('router', self._router.name), + ('external_gateways', [self._network.id]), + ('no_qos_policy', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.network_client.update_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 'external_gateways': [ + {'network_id': self._network.id, 'qos_policy_id': None} + ] + } + }, + ) + self.assertIsNone(result) + + def test_update_multiple_gateways(self): + arglist = [ + self._router.name, + "--external-gateway", + self._network.id, + "--external-gateway", + self._network.id, + "--external-gateway", + self._second_network.id, + '--fixed-ip', + f'subnet={self._subnet.id},ip-address=10.0.1.1', + '--fixed-ip', + f'subnet={self._subnet.id},ip-address=10.0.1.2', + "--no-qos-policy", + ] + verifylist = [ + ('router', self._router.name), + ( + 'external_gateways', + [self._network.id, self._network.id, self._second_network.id], + ), + ('no_qos_policy', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.network_client.update_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 'external_gateways': [ + { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.1', + } + ], + 'qos_policy_id': None, + }, + { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.2', + } + ], + 'qos_policy_id': None, + }, + { + 'network_id': self._second_network.id, + 'qos_policy_id': None, + }, + ] + } + }, + ) + self.assertIsNone(result) + + +class TestAddGatewayRouter(TestGatewayOps): + def setUp(self): + super().setUp() + # Get the command object to test + 
self.cmd = router.AddGatewayToRouter(self.app, None) + + self.network_client.add_external_gateways.return_value = self._router + + def test_add_gateway_network_only(self): + arglist = [ + self._router.name, + self._network.id, + ] + verifylist = [ + ('router', self._router.name), + ('external_gateways', [self._network.id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.network_client.add_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 'external_gateways': [{'network_id': self._network.id}] + } + }, + ) + self.assertEqual(result[1][result[0].index('id')], self._router.id) + + def test_add_gateway_network_fixed_ip(self): + arglist = [ + self._router.name, + self._network.id, + '--fixed-ip', + f'subnet={self._subnet.id},ip-address=10.0.1.1', + ] + verifylist = [ + ('router', self._router.name), + ('external_gateways', [self._network.id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.network_client.add_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 'external_gateways': [ + { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.1', + } + ], + } + ] + } + }, + ) + self.assertEqual(result[1][result[0].index('id')], self._router.id) + + def test_add_gateway_network_multiple_fixed_ips(self): + arglist = [ + self._router.name, + self._network.id, + '--fixed-ip', + f'subnet={self._subnet.id},ip-address=10.0.1.1', + '--fixed-ip', + f'subnet={self._subnet.id},ip-address=10.0.1.2', + ] + verifylist = [ + ('router', self._router.name), + ('external_gateways', [self._network.id]), + ( + 'fixed_ips', + [ + {'ip-address': '10.0.1.1', 'subnet': self._subnet.id}, + {'ip-address': '10.0.1.2', 'subnet': self._subnet.id}, + ], + ), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.network_client.add_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 'external_gateways': [ + { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.1', + }, + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.2', + }, + ], + } + ] + } + }, + ) + self.assertEqual(result[1][result[0].index('id')], self._router.id) + + def test_add_gateway_network_only_no_extension(self): + self._extensions = {} + arglist = [ + self._router.name, + self._network.id, + ] + verifylist = [ + ('router', self._router.name), + ('external_gateways', [self._network.id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + +class TestRemoveGatewayRouter(TestGatewayOps): + def setUp(self): + super().setUp() + # Get the command object to test + self.cmd = router.RemoveGatewayFromRouter(self.app, None) + + self.network_client.remove_external_gateways.return_value = ( + self._router + ) + + def test_remove_gateway_network_only(self): + arglist = [ + self._router.name, + self._network.id, + ] + verifylist = [ + ('router', self._router.name), + ('external_gateways', [self._network.id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.network_client.remove_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 
'external_gateways': [{'network_id': self._network.id}] + } + }, + ) + self.assertEqual(result[1][result[0].index('id')], self._router.id) + + def test_remove_gateway_network_fixed_ip(self): + arglist = [ + self._router.name, + self._network.id, + '--fixed-ip', + f'subnet={self._subnet.id},ip-address=10.0.1.1', + ] + verifylist = [ + ('router', self._router.name), + ('external_gateways', [self._network.id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.network_client.remove_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 'external_gateways': [ + { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.1', + } + ], + } + ] + } + }, + ) + self.assertEqual(result[1][result[0].index('id')], self._router.id) + + def test_remove_gateway_network_multiple_fixed_ips(self): + arglist = [ + self._router.name, + self._network.id, + '--fixed-ip', + f'subnet={self._subnet.id},ip-address=10.0.1.1', + '--fixed-ip', + f'subnet={self._subnet.id},ip-address=10.0.1.2', + ] + verifylist = [ + ('router', self._router.name), + ('external_gateways', [self._network.id]), + ( + 'fixed_ips', + [ + {'ip-address': '10.0.1.1', 'subnet': self._subnet.id}, + {'ip-address': '10.0.1.2', 'subnet': self._subnet.id}, + ], + ), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + self.network_client.remove_external_gateways.assert_called_with( + self._router, + body={ + 'router': { + 'external_gateways': [ + { + 'network_id': self._network.id, + 'external_fixed_ips': [ + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.1', + }, + { + 'subnet_id': self._subnet.id, + 'ip_address': '10.0.1.2', + }, + ], + } + ] + } + }, + ) + self.assertEqual(result[1][result[0].index('id')], self._router.id) + + def test_remove_gateway_network_only_no_extension(self): + self._extensions = {} + arglist = [ + self._router.name, + self._network.id, + ] + verifylist = [ + ('router', self._router.name), + ('external_gateways', [self._network.id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) diff --git a/openstackclient/tests/unit/network/v2/test_security_group_compute.py b/openstackclient/tests/unit/network/v2/test_security_group_compute.py index 4f1ddce590..e23bb24977 100644 --- a/openstackclient/tests/unit/network/v2/test_security_group_compute.py +++ b/openstackclient/tests/unit/network/v2/test_security_group_compute.py @@ -12,36 +12,23 @@ # from unittest import mock -from unittest.mock import call from osc_lib import exceptions +from openstackclient.api import compute_v2 from openstackclient.network.v2 import security_group from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit import utils as tests_utils -class TestSecurityGroupCompute(compute_fakes.TestComputev2): - - def setUp(self): - super(TestSecurityGroupCompute, self).setUp() - - # Get a shortcut to the compute client - self.compute = self.app.client_manager.compute - - -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.security_group_create' -) -class TestCreateSecurityGroupCompute(TestSecurityGroupCompute): - +@mock.patch.object(compute_v2, 'create_security_group') +class 
TestCreateSecurityGroupCompute(compute_fakes.TestComputev2): project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() # The security group to be shown. - _security_group = \ - compute_fakes.FakeSecurityGroup.create_one_security_group() + _security_group = compute_fakes.create_one_security_group() columns = ( 'description', @@ -60,7 +47,7 @@ class TestCreateSecurityGroupCompute(TestSecurityGroupCompute): ) def setUp(self): - super(TestCreateSecurityGroupCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False @@ -68,8 +55,9 @@ def setUp(self): self.cmd = security_group.CreateSecurityGroup(self.app, None) def test_security_group_create_no_options(self, sg_mock): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_security_group_create_min_options(self, sg_mock): sg_mock.return_value = self._security_group @@ -84,6 +72,7 @@ def test_security_group_create_min_options(self, sg_mock): columns, data = self.cmd.take_action(parsed_args) sg_mock.assert_called_once_with( + self.compute_client, self._security_group['name'], self._security_group['name'], ) @@ -93,7 +82,8 @@ def test_security_group_create_min_options(self, sg_mock): def test_security_group_create_all_options(self, sg_mock): sg_mock.return_value = self._security_group arglist = [ - '--description', self._security_group['description'], + '--description', + self._security_group['description'], self._security_group['name'], ] verifylist = [ @@ -105,6 +95,7 @@ def test_security_group_create_all_options(self, sg_mock): columns, data = self.cmd.take_action(parsed_args) sg_mock.assert_called_once_with( + self.compute_client, self._security_group['name'], self._security_group['description'], ) @@ -112,23 +103,18 @@ def test_security_group_create_all_options(self, sg_mock): self.assertCountEqual(self.data, data) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.security_group_delete' -) -class TestDeleteSecurityGroupCompute(TestSecurityGroupCompute): - +@mock.patch.object(compute_v2, 'delete_security_group') +class TestDeleteSecurityGroupCompute(compute_fakes.TestComputev2): # The security groups to be deleted. 
- _security_groups = \ - compute_fakes.FakeSecurityGroup.create_security_groups() + _security_groups = compute_fakes.create_security_groups() def setUp(self): - super(TestDeleteSecurityGroupCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - self.compute.api.security_group_find = ( - compute_fakes.FakeSecurityGroup.get_security_groups( - self._security_groups) + compute_v2.find_security_group = mock.Mock( + side_effect=self._security_groups ) # Get the command object to test @@ -147,64 +133,65 @@ def test_security_group_delete(self, sg_mock): result = self.cmd.take_action(parsed_args) sg_mock.assert_called_once_with( + self.compute_client, self._security_groups[0]['id'], ) self.assertIsNone(result) def test_security_group_multi_delete(self, sg_mock): sg_mock.return_value = mock.Mock(return_value=None) - arglist = [] - verifylist = [] - - for s in self._security_groups: - arglist.append(s['id']) + arglist = [ + self._security_groups[0]['id'], + self._security_groups[1]['id'], + ] verifylist = [ ('group', arglist), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - calls = [] - for s in self._security_groups: - calls.append(call(s['id'])) - sg_mock.assert_has_calls(calls) + sg_mock.assert_has_calls( + [ + mock.call(self.compute_client, self._security_groups[0]['id']), + mock.call(self.compute_client, self._security_groups[1]['id']), + ] + ) self.assertIsNone(result) def test_security_group_multi_delete_with_exception(self, sg_mock): sg_mock.return_value = mock.Mock(return_value=None) - sg_mock.side_effect = ([ - mock.Mock(return_value=None), - exceptions.CommandError, - ]) + compute_v2.find_security_group.side_effect = [ + self._security_groups[0], + exceptions.NotFound('foo'), + ] arglist = [ self._security_groups[0]['id'], 'unexist_security_group', ] verifylist = [ - ('group', - [self._security_groups[0]['id'], 'unexist_security_group']), + ('group', arglist), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('1 of 2 groups failed to delete.', str(e)) - sg_mock.assert_any_call(self._security_groups[0]['id']) - sg_mock.assert_any_call('unexist_security_group') + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertEqual('1 of 2 groups failed to delete.', str(exc)) + sg_mock.assert_has_calls( + [ + mock.call(self.compute_client, self._security_groups[0]['id']), + ] + ) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.security_group_list' -) -class TestListSecurityGroupCompute(TestSecurityGroupCompute): +@mock.patch.object(compute_v2, 'list_security_groups') +class TestListSecurityGroupCompute(compute_fakes.TestComputev2): # The security group to be listed. 
- _security_groups = \ - compute_fakes.FakeSecurityGroup.create_security_groups(count=3) + _security_groups = compute_fakes.create_security_groups(count=3) columns = ( 'ID', @@ -220,22 +207,26 @@ class TestListSecurityGroupCompute(TestSecurityGroupCompute): data = [] for grp in _security_groups: - data.append(( - grp['id'], - grp['name'], - grp['description'], - )) + data.append( + ( + grp['id'], + grp['name'], + grp['description'], + ) + ) data_all_projects = [] for grp in _security_groups: - data_all_projects.append(( - grp['id'], - grp['name'], - grp['description'], - grp['tenant_id'], - )) + data_all_projects.append( + ( + grp['id'], + grp['name'], + grp['description'], + grp['tenant_id'], + ) + ) def setUp(self): - super(TestListSecurityGroupCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False @@ -252,8 +243,9 @@ def test_security_group_list_no_options(self, sg_mock): columns, data = self.cmd.take_action(parsed_args) - kwargs = {'search_opts': {'all_tenants': False}} - sg_mock.assert_called_once_with(**kwargs) + sg_mock.assert_called_once_with( + self.compute_client, all_projects=False + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -269,35 +261,32 @@ def test_security_group_list_all_projects(self, sg_mock): columns, data = self.cmd.take_action(parsed_args) - kwargs = {'search_opts': {'all_tenants': True}} - sg_mock.assert_called_once_with(**kwargs) + sg_mock.assert_called_once_with(self.compute_client, all_projects=True) self.assertEqual(self.columns_all_projects, columns) self.assertCountEqual(self.data_all_projects, list(data)) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.security_group_set' -) -class TestSetSecurityGroupCompute(TestSecurityGroupCompute): - +@mock.patch.object(compute_v2, 'update_security_group') +class TestSetSecurityGroupCompute(compute_fakes.TestComputev2): # The security group to be set. 
- _security_group = \ - compute_fakes.FakeSecurityGroup.create_one_security_group() + _security_group = compute_fakes.create_one_security_group() def setUp(self): - super(TestSetSecurityGroupCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - self.compute.api.security_group_find = mock.Mock( - return_value=self._security_group) + compute_v2.find_security_group = mock.Mock( + return_value=self._security_group + ) # Get the command object to test self.cmd = security_group.SetSecurityGroup(self.app, None) def test_security_group_set_no_options(self, sg_mock): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_security_group_set_no_updates(self, sg_mock): sg_mock.return_value = mock.Mock(return_value=None) @@ -312,9 +301,7 @@ def test_security_group_set_no_updates(self, sg_mock): result = self.cmd.take_action(parsed_args) sg_mock.assert_called_once_with( - self._security_group, - self._security_group['name'], - self._security_group['description'], + self.compute_client, self._security_group['id'] ) self.assertIsNone(result) @@ -323,8 +310,10 @@ def test_security_group_set_all_options(self, sg_mock): new_name = 'new-' + self._security_group['name'] new_description = 'new-' + self._security_group['description'] arglist = [ - '--name', new_name, - '--description', new_description, + '--name', + new_name, + '--description', + new_description, self._security_group['name'], ] verifylist = [ @@ -337,27 +326,23 @@ def test_security_group_set_all_options(self, sg_mock): result = self.cmd.take_action(parsed_args) sg_mock.assert_called_once_with( - self._security_group, - new_name, - new_description + self.compute_client, + self._security_group['id'], + name=new_name, + description=new_description, ) self.assertIsNone(result) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.security_group_find' -) -class TestShowSecurityGroupCompute(TestSecurityGroupCompute): - +@mock.patch.object(compute_v2, 'find_security_group') +class TestShowSecurityGroupCompute(compute_fakes.TestComputev2): # The security group rule to be shown with the group. - _security_group_rule = \ - compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule() + _security_group_rule = compute_fakes.create_one_security_group_rule() # The security group to be shown. 
- _security_group = \ - compute_fakes.FakeSecurityGroup.create_one_security_group( - attrs={'rules': [_security_group_rule]} - ) + _security_group = compute_fakes.create_one_security_group( + attrs={'rules': [_security_group_rule]} + ) columns = ( 'description', @@ -376,7 +361,7 @@ class TestShowSecurityGroupCompute(TestSecurityGroupCompute): ) def setUp(self): - super(TestShowSecurityGroupCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False @@ -384,8 +369,9 @@ def setUp(self): self.cmd = security_group.ShowSecurityGroup(self.app, None) def test_security_group_show_no_options(self, sg_mock): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_security_group_show_all_options(self, sg_mock): sg_mock.return_value = self._security_group @@ -399,6 +385,8 @@ def test_security_group_show_all_options(self, sg_mock): columns, data = self.cmd.take_action(parsed_args) - sg_mock.assert_called_once_with(self._security_group['id']) + sg_mock.assert_called_once_with( + self.compute_client, self._security_group['id'] + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) diff --git a/openstackclient/tests/unit/network/v2/test_security_group_network.py b/openstackclient/tests/unit/network/v2/test_security_group_network.py index 95262bf1ed..c5ccf628ee 100644 --- a/openstackclient/tests/unit/network/v2/test_security_group_network.py +++ b/openstackclient/tests/unit/network/v2/test_security_group_network.py @@ -11,7 +11,6 @@ # under the License. # -from unittest import mock from unittest.mock import call from osc_lib import exceptions @@ -23,62 +22,67 @@ class TestSecurityGroupNetwork(network_fakes.TestNetworkV2): - def setUp(self): - super(TestSecurityGroupNetwork, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateSecurityGroupNetwork(TestSecurityGroupNetwork): - project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() # The security group to be created. 
- _security_group = ( - network_fakes.FakeSecurityGroup.create_one_security_group()) + _security_group = network_fakes.create_one_security_group() columns = ( + 'created_at', 'description', 'id', + 'is_shared', 'name', 'project_id', + 'revision_number', 'rules', 'stateful', 'tags', + 'updated_at', ) data = ( + _security_group.created_at, _security_group.description, _security_group.id, + _security_group.is_shared, _security_group.name, _security_group.project_id, + _security_group.revision_number, security_group.NetworkSecurityGroupRulesColumn([]), _security_group.stateful, _security_group.tags, + _security_group.updated_at, ) def setUp(self): - super(TestCreateSecurityGroupNetwork, self).setUp() + super().setUp() - self.network.create_security_group = mock.Mock( - return_value=self._security_group) + self.network_client.create_security_group.return_value = ( + self._security_group + ) self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = security_group.CreateSecurityGroup(self.app, self.namespace) + self.cmd = security_group.CreateSecurityGroup(self.app, None) def test_create_no_options(self): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_create_min_options(self): arglist = [ @@ -91,18 +95,23 @@ def test_create_min_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group.assert_called_once_with(**{ - 'description': self._security_group.name, - 'name': self._security_group.name, - }) + self.network_client.create_security_group.assert_called_once_with( + **{ + 'description': self._security_group.name, + 'name': self._security_group.name, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_all_options(self): arglist = [ - '--description', self._security_group.description, - '--project', self.project.name, - '--project-domain', self.domain.name, + '--description', + self._security_group.description, + '--project', + self.project.name, + '--project-domain', + self.domain.name, '--stateful', self._security_group.name, ] @@ -117,12 +126,14 @@ def test_create_all_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group.assert_called_once_with(**{ - 'description': self._security_group.description, - 'stateful': self._security_group.stateful, - 'name': self._security_group.name, - 'project_id': self.project.id, - }) + self.network_client.create_security_group.assert_called_once_with( + **{ + 'description': self._security_group.description, + 'stateful': self._security_group.stateful, + 'name': self._security_group.name, + 'project_id': self.project.id, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -142,20 +153,22 @@ def _test_create_with_tag(self, add_tags=True): verifylist.append(('no_tag', True)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group.assert_called_once_with(**{ - 'description': self._security_group.name, - 'name': self._security_group.name, - }) + self.network_client.create_security_group.assert_called_once_with( + 
**{ + 'description': self._security_group.name, + 'name': self._security_group.name, + } + ) if add_tags: - self.network.set_tags.assert_called_once_with( - self._security_group, - tests_utils.CompareBySet(['red', 'blue'])) + self.network_client.set_tags.assert_called_once_with( + self._security_group, tests_utils.CompareBySet(['red', 'blue']) + ) else: - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, data) + self.assertEqual(self.data, data) def test_create_with_tags(self): self._test_create_with_tag(add_tags=True) @@ -165,23 +178,20 @@ def test_create_with_no_tag(self): class TestDeleteSecurityGroupNetwork(TestSecurityGroupNetwork): - # The security groups to be deleted. - _security_groups = \ - network_fakes.FakeSecurityGroup.create_security_groups() + _security_groups = network_fakes.create_security_groups() def setUp(self): - super(TestDeleteSecurityGroupNetwork, self).setUp() + super().setUp() - self.network.delete_security_group = mock.Mock(return_value=None) + self.network_client.delete_security_group.return_value = None - self.network.find_security_group = ( - network_fakes.FakeSecurityGroup.get_security_groups( - self._security_groups) + self.network_client.find_security_group = ( + network_fakes.get_security_groups(self._security_groups) ) # Get the command object to test - self.cmd = security_group.DeleteSecurityGroup(self.app, self.namespace) + self.cmd = security_group.DeleteSecurityGroup(self.app, None) def test_security_group_delete(self): arglist = [ @@ -194,8 +204,9 @@ def test_security_group_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_security_group.assert_called_once_with( - self._security_groups[0]) + self.network_client.delete_security_group.assert_called_once_with( + self._security_groups[0] + ) self.assertIsNone(result) def test_multi_security_groups_delete(self): @@ -214,7 +225,7 @@ def test_multi_security_groups_delete(self): calls = [] for s in self._security_groups: calls.append(call(s)) - self.network.delete_security_group.assert_has_calls(calls) + self.network_client.delete_security_group.assert_has_calls(calls) self.assertIsNone(result) def test_multi_security_groups_delete_with_exception(self): @@ -223,15 +234,15 @@ def test_multi_security_groups_delete_with_exception(self): 'unexist_security_group', ] verifylist = [ - ('group', - [self._security_groups[0].name, 'unexist_security_group']), + ( + 'group', + [self._security_groups[0].name, 'unexist_security_group'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._security_groups[0], exceptions.CommandError] - self.network.find_security_group = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_security_group.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -239,20 +250,20 @@ def test_multi_security_groups_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 groups failed to delete.', str(e)) - self.network.find_security_group.assert_any_call( - self._security_groups[0].name, ignore_missing=False) - self.network.find_security_group.assert_any_call( - 'unexist_security_group', ignore_missing=False) - self.network.delete_security_group.assert_called_once_with( + self.network_client.find_security_group.assert_any_call( + self._security_groups[0].name, 
ignore_missing=False + ) + self.network_client.find_security_group.assert_any_call( + 'unexist_security_group', ignore_missing=False + ) + self.network_client.delete_security_group.assert_called_once_with( self._security_groups[0] ) class TestListSecurityGroupNetwork(TestSecurityGroupNetwork): - # The security group to be listed. - _security_groups = \ - network_fakes.FakeSecurityGroup.create_security_groups(count=3) + _security_groups = network_fakes.create_security_groups(count=3) columns = ( 'ID', @@ -260,26 +271,31 @@ class TestListSecurityGroupNetwork(TestSecurityGroupNetwork): 'Description', 'Project', 'Tags', + 'Shared', ) data = [] for grp in _security_groups: - data.append(( - grp.id, - grp.name, - grp.description, - grp.project_id, - grp.tags, - )) + data.append( + ( + grp.id, + grp.name, + grp.description, + grp.project_id, + grp.tags, + grp.is_shared, + ) + ) def setUp(self): - super(TestListSecurityGroupNetwork, self).setUp() + super().setUp() - self.network.security_groups = mock.Mock( - return_value=self._security_groups) + self.network_client.security_groups.return_value = ( + self._security_groups + ) # Get the command object to test - self.cmd = security_group.ListSecurityGroup(self.app, self.namespace) + self.cmd = security_group.ListSecurityGroup(self.app, None) def test_security_group_list_no_options(self): arglist = [] @@ -290,8 +306,9 @@ def test_security_group_list_no_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.security_groups.assert_called_once_with( - fields=security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE) + self.network_client.security_groups.assert_called_once_with( + fields=security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -306,8 +323,9 @@ def test_security_group_list_all_projects(self): columns, data = self.cmd.take_action(parsed_args) - self.network.security_groups.assert_called_once_with( - fields=security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE) + self.network_client.security_groups.assert_called_once_with( + fields=security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -315,7 +333,8 @@ def test_security_group_list_project(self): project = identity_fakes.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -325,9 +344,10 @@ def test_security_group_list_project(self): columns, data = self.cmd.take_action(parsed_args) filters = { 'project_id': project.id, - 'fields': security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE} + 'fields': security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE, + } - self.network.security_groups.assert_called_once_with(**filters) + self.network_client.security_groups.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -335,8 +355,10 @@ def test_security_group_list_project_domain(self): project = identity_fakes.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -347,18 +369,23 @@ def test_security_group_list_project_domain(self): columns, data = self.cmd.take_action(parsed_args) filters = { 
'project_id': project.id, - 'fields': security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE} + 'fields': security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE, + } - self.network.security_groups.assert_called_once_with(**filters) + self.network_client.security_groups.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_list_with_tag_options(self): arglist = [ - '--tags', 'red,blue', - '--any-tags', 'red,green', - '--not-tags', 'orange,yellow', - '--not-any-tags', 'black,white', + '--tags', + 'red,blue', + '--any-tags', + 'red,green', + '--not-tags', + 'orange,yellow', + '--not-any-tags', + 'black,white', ] verifylist = [ ('tags', ['red', 'blue']), @@ -369,39 +396,43 @@ def test_list_with_tag_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.security_groups.assert_called_once_with( - **{'tags': 'red,blue', - 'any_tags': 'red,green', - 'not_tags': 'orange,yellow', - 'not_any_tags': 'black,white', - 'fields': security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE} + self.network_client.security_groups.assert_called_once_with( + **{ + 'tags': 'red,blue', + 'any_tags': 'red,green', + 'not_tags': 'orange,yellow', + 'not_any_tags': 'black,white', + 'fields': security_group.ListSecurityGroup.FIELDS_TO_RETRIEVE, + } ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestSetSecurityGroupNetwork(TestSecurityGroupNetwork): - # The security group to be set. - _security_group = ( - network_fakes.FakeSecurityGroup.create_one_security_group( - attrs={'tags': ['green', 'red']})) + _security_group = network_fakes.create_one_security_group( + attrs={'tags': ['green', 'red']} + ) def setUp(self): - super(TestSetSecurityGroupNetwork, self).setUp() + super().setUp() + + self.network_client.update_security_group.return_value = None - self.network.update_security_group = mock.Mock(return_value=None) + self.network_client.find_security_group.return_value = ( + self._security_group + ) - self.network.find_security_group = mock.Mock( - return_value=self._security_group) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = security_group.SetSecurityGroup(self.app, self.namespace) + self.cmd = security_group.SetSecurityGroup(self.app, None) def test_set_no_options(self): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_set_no_updates(self): arglist = [ @@ -414,9 +445,8 @@ def test_set_no_updates(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_security_group.assert_called_once_with( - self._security_group, - **{} + self.network_client.update_security_group.assert_called_once_with( + self._security_group, **{} ) self.assertIsNone(result) @@ -424,8 +454,10 @@ def test_set_all_options(self): new_name = 'new-' + self._security_group.name new_description = 'new-' + self._security_group.description arglist = [ - '--name', new_name, - '--description', new_description, + '--name', + new_name, + '--description', + new_description, '--stateful', self._security_group.name, ] @@ -444,9 +476,8 @@ def test_set_all_options(self): 'name': new_name, 'stateful': True, } - 
self.network.update_security_group.assert_called_once_with( - self._security_group, - **attrs + self.network_client.update_security_group.assert_called_once_with( + self._security_group, **attrs ) self.assertIsNone(result) @@ -460,16 +491,15 @@ def _test_set_tags(self, with_tags=True): verifylist = [('no_tag', True)] expected_args = [] arglist.append(self._security_group.name) - verifylist.append( - ('group', self._security_group.name)) + verifylist.append(('group', self._security_group.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertTrue(self.network.update_security_group.called) - self.network.set_tags.assert_called_once_with( - self._security_group, - tests_utils.CompareBySet(expected_args)) + self.assertTrue(self.network_client.update_security_group.called) + self.network_client.set_tags.assert_called_once_with( + self._security_group, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_set_with_tags(self): @@ -480,50 +510,58 @@ def test_set_with_no_tag(self): class TestShowSecurityGroupNetwork(TestSecurityGroupNetwork): - # The security group rule to be shown with the group. - _security_group_rule = \ - network_fakes.FakeSecurityGroupRule.create_one_security_group_rule() + _security_group_rule = network_fakes.create_one_security_group_rule() # The security group to be shown. - _security_group = \ - network_fakes.FakeSecurityGroup.create_one_security_group( - attrs={'security_group_rules': [_security_group_rule._info]} - ) + _security_group = network_fakes.create_one_security_group( + attrs={'security_group_rules': [dict(_security_group_rule)]} + ) columns = ( + 'created_at', 'description', 'id', + 'is_shared', 'name', 'project_id', + 'revision_number', 'rules', 'stateful', 'tags', + 'updated_at', ) data = ( + _security_group.created_at, _security_group.description, _security_group.id, + _security_group.is_shared, _security_group.name, _security_group.project_id, + _security_group.revision_number, security_group.NetworkSecurityGroupRulesColumn( - [_security_group_rule._info]), + [dict(_security_group_rule)] + ), _security_group.stateful, _security_group.tags, + _security_group.updated_at, ) def setUp(self): - super(TestShowSecurityGroupNetwork, self).setUp() + super().setUp() - self.network.find_security_group = mock.Mock( - return_value=self._security_group) + self.network_client.find_security_group.return_value = ( + self._security_group + ) # Get the command object to test - self.cmd = security_group.ShowSecurityGroup(self.app, self.namespace) + self.cmd = security_group.ShowSecurityGroup(self.app, None) def test_show_no_options(self): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_show_all_options(self): arglist = [ @@ -536,34 +574,37 @@ def test_show_all_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.find_security_group.assert_called_once_with( - self._security_group.id, ignore_missing=False) + self.network_client.find_security_group.assert_called_once_with( + self._security_group.id, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) class TestUnsetSecurityGroupNetwork(TestSecurityGroupNetwork): - # The security group to be unset. 
- _security_group = ( - network_fakes.FakeSecurityGroup.create_one_security_group( - attrs={'tags': ['green', 'red']})) + _security_group = network_fakes.create_one_security_group( + attrs={'tags': ['green', 'red']} + ) def setUp(self): - super(TestUnsetSecurityGroupNetwork, self).setUp() + super().setUp() + + self.network_client.update_security_group.return_value = None - self.network.update_security_group = mock.Mock(return_value=None) + self.network_client.find_security_group.return_value = ( + self._security_group + ) - self.network.find_security_group = mock.Mock( - return_value=self._security_group) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = security_group.UnsetSecurityGroup(self.app, self.namespace) + self.cmd = security_group.UnsetSecurityGroup(self.app, None) def test_set_no_options(self): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_set_no_updates(self): arglist = [ @@ -576,8 +617,8 @@ def test_set_no_updates(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_security_group.called) - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.update_security_group.called) + self.assertFalse(self.network_client.set_tags.called) self.assertIsNone(result) def _test_unset_tags(self, with_tags=True): @@ -590,16 +631,15 @@ def _test_unset_tags(self, with_tags=True): verifylist = [('all_tag', True)] expected_args = [] arglist.append(self._security_group.name) - verifylist.append( - ('group', self._security_group.name)) + verifylist.append(('group', self._security_group.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_security_group.called) - self.network.set_tags.assert_called_once_with( - self._security_group, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_security_group.called) + self.network_client.set_tags.assert_called_once_with( + self._security_group, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_unset_with_tags(self): diff --git a/openstackclient/tests/unit/network/v2/test_security_group_rule_compute.py b/openstackclient/tests/unit/network/v2/test_security_group_rule_compute.py index b7e38afb18..9cab52e392 100644 --- a/openstackclient/tests/unit/network/v2/test_security_group_rule_compute.py +++ b/openstackclient/tests/unit/network/v2/test_security_group_rule_compute.py @@ -9,13 +9,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-# from unittest import mock -from unittest.mock import call from osc_lib import exceptions +from openstackclient.api import compute_v2 from openstackclient.network import utils as network_utils from openstackclient.network.v2 import security_group_rule from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes @@ -23,20 +22,8 @@ from openstackclient.tests.unit import utils as tests_utils -class TestSecurityGroupRuleCompute(compute_fakes.TestComputev2): - - def setUp(self): - super(TestSecurityGroupRuleCompute, self).setUp() - - # Get a shortcut to the network client - self.compute = self.app.client_manager.compute - - -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.security_group_rule_create' -) -class TestCreateSecurityGroupRuleCompute(TestSecurityGroupRuleCompute): - +@mock.patch.object(compute_v2, 'create_security_group_rule') +class TestCreateSecurityGroupRuleCompute(compute_fakes.TestComputev2): project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() @@ -44,24 +31,26 @@ class TestCreateSecurityGroupRuleCompute(TestSecurityGroupRuleCompute): _security_group_rule = None # The security group that will contain the rule created. - _security_group = \ - compute_fakes.FakeSecurityGroup.create_one_security_group() + _security_group = compute_fakes.create_one_security_group() def _setup_security_group_rule(self, attrs=None): - self._security_group_rule = \ - compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule( - attrs) - expected_columns, expected_data = \ - security_group_rule._format_security_group_rule_show( - self._security_group_rule) + self._security_group_rule = ( + compute_fakes.create_one_security_group_rule(attrs) + ) + ( + expected_columns, + expected_data, + ) = network_utils.format_security_group_rule_show( + self._security_group_rule + ) return expected_columns, expected_data def setUp(self): - super(TestCreateSecurityGroupRuleCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - self.compute.api.security_group_find = mock.Mock( + compute_v2.find_security_group = mock.Mock( return_value=self._security_group, ) @@ -69,69 +58,108 @@ def setUp(self): self.cmd = security_group_rule.CreateSecurityGroupRule(self.app, None) def test_security_group_rule_create_no_options(self, sgr_mock): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_security_group_rule_create_all_remote_options(self, sgr_mock): arglist = [ - '--remote-ip', '10.10.0.0/24', - '--remote-group', self._security_group['id'], + '--remote-ip', + '10.10.0.0/24', + '--remote-group', + self._security_group['id'], self._security_group['id'], ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) def test_security_group_rule_create_bad_protocol(self, sgr_mock): arglist = [ - '--protocol', 'foo', + '--protocol', + 'foo', self._security_group['id'], ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) def test_security_group_rule_create_all_protocol_options(self, sgr_mock): arglist = [ - '--protocol', 'tcp', - '--proto', 'tcp', + '--protocol', + 'tcp', + '--proto', + 
'tcp', self._security_group['id'], ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) def test_security_group_rule_create_network_options(self, sgr_mock): arglist = [ '--ingress', - '--ethertype', 'IPv4', - '--icmp-type', '3', - '--icmp-code', '11', - '--project', self.project.name, - '--project-domain', self.domain.name, + '--ethertype', + 'IPv4', + '--icmp-type', + '3', + '--icmp-code', + '11', + '--project', + self.project.name, + '--project-domain', + self.domain.name, self._security_group['id'], ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) def test_security_group_rule_create_default_rule(self, sgr_mock): expected_columns, expected_data = self._setup_security_group_rule() sgr_mock.return_value = self._security_group_rule - dst_port = str(self._security_group_rule['from_port']) + ':' + \ - str(self._security_group_rule['to_port']) + dst_port = ( + str(self._security_group_rule['from_port']) + + ':' + + str(self._security_group_rule['to_port']) + ) arglist = [ - '--dst-port', dst_port, + '--dst-port', + dst_port, self._security_group['id'], ] verifylist = [ - ('dst_port', (self._security_group_rule['from_port'], - self._security_group_rule['to_port'])), + ( + 'dst_port', + ( + self._security_group_rule['from_port'], + self._security_group_rule['to_port'], + ), + ), ('group', self._security_group['id']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - # TODO(dtroyer): save this for the security group rule changes - # self.compute.api.security_group_rule_create.assert_called_once_with( sgr_mock.assert_called_once_with( + self.compute_client, security_group_id=self._security_group['id'], ip_protocol=self._security_group_rule['ip_protocol'], from_port=self._security_group_rule['from_port'], @@ -143,20 +171,29 @@ def test_security_group_rule_create_default_rule(self, sgr_mock): self.assertEqual(expected_data, data) def test_security_group_rule_create_remote_group(self, sgr_mock): - expected_columns, expected_data = self._setup_security_group_rule({ - 'from_port': 22, - 'to_port': 22, - 'group': {'name': self._security_group['name']}, - }) + expected_columns, expected_data = self._setup_security_group_rule( + { + 'from_port': 22, + 'to_port': 22, + 'group': {'name': self._security_group['name']}, + } + ) sgr_mock.return_value = self._security_group_rule arglist = [ - '--dst-port', str(self._security_group_rule['from_port']), - '--remote-group', self._security_group['name'], + '--dst-port', + str(self._security_group_rule['from_port']), + '--remote-group', + self._security_group['name'], self._security_group['id'], ] verifylist = [ - ('dst_port', (self._security_group_rule['from_port'], - self._security_group_rule['to_port'])), + ( + 'dst_port', + ( + self._security_group_rule['from_port'], + self._security_group_rule['to_port'], + ), + ), ('remote_group', self._security_group['name']), ('group', self._security_group['id']), ] @@ -164,9 +201,8 @@ def test_security_group_rule_create_remote_group(self, sgr_mock): columns, data = self.cmd.take_action(parsed_args) - # TODO(dtroyer): save this for the security group rule changes - # self.compute.api.security_group_rule_create.assert_called_once_with( 
sgr_mock.assert_called_once_with( + self.compute_client, security_group_id=self._security_group['id'], ip_protocol=self._security_group_rule['ip_protocol'], from_port=self._security_group_rule['from_port'], @@ -178,16 +214,20 @@ def test_security_group_rule_create_remote_group(self, sgr_mock): self.assertEqual(expected_data, data) def test_security_group_rule_create_remote_ip(self, sgr_mock): - expected_columns, expected_data = self._setup_security_group_rule({ - 'ip_protocol': 'icmp', - 'from_port': -1, - 'to_port': -1, - 'ip_range': {'cidr': '10.0.2.0/24'}, - }) + expected_columns, expected_data = self._setup_security_group_rule( + { + 'ip_protocol': 'icmp', + 'from_port': -1, + 'to_port': -1, + 'ip_range': {'cidr': '10.0.2.0/24'}, + } + ) sgr_mock.return_value = self._security_group_rule arglist = [ - '--protocol', self._security_group_rule['ip_protocol'], - '--remote-ip', self._security_group_rule['ip_range']['cidr'], + '--protocol', + self._security_group_rule['ip_protocol'], + '--remote-ip', + self._security_group_rule['ip_range']['cidr'], self._security_group['id'], ] verifylist = [ @@ -199,9 +239,8 @@ def test_security_group_rule_create_remote_ip(self, sgr_mock): columns, data = self.cmd.take_action(parsed_args) - # TODO(dtroyer): save this for the security group rule changes - # self.compute.api.security_group_rule_create.assert_called_once_with( sgr_mock.assert_called_once_with( + self.compute_client, security_group_id=self._security_group['id'], ip_protocol=self._security_group_rule['ip_protocol'], from_port=self._security_group_rule['from_port'], @@ -213,16 +252,20 @@ def test_security_group_rule_create_remote_ip(self, sgr_mock): self.assertEqual(expected_data, data) def test_security_group_rule_create_proto_option(self, sgr_mock): - expected_columns, expected_data = self._setup_security_group_rule({ - 'ip_protocol': 'icmp', - 'from_port': -1, - 'to_port': -1, - 'ip_range': {'cidr': '10.0.2.0/24'}, - }) + expected_columns, expected_data = self._setup_security_group_rule( + { + 'ip_protocol': 'icmp', + 'from_port': -1, + 'to_port': -1, + 'ip_range': {'cidr': '10.0.2.0/24'}, + } + ) sgr_mock.return_value = self._security_group_rule arglist = [ - '--proto', self._security_group_rule['ip_protocol'], - '--remote-ip', self._security_group_rule['ip_range']['cidr'], + '--proto', + self._security_group_rule['ip_protocol'], + '--remote-ip', + self._security_group_rule['ip_range']['cidr'], self._security_group['id'], ] verifylist = [ @@ -235,9 +278,8 @@ def test_security_group_rule_create_proto_option(self, sgr_mock): columns, data = self.cmd.take_action(parsed_args) - # TODO(dtroyer): save this for the security group rule changes - # self.compute.api.security_group_rule_create.assert_called_once_with( sgr_mock.assert_called_once_with( + self.compute_client, security_group_id=self._security_group['id'], ip_protocol=self._security_group_rule['ip_protocol'], from_port=self._security_group_rule['from_port'], @@ -249,18 +291,13 @@ def test_security_group_rule_create_proto_option(self, sgr_mock): self.assertEqual(expected_data, data) -@mock.patch( - 'openstackclient.api.compute_v2.APIv2.security_group_rule_delete' -) -class TestDeleteSecurityGroupRuleCompute(TestSecurityGroupRuleCompute): - +@mock.patch.object(compute_v2, 'delete_security_group_rule') +class TestDeleteSecurityGroupRuleCompute(compute_fakes.TestComputev2): # The security group rule to be deleted. 
- _security_group_rules = \ - compute_fakes.FakeSecurityGroupRule.create_security_group_rules( - count=2) + _security_group_rules = compute_fakes.create_security_group_rules(count=2) def setUp(self): - super(TestDeleteSecurityGroupRuleCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False @@ -279,26 +316,34 @@ def test_security_group_rule_delete(self, sgr_mock): result = self.cmd.take_action(parsed_args) sgr_mock.assert_called_once_with( - self._security_group_rules[0]['id']) + self.compute_client, self._security_group_rules[0]['id'] + ) self.assertIsNone(result) def test_security_group_rule_delete_multi(self, sgr_mock): - arglist = [] - verifylist = [] - - for s in self._security_group_rules: - arglist.append(s['id']) + arglist = [ + self._security_group_rules[0]['id'], + self._security_group_rules[1]['id'], + ] verifylist = [ ('rule', arglist), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - calls = [] - for s in self._security_group_rules: - calls.append(call(s['id'])) - sgr_mock.assert_has_calls(calls) + sgr_mock.assert_has_calls( + [ + mock.call( + self.compute_client, + self._security_group_rules[0]['id'], + ), + mock.call( + self.compute_client, + self._security_group_rules[1]['id'], + ), + ] + ) self.assertIsNone(result) def test_security_group_rule_delete_multi_with_exception(self, sgr_mock): @@ -307,13 +352,11 @@ def test_security_group_rule_delete_multi_with_exception(self, sgr_mock): 'unexist_rule', ] verifylist = [ - ('rule', - [self._security_group_rules[0]['id'], 'unexist_rule']), + ('rule', arglist), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - find_mock_result = [None, exceptions.CommandError] - sgr_mock.side_effect = find_mock_result + sgr_mock.side_effect = [None, exceptions.NotFound('foo')] try: self.cmd.take_action(parsed_args) @@ -321,36 +364,39 @@ def test_security_group_rule_delete_multi_with_exception(self, sgr_mock): except exceptions.CommandError as e: self.assertEqual('1 of 2 rules failed to delete.', str(e)) - sgr_mock.assert_any_call( - self._security_group_rules[0]['id']) - sgr_mock.assert_any_call( - 'unexist_rule') - + sgr_mock.assert_has_calls( + [ + mock.call( + self.compute_client, + self._security_group_rules[0]['id'], + ), + mock.call(self.compute_client, 'unexist_rule'), + ] + ) -class TestListSecurityGroupRuleCompute(TestSecurityGroupRuleCompute): +class TestListSecurityGroupRuleCompute(compute_fakes.TestComputev2): # The security group to hold the rules. - _security_group = \ - compute_fakes.FakeSecurityGroup.create_one_security_group() + _security_group = compute_fakes.create_one_security_group() # The security group rule to be listed. 
- _security_group_rule_tcp = \ - compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule({ + _security_group_rule_tcp = compute_fakes.create_one_security_group_rule( + { 'ip_protocol': 'tcp', - 'ethertype': 'IPv4', 'from_port': 80, 'to_port': 80, 'group': {'name': _security_group['name']}, - }) - _security_group_rule_icmp = \ - compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule({ + } + ) + _security_group_rule_icmp = compute_fakes.create_one_security_group_rule( + { 'ip_protocol': 'icmp', - 'ethertype': 'IPv4', 'from_port': -1, 'to_port': -1, 'ip_range': {'cidr': '10.0.2.0/24'}, 'group': {'name': _security_group['name']}, - }) + } + ) _security_group['rules'] = [ _security_group_rule_tcp, _security_group_rule_icmp, @@ -365,8 +411,9 @@ class TestListSecurityGroupRuleCompute(TestSecurityGroupRuleCompute): 'Direction', 'Remote Security Group', ) - expected_columns_no_group = \ - expected_columns_with_group + ('Security Group',) + expected_columns_no_group = expected_columns_with_group + ( + 'Security Group', + ) expected_data_with_group = [] expected_data_no_group = [] @@ -377,25 +424,26 @@ class TestListSecurityGroupRuleCompute(TestSecurityGroupRuleCompute): expected_rule_with_group = ( rule['id'], rule['ip_protocol'], - rule['ethertype'], + '', # ethertype is a neutron-only thing rule['ip_range'], rule['port_range'], rule['remote_security_group'], ) - expected_rule_no_group = expected_rule_with_group + \ - (_security_group_rule['parent_group_id'],) + expected_rule_no_group = expected_rule_with_group + ( + _security_group_rule['parent_group_id'], + ) expected_data_with_group.append(expected_rule_with_group) expected_data_no_group.append(expected_rule_no_group) def setUp(self): - super(TestListSecurityGroupRuleCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False - self.compute.api.security_group_find = mock.Mock( + compute_v2.find_security_group = mock.Mock( return_value=self._security_group, ) - self.compute.api.security_group_list = mock.Mock( + compute_v2.list_security_groups = mock.Mock( return_value=[self._security_group], ) @@ -406,8 +454,8 @@ def test_security_group_rule_list_default(self): parsed_args = self.check_parser(self.cmd, [], []) columns, data = self.cmd.take_action(parsed_args) - self.compute.api.security_group_list.assert_called_once_with( - search_opts={'all_tenants': False} + compute_v2.list_security_groups.assert_called_once_with( + self.compute_client, all_projects=False ) self.assertEqual(self.expected_columns_no_group, columns) self.assertEqual(self.expected_data_no_group, list(data)) @@ -422,8 +470,8 @@ def test_security_group_rule_list_with_group(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.compute.api.security_group_find.assert_called_once_with( - self._security_group['id'] + compute_v2.find_security_group.assert_called_once_with( + self.compute_client, self._security_group['id'] ) self.assertEqual(self.expected_columns_with_group, columns) self.assertEqual(self.expected_data_with_group, list(data)) @@ -438,8 +486,8 @@ def test_security_group_rule_list_all_projects(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.compute.api.security_group_list.assert_called_once_with( - search_opts={'all_tenants': True} + compute_v2.list_security_groups.assert_called_once_with( + self.compute_client, all_projects=True ) 
self.assertEqual(self.expected_columns_no_group, columns) self.assertEqual(self.expected_data_no_group, list(data)) @@ -454,32 +502,30 @@ def test_security_group_rule_list_with_ignored_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.compute.api.security_group_list.assert_called_once_with( - search_opts={'all_tenants': False} + compute_v2.list_security_groups.assert_called_once_with( + self.compute_client, all_projects=False ) self.assertEqual(self.expected_columns_no_group, columns) self.assertEqual(self.expected_data_no_group, list(data)) -class TestShowSecurityGroupRuleCompute(TestSecurityGroupRuleCompute): - +class TestShowSecurityGroupRuleCompute(compute_fakes.TestComputev2): # The security group rule to be shown. - _security_group_rule = \ - compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule() + _security_group_rule = compute_fakes.create_one_security_group_rule() - columns, data = \ - security_group_rule._format_security_group_rule_show( - _security_group_rule) + columns, data = network_utils.format_security_group_rule_show( + _security_group_rule + ) def setUp(self): - super(TestShowSecurityGroupRuleCompute, self).setUp() + super().setUp() self.app.client_manager.network_endpoint_enabled = False # Build a security group fake customized for this test. security_group_rules = [self._security_group_rule] security_group = {'rules': security_group_rules} - self.compute.api.security_group_list = mock.Mock( + compute_v2.list_security_groups = mock.Mock( return_value=[security_group], ) @@ -487,8 +533,9 @@ def setUp(self): self.cmd = security_group_rule.ShowSecurityGroupRule(self.app, None) def test_security_group_rule_show_no_options(self): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_security_group_rule_show_all_options(self): arglist = [ @@ -501,6 +548,8 @@ def test_security_group_rule_show_all_options(self): columns, data = self.cmd.take_action(parsed_args) - self.compute.api.security_group_list.assert_called_once_with() + compute_v2.list_security_groups.assert_called_once_with( + self.compute_client + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/network/v2/test_security_group_rule_network.py b/openstackclient/tests/unit/network/v2/test_security_group_rule_network.py index 3c90f2814c..920e891413 100644 --- a/openstackclient/tests/unit/network/v2/test_security_group_rule_network.py +++ b/openstackclient/tests/unit/network/v2/test_security_group_rule_network.py @@ -11,11 +11,11 @@ # under the License. 
# -from unittest import mock from unittest.mock import call from osc_lib import exceptions +from openstackclient.network import utils as network_utils from openstackclient.network.v2 import security_group_rule from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit.network.v2 import fakes as network_fakes @@ -23,33 +23,29 @@ class TestSecurityGroupRuleNetwork(network_fakes.TestNetworkV2): - def setUp(self): - super(TestSecurityGroupRuleNetwork, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork): - project = identity_fakes.FakeProject.create_one_project() domain = identity_fakes.FakeDomain.create_one_domain() # The security group rule to be created. _security_group_rule = None # The security group that will contain the rule created. - _security_group = \ - network_fakes.FakeSecurityGroup.create_one_security_group() + _security_group = network_fakes.create_one_security_group() # The address group to be used in security group rules _address_group = network_fakes.create_one_address_group() expected_columns = ( + 'created_at', 'description', 'direction', 'ether_type', @@ -61,18 +57,23 @@ class TestCreateSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork): 'remote_address_group_id', 'remote_group_id', 'remote_ip_prefix', + 'revision_number', 'security_group_id', + 'updated_at', ) expected_data = None def _setup_security_group_rule(self, attrs=None): - self._security_group_rule = \ - network_fakes.FakeSecurityGroupRule.create_one_security_group_rule( - attrs) - self.network.create_security_group_rule = mock.Mock( - return_value=self._security_group_rule) + self._security_group_rule = ( + network_fakes.create_one_security_group_rule(attrs) + ) + self.network_client.create_security_group_rule.return_value = ( + self._security_group_rule + ) + self.expected_data = ( + self._security_group_rule.created_at, self._security_group_rule.description, self._security_group_rule.direction, self._security_group_rule.ether_type, @@ -84,50 +85,69 @@ def _setup_security_group_rule(self, attrs=None): self._security_group_rule.remote_address_group_id, self._security_group_rule.remote_group_id, self._security_group_rule.remote_ip_prefix, + self._security_group_rule.revision_number, self._security_group_rule.security_group_id, + self._security_group_rule.updated_at, ) def setUp(self): - super(TestCreateSecurityGroupRuleNetwork, self).setUp() + super().setUp() - self.network.find_security_group = mock.Mock( - return_value=self._security_group) + self.network_client.find_security_group.return_value = ( + self._security_group + ) - self.network.find_address_group = mock.Mock( - return_value=self._address_group) + self.network_client.find_address_group.return_value = ( + self._address_group + ) self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain # Get the command object to test - self.cmd = security_group_rule.CreateSecurityGroupRule( - self.app, self.namespace) + self.cmd = security_group_rule.CreateSecurityGroupRule(self.app, None) def test_create_no_options(self): - 
self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_create_all_remote_options(self): arglist = [ - '--remote-ip', '10.10.0.0/24', - '--remote-group', self._security_group.id, - '--remote-address-group', self._address_group.id, + '--remote-ip', + '10.10.0.0/24', + '--remote-group', + self._security_group.id, + '--remote-address-group', + self._address_group.id, self._security_group.id, ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) def test_create_bad_ethertype(self): arglist = [ - '--ethertype', 'foo', + '--ethertype', + 'foo', self._security_group.id, ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) def test_lowercase_ethertype(self): arglist = [ - '--ethertype', 'ipv4', + '--ethertype', + 'ipv4', self._security_group.id, ] parsed_args = self.check_parser(self.cmd, arglist, []) @@ -135,7 +155,8 @@ def test_lowercase_ethertype(self): def test_lowercase_v6_ethertype(self): arglist = [ - '--ethertype', 'ipv6', + '--ethertype', + 'ipv6', self._security_group.id, ] parsed_args = self.check_parser(self.cmd, arglist, []) @@ -143,7 +164,8 @@ def test_lowercase_v6_ethertype(self): def test_proper_case_ethertype(self): arglist = [ - '--ethertype', 'IPv6', + '--ethertype', + 'IPv6', self._security_group.id, ] parsed_args = self.check_parser(self.cmd, arglist, []) @@ -151,18 +173,28 @@ def test_proper_case_ethertype(self): def test_create_all_protocol_options(self): arglist = [ - '--protocol', 'tcp', - '--proto', 'tcp', + '--protocol', + 'tcp', + '--proto', + 'tcp', self._security_group.id, ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, []) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + [], + ) def test_create_all_port_range_options(self): arglist = [ - '--dst-port', '80:80', - '--icmp-type', '3', - '--icmp-code', '1', + '--dst-port', + '80:80', + '--icmp-type', + '3', + '--icmp-code', + '1', self._security_group.id, ] verifylist = [ @@ -172,49 +204,65 @@ def test_create_all_port_range_options(self): ('group', self._security_group.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_create_default_rule(self): - self._setup_security_group_rule({ - 'protocol': 'tcp', - 'port_range_max': 443, - 'port_range_min': 443, - }) + self._setup_security_group_rule( + { + 'protocol': 'tcp', + 'port_range_max': 443, + 'port_range_min': 443, + } + ) arglist = [ - '--protocol', 'tcp', - '--dst-port', str(self._security_group_rule.port_range_min), + '--protocol', + 'tcp', + '--dst-port', + str(self._security_group_rule.port_range_min), self._security_group.id, ] verifylist = [ - ('dst_port', (self._security_group_rule.port_range_min, - self._security_group_rule.port_range_max)), + ( + 'dst_port', + ( + self._security_group_rule.port_range_min, + self._security_group_rule.port_range_max, + ), + ), ('group', self._security_group.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) 
columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'port_range_max': self._security_group_rule.port_range_max, - 'port_range_min': self._security_group_rule.port_range_min, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'port_range_max': self._security_group_rule.port_range_max, + 'port_range_min': self._security_group_rule.port_range_min, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_proto_option(self): - self._setup_security_group_rule({ - 'protocol': 'icmp', - 'remote_ip_prefix': '10.0.2.0/24', - }) + self._setup_security_group_rule( + { + 'protocol': 'icmp', + 'remote_ip_prefix': '10.0.2.0/24', + } + ) arglist = [ - '--proto', self._security_group_rule.protocol, - '--remote-ip', self._security_group_rule.remote_ip_prefix, + '--proto', + self._security_group_rule.protocol, + '--remote-ip', + self._security_group_rule.remote_ip_prefix, self._security_group.id, ] verifylist = [ @@ -227,24 +275,30 @@ def test_create_proto_option(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_protocol_any(self): - self._setup_security_group_rule({ - 'protocol': None, - 'remote_ip_prefix': '10.0.2.0/24', - }) + self._setup_security_group_rule( + { + 'protocol': None, + 'remote_ip_prefix': '10.0.2.0/24', + } + ) arglist = [ - '--proto', 'any', - '--remote-ip', self._security_group_rule.remote_ip_prefix, + '--proto', + 'any', + '--remote-ip', + self._security_group_rule.remote_ip_prefix, self._security_group.id, ] verifylist = [ @@ -257,24 +311,30 @@ def test_create_protocol_any(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, 
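The `_setup_security_group_rule()` helper earlier in this hunk now configures the shared `network_client` mock in place (`...create_security_group_rule.return_value = ...`) instead of replacing the attribute with a fresh `mock.Mock`. Both styles satisfy the `assert_called_once_with()` checks that follow; configuring `.return_value` keeps the existing child mock (and any spec the shared fixture may have given it) intact. A minimal sketch, using a plain `Mock` in place of the fakes' client:

from unittest import mock

rule = {'id': 'rule-1', 'direction': 'ingress'}

# older style: overwrite the attribute with a brand-new Mock
client = mock.Mock()
client.create_security_group_rule = mock.Mock(return_value=rule)

# style used by the updated tests: configure the existing child mock in place
client = mock.Mock()
client.create_security_group_rule.return_value = rule

created = client.create_security_group_rule(security_group_id='sg-1')
assert created == rule
client.create_security_group_rule.assert_called_once_with(
    security_group_id='sg-1'
)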
+ 'ethertype': self._security_group_rule.ether_type, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_remote_address_group(self): - self._setup_security_group_rule({ - 'protocol': 'icmp', - 'remote_address_group_id': self._address_group.id, - }) + self._setup_security_group_rule( + { + 'protocol': 'icmp', + 'remote_address_group_id': self._address_group.id, + } + ) arglist = [ - '--protocol', 'icmp', - '--remote-address-group', self._address_group.name, + '--protocol', + 'icmp', + '--remote-address-group', + self._address_group.name, self._security_group.id, ] verifylist = [ @@ -285,32 +345,44 @@ def test_create_remote_address_group(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'protocol': self._security_group_rule.protocol, - 'remote_address_group_id': self._address_group.id, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'protocol': self._security_group_rule.protocol, + 'remote_address_group_id': self._address_group.id, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_remote_group(self): - self._setup_security_group_rule({ - 'protocol': 'tcp', - 'port_range_max': 22, - 'port_range_min': 22, - }) + self._setup_security_group_rule( + { + 'protocol': 'tcp', + 'port_range_max': 22, + 'port_range_min': 22, + } + ) arglist = [ - '--protocol', 'tcp', - '--dst-port', str(self._security_group_rule.port_range_min), + '--protocol', + 'tcp', + '--dst-port', + str(self._security_group_rule.port_range_min), '--ingress', - '--remote-group', self._security_group.name, + '--remote-group', + self._security_group.name, self._security_group.id, ] verifylist = [ - ('dst_port', (self._security_group_rule.port_range_min, - self._security_group_rule.port_range_max)), + ( + 'dst_port', + ( + self._security_group_rule.port_range_min, + self._security_group_rule.port_range_max, + ), + ), ('ingress', True), ('remote_group', self._security_group.name), ('group', self._security_group.id), @@ -319,25 +391,30 @@ def test_create_remote_group(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'port_range_max': self._security_group_rule.port_range_max, - 'port_range_min': self._security_group_rule.port_range_min, - 'protocol': self._security_group_rule.protocol, - 'remote_group_id': self._security_group.id, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'port_range_max': self._security_group_rule.port_range_max, + 'port_range_min': self._security_group_rule.port_range_min, + 'protocol': self._security_group_rule.protocol, + 
'remote_group_id': self._security_group.id, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_source_group(self): - self._setup_security_group_rule({ - 'remote_group_id': self._security_group.id, - }) + self._setup_security_group_rule( + { + 'remote_group_id': self._security_group.id, + } + ) arglist = [ '--ingress', - '--remote-group', self._security_group.name, + '--remote-group', + self._security_group.name, self._security_group.id, ] verifylist = [ @@ -349,24 +426,30 @@ def test_create_source_group(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'protocol': self._security_group_rule.protocol, - 'remote_group_id': self._security_group_rule.remote_group_id, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'protocol': self._security_group_rule.protocol, + 'remote_group_id': self._security_group_rule.remote_group_id, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_source_ip(self): - self._setup_security_group_rule({ - 'protocol': 'icmp', - 'remote_ip_prefix': '10.0.2.0/24', - }) + self._setup_security_group_rule( + { + 'protocol': 'icmp', + 'remote_ip_prefix': '10.0.2.0/24', + } + ) arglist = [ - '--protocol', self._security_group_rule.protocol, - '--remote-ip', self._security_group_rule.remote_ip_prefix, + '--protocol', + self._security_group_rule.protocol, + '--remote-ip', + self._security_group_rule.remote_ip_prefix, self._security_group.id, ] verifylist = [ @@ -378,24 +461,30 @@ def test_create_source_ip(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_remote_ip(self): - self._setup_security_group_rule({ - 'protocol': 'icmp', - 'remote_ip_prefix': '10.0.2.0/24', - }) + self._setup_security_group_rule( + { + 'protocol': 'icmp', + 'remote_ip_prefix': '10.0.2.0/24', + } + ) arglist = [ - '--protocol', self._security_group_rule.protocol, - '--remote-ip', self._security_group_rule.remote_ip_prefix, + '--protocol', + self._security_group_rule.protocol, + '--remote-ip', + self._security_group_rule.remote_ip_prefix, self._security_group.id, ] verifylist = [ @@ -407,38 +496,52 @@ def test_create_remote_ip(self): columns, data = self.cmd.take_action(parsed_args) - 
self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_network_options(self): - self._setup_security_group_rule({ - 'direction': 'egress', - 'ether_type': 'IPv6', - 'port_range_max': 443, - 'port_range_min': 443, - 'protocol': '6', - 'remote_group_id': None, - 'remote_ip_prefix': '::/0', - }) + self._setup_security_group_rule( + { + 'direction': 'egress', + 'ether_type': 'IPv6', + 'port_range_max': 443, + 'port_range_min': 443, + 'protocol': '6', + 'remote_group_id': None, + 'remote_ip_prefix': '::/0', + } + ) arglist = [ - '--dst-port', str(self._security_group_rule.port_range_min), + '--dst-port', + str(self._security_group_rule.port_range_min), '--egress', - '--ethertype', self._security_group_rule.ether_type, - '--project', self.project.name, - '--project-domain', self.domain.name, - '--protocol', self._security_group_rule.protocol, + '--ethertype', + self._security_group_rule.ether_type, + '--project', + self.project.name, + '--project-domain', + self.domain.name, + '--protocol', + self._security_group_rule.protocol, self._security_group.id, ] verifylist = [ - ('dst_port', (self._security_group_rule.port_range_min, - self._security_group_rule.port_range_max)), + ( + 'dst_port', + ( + self._security_group_rule.port_range_min, + self._security_group_rule.port_range_max, + ), + ), ('egress', True), ('ethertype', self._security_group_rule.ether_type), ('project', self.project.name), @@ -450,23 +553,27 @@ def test_create_network_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'port_range_max': self._security_group_rule.port_range_max, - 'port_range_min': self._security_group_rule.port_range_min, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - 'project_id': self.project.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'port_range_max': self._security_group_rule.port_range_max, + 'port_range_min': self._security_group_rule.port_range_min, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + 'project_id': self.project.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_tcp_with_icmp_type(self): arglist = [ - '--protocol', 'tcp', - '--icmp-type', '15', + '--protocol', + 'tcp', + '--icmp-type', + '15', self._security_group.id, ] 
verifylist = [ @@ -475,13 +582,16 @@ def test_create_tcp_with_icmp_type(self): ('group', self._security_group.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_create_icmp_code(self): arglist = [ - '--protocol', '1', - '--icmp-code', '1', + '--protocol', + '1', + '--icmp-code', + '1', self._security_group.id, ] verifylist = [ @@ -490,20 +600,26 @@ def test_create_icmp_code(self): ('group', self._security_group.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_create_icmp_code_zero(self): - self._setup_security_group_rule({ - 'port_range_min': 15, - 'port_range_max': 0, - 'protocol': 'icmp', - 'remote_ip_prefix': '0.0.0.0/0', - }) + self._setup_security_group_rule( + { + 'port_range_min': 15, + 'port_range_max': 0, + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) arglist = [ - '--protocol', self._security_group_rule.protocol, - '--icmp-type', str(self._security_group_rule.port_range_min), - '--icmp-code', str(self._security_group_rule.port_range_max), + '--protocol', + self._security_group_rule.protocol, + '--icmp-type', + str(self._security_group_rule.port_range_min), + '--icmp-code', + str(self._security_group_rule.port_range_max), self._security_group.id, ] verifylist = [ @@ -518,16 +634,21 @@ def test_create_icmp_code_zero(self): self.assertEqual(self.expected_data, data) def test_create_icmp_code_greater_than_zero(self): - self._setup_security_group_rule({ - 'port_range_min': 15, - 'port_range_max': 18, - 'protocol': 'icmp', - 'remote_ip_prefix': '0.0.0.0/0', - }) + self._setup_security_group_rule( + { + 'port_range_min': 15, + 'port_range_max': 18, + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) arglist = [ - '--protocol', self._security_group_rule.protocol, - '--icmp-type', str(self._security_group_rule.port_range_min), - '--icmp-code', str(self._security_group_rule.port_range_max), + '--protocol', + self._security_group_rule.protocol, + '--icmp-type', + str(self._security_group_rule.port_range_min), + '--icmp-code', + str(self._security_group_rule.port_range_max), self._security_group.id, ] verifylist = [ @@ -542,16 +663,21 @@ def test_create_icmp_code_greater_than_zero(self): self.assertEqual(self.expected_data, data) def test_create_icmp_code_negative_value(self): - self._setup_security_group_rule({ - 'port_range_min': 15, - 'port_range_max': None, - 'protocol': 'icmp', - 'remote_ip_prefix': '0.0.0.0/0', - }) + self._setup_security_group_rule( + { + 'port_range_min': 15, + 'port_range_max': None, + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) arglist = [ - '--protocol', self._security_group_rule.protocol, - '--icmp-type', str(self._security_group_rule.port_range_min), - '--icmp-code', '-2', + '--protocol', + self._security_group_rule.protocol, + '--icmp-type', + str(self._security_group_rule.port_range_min), + '--icmp-code', + '-2', self._security_group.id, ] verifylist = [ @@ -566,14 +692,18 @@ def test_create_icmp_code_negative_value(self): self.assertEqual(self.expected_data, data) def test_create_icmp_type(self): - self._setup_security_group_rule({ - 'port_range_min': 15, - 'protocol': 'icmp', - 'remote_ip_prefix': '0.0.0.0/0', - }) + 
self._setup_security_group_rule( + { + 'port_range_min': 15, + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) arglist = [ - '--icmp-type', str(self._security_group_rule.port_range_min), - '--protocol', self._security_group_rule.protocol, + '--icmp-type', + str(self._security_group_rule.port_range_min), + '--protocol', + self._security_group_rule.protocol, self._security_group.id, ] verifylist = [ @@ -587,26 +717,32 @@ def test_create_icmp_type(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'port_range_min': self._security_group_rule.port_range_min, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'port_range_min': self._security_group_rule.port_range_min, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_icmp_type_zero(self): - self._setup_security_group_rule({ - 'port_range_min': 0, - 'protocol': 'icmp', - 'remote_ip_prefix': '0.0.0.0/0', - }) + self._setup_security_group_rule( + { + 'port_range_min': 0, + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) arglist = [ - '--icmp-type', str(self._security_group_rule.port_range_min), - '--protocol', self._security_group_rule.protocol, + '--icmp-type', + str(self._security_group_rule.port_range_min), + '--protocol', + self._security_group_rule.protocol, self._security_group.id, ] verifylist = [ @@ -620,26 +756,32 @@ def test_create_icmp_type_zero(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'port_range_min': self._security_group_rule.port_range_min, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'port_range_min': self._security_group_rule.port_range_min, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_icmp_type_greater_than_zero(self): - self._setup_security_group_rule({ - 'port_range_min': 13, # timestamp - 'protocol': 'icmp', - 'remote_ip_prefix': '0.0.0.0/0', - }) + self._setup_security_group_rule( + { + 'port_range_min': 13, # timestamp + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) arglist = [ - '--icmp-type', str(self._security_group_rule.port_range_min), - '--protocol', self._security_group_rule.protocol, + '--icmp-type', + 
str(self._security_group_rule.port_range_min), + '--protocol', + self._security_group_rule.protocol, self._security_group.id, ] verifylist = [ @@ -653,26 +795,32 @@ def test_create_icmp_type_greater_than_zero(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'port_range_min': self._security_group_rule.port_range_min, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'port_range_min': self._security_group_rule.port_range_min, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_icmp_type_negative_value(self): - self._setup_security_group_rule({ - 'port_range_min': None, # timestamp - 'protocol': 'icmp', - 'remote_ip_prefix': '0.0.0.0/0', - }) + self._setup_security_group_rule( + { + 'port_range_min': None, # timestamp + 'protocol': 'icmp', + 'remote_ip_prefix': '0.0.0.0/0', + } + ) arglist = [ - '--icmp-type', '-13', - '--protocol', self._security_group_rule.protocol, + '--icmp-type', + '-13', + '--protocol', + self._security_group_rule.protocol, self._security_group.id, ] verifylist = [ @@ -686,28 +834,35 @@ def test_create_icmp_type_negative_value(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_ipv6_icmp_type_code(self): - self._setup_security_group_rule({ - 'ether_type': 'IPv6', - 'port_range_min': 139, - 'port_range_max': 2, - 'protocol': 'ipv6-icmp', - 'remote_ip_prefix': '::/0', - }) + self._setup_security_group_rule( + { + 'ether_type': 'IPv6', + 'port_range_min': 139, + 'port_range_max': 2, + 'protocol': 'ipv6-icmp', + 'remote_ip_prefix': '::/0', + } + ) arglist = [ - '--icmp-type', str(self._security_group_rule.port_range_min), - '--icmp-code', str(self._security_group_rule.port_range_max), - '--protocol', self._security_group_rule.protocol, + '--icmp-type', + str(self._security_group_rule.port_range_min), + '--icmp-code', + str(self._security_group_rule.port_range_max), + '--protocol', + self._security_group_rule.protocol, self._security_group.id, ] verifylist = [ @@ -721,28 +876,34 @@ def test_create_ipv6_icmp_type_code(self): columns, data = 
self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'port_range_min': self._security_group_rule.port_range_min, - 'port_range_max': self._security_group_rule.port_range_max, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'port_range_min': self._security_group_rule.port_range_min, + 'port_range_max': self._security_group_rule.port_range_max, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_icmpv6_type(self): - self._setup_security_group_rule({ - 'ether_type': 'IPv6', - 'port_range_min': 139, - 'protocol': 'icmpv6', - 'remote_ip_prefix': '::/0', - }) + self._setup_security_group_rule( + { + 'ether_type': 'IPv6', + 'port_range_min': 139, + 'protocol': 'icmpv6', + 'remote_ip_prefix': '::/0', + } + ) arglist = [ - '--icmp-type', str(self._security_group_rule.port_range_min), - '--protocol', self._security_group_rule.protocol, + '--icmp-type', + str(self._security_group_rule.port_range_min), + '--protocol', + self._security_group_rule.protocol, self._security_group.id, ] verifylist = [ @@ -756,23 +917,28 @@ def test_create_icmpv6_type(self): columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 'port_range_min': self._security_group_rule.port_range_min, - 'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'port_range_min': self._security_group_rule.port_range_min, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) def test_create_with_description(self): - self._setup_security_group_rule({ - 'description': 'Setting SGR', - }) + self._setup_security_group_rule( + { + 'description': 'Setting SGR', + } + ) arglist = [ - '--description', self._security_group_rule.description, + '--description', + self._security_group_rule.description, self._security_group.id, ] verifylist = [ @@ -781,40 +947,37 @@ def test_create_with_description(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_security_group_rule.assert_called_once_with(**{ - 'description': self._security_group_rule.description, - 'direction': self._security_group_rule.direction, - 'ethertype': self._security_group_rule.ether_type, - 
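The ICMP-oriented create tests above all funnel `--icmp-type` and `--icmp-code` through the rule's generic `port_range_min`/`port_range_max` attributes, and the expected `create_security_group_rule()` kwargs omit a field whenever the corresponding value is negative or unset. An illustrative helper (not the command's actual implementation) that captures the mapping these tests exercise:

def icmp_to_port_range(icmp_type, icmp_code):
    """Map ICMP type/code onto port_range_min/port_range_max.

    Negative or missing values are treated as "unset", matching the
    expected kwargs asserted in the tests above. Illustrative only.
    """
    attrs = {}
    if icmp_type is not None and icmp_type >= 0:
        attrs['port_range_min'] = icmp_type
    if icmp_code is not None and icmp_code >= 0:
        attrs['port_range_max'] = icmp_code
    return attrs


assert icmp_to_port_range(15, 0) == {'port_range_min': 15, 'port_range_max': 0}
assert icmp_to_port_range(15, -2) == {'port_range_min': 15}
assert icmp_to_port_range(-13, None) == {}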
'protocol': self._security_group_rule.protocol, - 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, - 'security_group_id': self._security_group.id, - }) + self.network_client.create_security_group_rule.assert_called_once_with( + **{ + 'description': self._security_group_rule.description, + 'direction': self._security_group_rule.direction, + 'ethertype': self._security_group_rule.ether_type, + 'protocol': self._security_group_rule.protocol, + 'remote_ip_prefix': self._security_group_rule.remote_ip_prefix, + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns, columns) self.assertEqual(self.expected_data, data) class TestDeleteSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork): - # The security group rules to be deleted. - _security_group_rules = \ - network_fakes.FakeSecurityGroupRule.create_security_group_rules( - count=2) + _security_group_rules = network_fakes.create_security_group_rules(count=2) def setUp(self): - super(TestDeleteSecurityGroupRuleNetwork, self).setUp() + super().setUp() - self.network.delete_security_group_rule = mock.Mock(return_value=None) + self.network_client.delete_security_group_rule.return_value = None - self.network.find_security_group_rule = ( - network_fakes.FakeSecurityGroupRule.get_security_group_rules( - self._security_group_rules) + self.network_client.find_security_group_rule = ( + network_fakes.get_security_group_rules(self._security_group_rules) ) # Get the command object to test - self.cmd = security_group_rule.DeleteSecurityGroupRule( - self.app, self.namespace) + self.cmd = security_group_rule.DeleteSecurityGroupRule(self.app, None) def test_security_group_rule_delete(self): arglist = [ @@ -827,8 +990,9 @@ def test_security_group_rule_delete(self): result = self.cmd.take_action(parsed_args) - self.network.delete_security_group_rule.assert_called_once_with( - self._security_group_rules[0]) + self.network_client.delete_security_group_rule.assert_called_once_with( + self._security_group_rules[0] + ) self.assertIsNone(result) def test_multi_security_group_rules_delete(self): @@ -847,7 +1011,7 @@ def test_multi_security_group_rules_delete(self): calls = [] for s in self._security_group_rules: calls.append(call(s)) - self.network.delete_security_group_rule.assert_has_calls(calls) + self.network_client.delete_security_group_rule.assert_has_calls(calls) self.assertIsNone(result) def test_multi_security_group_rules_delete_with_exception(self): @@ -856,15 +1020,16 @@ def test_multi_security_group_rules_delete_with_exception(self): 'unexist_rule', ] verifylist = [ - ('rule', - [self._security_group_rules[0].id, 'unexist_rule']), + ('rule', [self._security_group_rules[0].id, 'unexist_rule']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [ - self._security_group_rules[0], exceptions.CommandError] - self.network.find_security_group_rule = ( - mock.Mock(side_effect=find_mock_result) + self._security_group_rules[0], + exceptions.CommandError, + ] + self.network_client.find_security_group_rule.side_effect = ( + find_mock_result ) try: @@ -873,39 +1038,45 @@ def test_multi_security_group_rules_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 rules failed to delete.', str(e)) - self.network.find_security_group_rule.assert_any_call( - self._security_group_rules[0].id, ignore_missing=False) - self.network.find_security_group_rule.assert_any_call( - 'unexist_rule', ignore_missing=False) - 
self.network.delete_security_group_rule.assert_called_once_with( + self.network_client.find_security_group_rule.assert_any_call( + self._security_group_rules[0].id, ignore_missing=False + ) + self.network_client.find_security_group_rule.assert_any_call( + 'unexist_rule', ignore_missing=False + ) + self.network_client.delete_security_group_rule.assert_called_once_with( self._security_group_rules[0] ) class TestListSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork): - # The security group to hold the rules. - _security_group = \ - network_fakes.FakeSecurityGroup.create_one_security_group() + _security_group = network_fakes.create_one_security_group() # The security group rule to be listed. - _security_group_rule_tcp = \ - network_fakes.FakeSecurityGroupRule.create_one_security_group_rule({ + _security_group_rule_tcp = network_fakes.create_one_security_group_rule( + { 'protocol': 'tcp', 'port_range_max': 80, 'port_range_min': 80, 'security_group_id': _security_group.id, - }) - _security_group_rule_icmp = \ - network_fakes.FakeSecurityGroupRule.create_one_security_group_rule({ + } + ) + _security_group_rule_icmp = network_fakes.create_one_security_group_rule( + { 'protocol': 'icmp', 'remote_ip_prefix': '10.0.2.0/24', 'security_group_id': _security_group.id, - }) - _security_group.security_group_rules = [_security_group_rule_tcp._info, - _security_group_rule_icmp._info] - _security_group_rules = [_security_group_rule_tcp, - _security_group_rule_icmp] + } + ) + _security_group.security_group_rules = [ + dict(_security_group_rule_tcp), + dict(_security_group_rule_icmp), + ] + _security_group_rules = [ + _security_group_rule_tcp, + _security_group_rule_icmp, + ] expected_columns_with_group = ( 'ID', @@ -932,41 +1103,45 @@ class TestListSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork): expected_data_with_group = [] expected_data_no_group = [] for _security_group_rule in _security_group_rules: - expected_data_with_group.append(( - _security_group_rule.id, - _security_group_rule.protocol, - _security_group_rule.ether_type, - _security_group_rule.remote_ip_prefix, - security_group_rule._format_network_port_range( - _security_group_rule), - _security_group_rule.direction, - _security_group_rule.remote_group_id, - _security_group_rule.remote_address_group_id, - )) - expected_data_no_group.append(( - _security_group_rule.id, - _security_group_rule.protocol, - _security_group_rule.ether_type, - _security_group_rule.remote_ip_prefix, - security_group_rule._format_network_port_range( - _security_group_rule), - _security_group_rule.direction, - _security_group_rule.remote_group_id, - _security_group_rule.remote_address_group_id, - _security_group_rule.security_group_id, - )) + expected_data_with_group.append( + ( + _security_group_rule.id, + _security_group_rule.protocol, + _security_group_rule.ether_type, + _security_group_rule.remote_ip_prefix, + network_utils.format_network_port_range(_security_group_rule), + _security_group_rule.direction, + _security_group_rule.remote_group_id, + _security_group_rule.remote_address_group_id, + ) + ) + expected_data_no_group.append( + ( + _security_group_rule.id, + _security_group_rule.protocol, + _security_group_rule.ether_type, + _security_group_rule.remote_ip_prefix, + network_utils.format_network_port_range(_security_group_rule), + _security_group_rule.direction, + _security_group_rule.remote_group_id, + _security_group_rule.remote_address_group_id, + _security_group_rule.security_group_id, + ) + ) def setUp(self): - super(TestListSecurityGroupRuleNetwork, 
self).setUp() + super().setUp() + + self.network_client.find_security_group.return_value = ( + self._security_group + ) - self.network.find_security_group = mock.Mock( - return_value=self._security_group) - self.network.security_group_rules = mock.Mock( - return_value=self._security_group_rules) + self.network_client.security_group_rules.return_value = ( + self._security_group_rules + ) # Get the command object to test - self.cmd = security_group_rule.ListSecurityGroupRule( - self.app, self.namespace) + self.cmd = security_group_rule.ListSecurityGroupRule(self.app, None) def test_list_default(self): self._security_group_rule_tcp.port_range_min = 80 @@ -974,7 +1149,7 @@ def test_list_default(self): columns, data = self.cmd.take_action(parsed_args) - self.network.security_group_rules.assert_called_once_with(**{}) + self.network_client.security_group_rules.assert_called_once_with(**{}) self.assertEqual(self.expected_columns_no_group, columns) self.assertEqual(self.expected_data_no_group, list(data)) @@ -990,9 +1165,11 @@ def test_list_with_group(self): columns, data = self.cmd.take_action(parsed_args) - self.network.security_group_rules.assert_called_once_with(**{ - 'security_group_id': self._security_group.id, - }) + self.network_client.security_group_rules.assert_called_once_with( + **{ + 'security_group_id': self._security_group.id, + } + ) self.assertEqual(self.expected_columns_with_group, columns) self.assertEqual(self.expected_data_with_group, list(data)) @@ -1008,14 +1185,15 @@ def test_list_with_ignored_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.security_group_rules.assert_called_once_with(**{}) + self.network_client.security_group_rules.assert_called_once_with(**{}) self.assertEqual(self.expected_columns_no_group, columns) self.assertEqual(self.expected_data_no_group, list(data)) def test_list_with_protocol(self): self._security_group_rule_tcp.port_range_min = 80 arglist = [ - '--protocol', 'tcp', + '--protocol', + 'tcp', ] verifylist = [ ('protocol', 'tcp'), @@ -1024,9 +1202,11 @@ def test_list_with_protocol(self): columns, data = self.cmd.take_action(parsed_args) - self.network.security_group_rules.assert_called_once_with(**{ - 'protocol': 'tcp', - }) + self.network_client.security_group_rules.assert_called_once_with( + **{ + 'protocol': 'tcp', + } + ) self.assertEqual(self.expected_columns_no_group, columns) self.assertEqual(self.expected_data_no_group, list(data)) @@ -1042,9 +1222,11 @@ def test_list_with_ingress(self): columns, data = self.cmd.take_action(parsed_args) - self.network.security_group_rules.assert_called_once_with(**{ - 'direction': 'ingress', - }) + self.network_client.security_group_rules.assert_called_once_with( + **{ + 'direction': 'ingress', + } + ) self.assertEqual(self.expected_columns_no_group, columns) self.assertEqual(self.expected_data_no_group, list(data)) @@ -1060,20 +1242,70 @@ def test_list_with_wrong_egress(self): columns, data = self.cmd.take_action(parsed_args) - self.network.security_group_rules.assert_called_once_with(**{ - 'direction': 'egress', - }) + self.network_client.security_group_rules.assert_called_once_with( + **{ + 'direction': 'egress', + } + ) self.assertEqual(self.expected_columns_no_group, columns) self.assertEqual(self.expected_data_no_group, list(data)) + def test_list_with_project(self): + project = identity_fakes.FakeProject.create_one_project() + self._security_group_rule_tcp.port_range_min = 80 + self.projects_mock.get.return_value = project -class 
TestShowSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork): + arglist = [ + '--project', + project.id, + ] + verifylist = [ + ('project', project.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + filters = {'tenant_id': project.id, 'project_id': project.id} + + self.network_client.security_group_rules.assert_called_once_with( + **filters + ) + self.assertEqual(self.expected_columns_no_group, columns) + self.assertEqual(self.expected_data_no_group, list(data)) + + def test_list_with_project_domain(self): + project = identity_fakes.FakeProject.create_one_project() + self._security_group_rule_tcp.port_range_min = 80 + self.projects_mock.get.return_value = project + arglist = [ + '--project', + project.id, + '--project-domain', + project.domain_id, + ] + verifylist = [ + ('project', project.id), + ('project_domain', project.domain_id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + filters = {'tenant_id': project.id, 'project_id': project.id} + + self.network_client.security_group_rules.assert_called_once_with( + **filters + ) + self.assertEqual(self.expected_columns_no_group, columns) + self.assertEqual(self.expected_data_no_group, list(data)) + + +class TestShowSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork): # The security group rule to be shown. - _security_group_rule = \ - network_fakes.FakeSecurityGroupRule.create_one_security_group_rule() + _security_group_rule = network_fakes.create_one_security_group_rule() columns = ( + 'created_at', 'description', 'direction', 'ether_type', @@ -1085,10 +1317,13 @@ class TestShowSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork): 'remote_address_group_id', 'remote_group_id', 'remote_ip_prefix', + 'revision_number', 'security_group_id', + 'updated_at', ) data = ( + _security_group_rule.created_at, _security_group_rule.description, _security_group_rule.direction, _security_group_rule.ether_type, @@ -1100,22 +1335,25 @@ class TestShowSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork): _security_group_rule.remote_address_group_id, _security_group_rule.remote_group_id, _security_group_rule.remote_ip_prefix, + _security_group_rule.revision_number, _security_group_rule.security_group_id, + _security_group_rule.updated_at, ) def setUp(self): - super(TestShowSecurityGroupRuleNetwork, self).setUp() + super().setUp() - self.network.find_security_group_rule = mock.Mock( - return_value=self._security_group_rule) + self.network_client.find_security_group_rule.return_value = ( + self._security_group_rule + ) # Get the command object to test - self.cmd = security_group_rule.ShowSecurityGroupRule( - self.app, self.namespace) + self.cmd = security_group_rule.ShowSecurityGroupRule(self.app, None) def test_show_no_options(self): - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, [], []) + self.assertRaises( + tests_utils.ParserException, self.check_parser, self.cmd, [], [] + ) def test_show_all_options(self): arglist = [ @@ -1128,7 +1366,8 @@ def test_show_all_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.find_security_group_rule.assert_called_once_with( - self._security_group_rule.id, ignore_missing=False) + self.network_client.find_security_group_rule.assert_called_once_with( + self._security_group_rule.id, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git 
a/openstackclient/tests/unit/network/v2/test_subnet.py b/openstackclient/tests/unit/network/v2/test_subnet.py index 7aaa583df8..e59168e517 100644 --- a/openstackclient/tests/unit/network/v2/test_subnet.py +++ b/openstackclient/tests/unit/network/v2/test_subnet.py @@ -11,7 +11,6 @@ # under the License. # -from unittest import mock from unittest.mock import call from osc_lib.cli import format_columns @@ -24,20 +23,16 @@ class TestSubnet(network_fakes.TestNetworkV2): - def setUp(self): - super(TestSubnet, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateSubnet(TestSubnet): - def _init_subnet_variables(self): self.project = identity_fakes_v3.FakeProject.create_one_project() self.domain = identity_fakes_v3.FakeDomain.create_one_domain() @@ -49,22 +44,24 @@ def _init_subnet_variables(self): ) # Subnet pool to be used to create a subnet from a pool - self._subnet_pool = \ + self._subnet_pool = ( network_fakes.FakeSubnetPool.create_one_subnet_pool() + ) # An IPv4 subnet to be created using a specific subnet pool self._subnet_from_pool = network_fakes.FakeSubnet.create_one_subnet( attrs={ 'project_id': self.project.id, 'subnetpool_id': self._subnet_pool.id, - 'dns_nameservers': ['8.8.8.8', - '8.8.4.4'], - 'host_routes': [{'destination': '10.20.20.0/24', - 'nexthop': '10.20.20.1'}, - {'destination': '10.30.30.0/24', - 'nexthop': '10.30.30.1'}], - 'service_types': ['network:router_gateway', - 'network:floatingip_agent_gateway'], + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 'host_routes': [ + {'destination': '10.20.20.0/24', 'nexthop': '10.20.20.1'}, + {'destination': '10.30.30.0/24', 'nexthop': '10.30.30.1'}, + ], + 'service_types': [ + 'network:router_gateway', + 'network:floatingip_agent_gateway', + ], } ) @@ -74,23 +71,59 @@ def _init_subnet_variables(self): 'project_id': self.project.id, 'cidr': 'fe80:0:0:a00a::/64', 'enable_dhcp': True, - 'dns_nameservers': ['fe80:27ff:a00a:f00f::ffff', - 'fe80:37ff:a00a:f00f::ffff'], - 'allocation_pools': [{'start': 'fe80::a00a:0:c0de:0:100', - 'end': 'fe80::a00a:0:c0de:0:f000'}, - {'start': 'fe80::a00a:0:c0de:1:100', - 'end': 'fe80::a00a:0:c0de:1:f000'}], - 'host_routes': [{'destination': 'fe80:27ff:a00a:f00f::/64', - 'nexthop': 'fe80:27ff:a00a:f00f::1'}, - {'destination': 'fe80:37ff:a00a:f00f::/64', - 'nexthop': 'fe80:37ff:a00a:f00f::1'}], + 'dns_nameservers': [ + 'fe80:27ff:a00a:f00f::ffff', + 'fe80:37ff:a00a:f00f::ffff', + ], + 'allocation_pools': [ + { + 'start': 'fe80::a00a:0:c0de:0:100', + 'end': 'fe80::a00a:0:c0de:0:f000', + }, + { + 'start': 'fe80::a00a:0:c0de:1:100', + 'end': 'fe80::a00a:0:c0de:1:f000', + }, + ], + 'host_routes': [ + { + 'destination': 'fe80:27ff:a00a:f00f::/64', + 'nexthop': 'fe80:27ff:a00a:f00f::1', + }, + { + 'destination': 'fe80:37ff:a00a:f00f::/64', + 'nexthop': 'fe80:37ff:a00a:f00f::1', + }, + ], 'ip_version': 6, 'gateway_ip': 'fe80::a00a:0:c0de:0:1', 'ipv6_address_mode': 'slaac', 'ipv6_ra_mode': 'slaac', 'subnetpool_id': 'None', - 'service_types': ['network:router_gateway', - 'network:floatingip_agent_gateway'], + 'service_types': [ + 'network:router_gateway', + 'network:floatingip_agent_gateway', + ], + } + ) + + # An IPv6 
subnet to be created with Prefix Delegation options specified + self._subnet_ipv6_pd = network_fakes.FakeSubnet.create_one_subnet( + attrs={ + 'project_id': self.project.id, + 'cidr': '::/64', + 'enable_dhcp': True, + 'allocation_pools': [ + { + 'start': '::1', + 'end': '::ffff:ffff:ffff:ffff', + }, + ], + 'ip_version': 6, + 'gateway_ip': '::', + 'ipv6_address_mode': 'slaac', + 'ipv6_ra_mode': 'slaac', + 'subnetpool_id': 'prefix_delegation', } ) @@ -102,12 +135,11 @@ def _init_subnet_variables(self): ) # The network segment to be returned from find_segment - self._network_segment = \ - network_fakes.create_one_network_segment( - attrs={ - 'network_id': self._subnet.network_id, - } - ) + self._network_segment = network_fakes.create_one_network_segment( + attrs={ + 'network_id': self._subnet.network_id, + } + ) self.columns = ( 'allocation_pools', @@ -134,7 +166,7 @@ def _init_subnet_variables(self): subnet_v2.AllocationPoolsColumn(self._subnet.allocation_pools), self._subnet.cidr, self._subnet.description, - format_columns.ListColumn(self._subnet.dns_nameservers), + subnet_v2.UnsortedListColumn(self._subnet.dns_nameservers), self._subnet.enable_dhcp, self._subnet.gateway_ip, subnet_v2.HostRoutesColumn(self._subnet.host_routes), @@ -153,10 +185,13 @@ def _init_subnet_variables(self): self.data_subnet_pool = ( subnet_v2.AllocationPoolsColumn( - self._subnet_from_pool.allocation_pools), + self._subnet_from_pool.allocation_pools + ), self._subnet_from_pool.cidr, self._subnet_from_pool.description, - format_columns.ListColumn(self._subnet_from_pool.dns_nameservers), + subnet_v2.UnsortedListColumn( + self._subnet_from_pool.dns_nameservers + ), self._subnet_from_pool.enable_dhcp, self._subnet_from_pool.gateway_ip, subnet_v2.HostRoutesColumn(self._subnet_from_pool.host_routes), @@ -175,10 +210,11 @@ def _init_subnet_variables(self): self.data_ipv6 = ( subnet_v2.AllocationPoolsColumn( - self._subnet_ipv6.allocation_pools), + self._subnet_ipv6.allocation_pools + ), self._subnet_ipv6.cidr, self._subnet_ipv6.description, - format_columns.ListColumn(self._subnet_ipv6.dns_nameservers), + subnet_v2.UnsortedListColumn(self._subnet_ipv6.dns_nameservers), self._subnet_ipv6.enable_dhcp, self._subnet_ipv6.gateway_ip, subnet_v2.HostRoutesColumn(self._subnet_ipv6.host_routes), @@ -195,26 +231,48 @@ def _init_subnet_variables(self): format_columns.ListColumn(self._subnet_ipv6.tags), ) + self.data_ipv6_pd = ( + subnet_v2.AllocationPoolsColumn( + self._subnet_ipv6_pd.allocation_pools + ), + self._subnet_ipv6_pd.cidr, + self._subnet_ipv6_pd.description, + subnet_v2.UnsortedListColumn(self._subnet_ipv6_pd.dns_nameservers), + self._subnet_ipv6_pd.enable_dhcp, + self._subnet_ipv6_pd.gateway_ip, + subnet_v2.HostRoutesColumn(self._subnet_ipv6_pd.host_routes), + self._subnet_ipv6_pd.id, + self._subnet_ipv6_pd.ip_version, + self._subnet_ipv6_pd.ipv6_address_mode, + self._subnet_ipv6_pd.ipv6_ra_mode, + self._subnet_ipv6_pd.name, + self._subnet_ipv6_pd.network_id, + self._subnet_ipv6_pd.project_id, + self._subnet_ipv6_pd.segment_id, + format_columns.ListColumn(self._subnet_ipv6_pd.service_types), + self._subnet_ipv6_pd.subnetpool_id, + format_columns.ListColumn(self._subnet_ipv6_pd.tags), + ) + def setUp(self): self._init_subnet_variables() - super(TestCreateSubnet, self).setUp() + super().setUp() # Get the command object to test - self.cmd = subnet_v2.CreateSubnet(self.app, self.namespace) + self.cmd = subnet_v2.CreateSubnet(self.app, None) self.projects_mock.get.return_value = self.project 
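The subnet data tuples above switch `dns_nameservers` from `format_columns.ListColumn` to `subnet_v2.UnsortedListColumn`. osc-lib's `ListColumn` sorts its items for display, whereas nameserver order is significant, so an order-preserving variant is used. The sketch below shows what such a cliff column could look like; it is illustrative, not the project's actual `UnsortedListColumn`:

from cliff.columns import FormattableColumn


class UnsortedListColumn(FormattableColumn):
    """Render a list as a comma-separated string without sorting it."""

    def human_readable(self):
        return ', '.join(str(v) for v in self._value or [])


col = UnsortedListColumn(['8.8.8.8', '1.1.1.1'])
assert col.human_readable() == '8.8.8.8, 1.1.1.1'  # original order preserved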
self.domains_mock.get.return_value = self.domain # Mock SDK calls for all tests. - self.network.create_subnet = mock.Mock(return_value=self._subnet) - self.network.set_tags = mock.Mock(return_value=None) - self.network.find_network = mock.Mock(return_value=self._network) - self.network.find_segment = mock.Mock( - return_value=self._network_segment - ) - self.network.find_subnet_pool = mock.Mock( - return_value=self._subnet_pool - ) + self.network_client.create_subnet.return_value = self._subnet + + self.network_client.set_tags.return_value = None + self.network_client.find_network.return_value = self._network + + self.network_client.find_segment.return_value = self._network_segment + + self.network_client.find_subnet_pool.return_value = self._subnet_pool def test_create_no_options(self): arglist = [] @@ -222,18 +280,25 @@ def test_create_no_options(self): # Testing that a call without the required argument will fail and # throw a "ParserExecption" - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, verifylist) - self.assertFalse(self.network.create_subnet.called) - self.assertFalse(self.network.set_tags.called) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + self.assertFalse(self.network_client.create_subnet.called) + self.assertFalse(self.network_client.set_tags.called) def test_create_default_options(self): # Mock SDK calls for this test. self._network.id = self._subnet.network_id arglist = [ - "--subnet-range", self._subnet.cidr, - "--network", self._subnet.network_id, + "--subnet-range", + self._subnet.cidr, + "--network", + self._subnet.network_id, self._subnet.name, ] verifylist = [ @@ -247,29 +312,36 @@ def test_create_default_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet.assert_called_once_with(**{ - 'cidr': self._subnet.cidr, - 'ip_version': self._subnet.ip_version, - 'name': self._subnet.name, - 'network_id': self._subnet.network_id, - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_subnet.assert_called_once_with( + **{ + 'cidr': self._subnet.cidr, + 'ip_version': self._subnet.ip_version, + 'name': self._subnet.name, + 'network_id': self._subnet.network_id, + } + ) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_from_subnet_pool_options(self): # Mock SDK calls for this test. 
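`test_create_no_options()` above verifies that nothing was created by checking the mock's `.called` flag with `assertFalse()`; `unittest.mock` also provides `assert_not_called()` for the same check. A quick illustration of both on a plain mock:

from unittest import mock

client = mock.Mock()

# before any call, both checks pass
assert client.create_subnet.called is False
client.create_subnet.assert_not_called()

# after a call, .called flips to True and assert_not_called() would raise
client.create_subnet(name='demo')
assert client.create_subnet.called is True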
- self.network.create_subnet.return_value = self._subnet_from_pool - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.create_subnet.return_value = self._subnet_from_pool + self.network_client.set_tags.return_value = None self._network.id = self._subnet_from_pool.network_id arglist = [ self._subnet_from_pool.name, - "--subnet-pool", self._subnet_from_pool.subnetpool_id, - "--prefix-length", '24', - "--network", self._subnet_from_pool.network_id, - "--ip-version", str(self._subnet_from_pool.ip_version), - "--gateway", self._subnet_from_pool.gateway_ip, + "--subnet-pool", + self._subnet_from_pool.subnetpool_id, + "--prefix-length", + '24', + "--network", + self._subnet_from_pool.network_id, + "--ip-version", + str(self._subnet_from_pool.ip_version), + "--gateway", + self._subnet_from_pool.gateway_ip, "--dhcp", ] @@ -279,8 +351,12 @@ def test_create_from_subnet_pool_options(self): for host_route in self._subnet_from_pool.host_routes: arglist.append('--host-route') - value = 'gateway=' + host_route.get('nexthop', '') + \ - ',destination=' + host_route.get('destination', '') + value = ( + 'gateway=' + + host_route.get('nexthop', '') + + ',destination=' + + host_route.get('destination', '') + ) arglist.append(value) for service_type in self._subnet_from_pool.service_types: @@ -295,8 +371,12 @@ def test_create_from_subnet_pool_options(self): ('gateway', self._subnet_from_pool.gateway_ip), ('dns_nameservers', self._subnet_from_pool.dns_nameservers), ('dhcp', self._subnet_from_pool.enable_dhcp), - ('host_routes', subnet_v2.convert_entries_to_gateway( - self._subnet_from_pool.host_routes)), + ( + 'host_routes', + subnet_v2.convert_entries_to_gateway( + self._subnet_from_pool.host_routes + ), + ), ('subnet_pool', self._subnet_from_pool.subnetpool_id), ('service_types', self._subnet_from_pool.service_types), ] @@ -304,34 +384,42 @@ def test_create_from_subnet_pool_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet.assert_called_once_with(**{ - 'dns_nameservers': self._subnet_from_pool.dns_nameservers, - 'enable_dhcp': self._subnet_from_pool.enable_dhcp, - 'gateway_ip': self._subnet_from_pool.gateway_ip, - 'host_routes': self._subnet_from_pool.host_routes, - 'ip_version': self._subnet_from_pool.ip_version, - 'name': self._subnet_from_pool.name, - 'network_id': self._subnet_from_pool.network_id, - 'prefixlen': '24', - 'subnetpool_id': self._subnet_from_pool.subnetpool_id, - 'service_types': self._subnet_from_pool.service_types, - }) + self.network_client.create_subnet.assert_called_once_with( + **{ + 'dns_nameservers': self._subnet_from_pool.dns_nameservers, + 'enable_dhcp': self._subnet_from_pool.enable_dhcp, + 'gateway_ip': self._subnet_from_pool.gateway_ip, + 'host_routes': self._subnet_from_pool.host_routes, + 'ip_version': self._subnet_from_pool.ip_version, + 'name': self._subnet_from_pool.name, + 'network_id': self._subnet_from_pool.network_id, + 'prefixlen': '24', + 'subnetpool_id': self._subnet_from_pool.subnetpool_id, + 'service_types': self._subnet_from_pool.service_types, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data_subnet_pool, data) def test_create_options_subnet_range_ipv6(self): # Mock SDK calls for this test. 
- self.network.create_subnet.return_value = self._subnet_ipv6 + self.network_client.create_subnet.return_value = self._subnet_ipv6 self._network.id = self._subnet_ipv6.network_id arglist = [ self._subnet_ipv6.name, - "--subnet-range", self._subnet_ipv6.cidr, - "--network", self._subnet_ipv6.network_id, - "--ip-version", str(self._subnet_ipv6.ip_version), - "--ipv6-ra-mode", self._subnet_ipv6.ipv6_ra_mode, - "--ipv6-address-mode", self._subnet_ipv6.ipv6_address_mode, - "--gateway", self._subnet_ipv6.gateway_ip, + "--subnet-range", + self._subnet_ipv6.cidr, + "--network", + self._subnet_ipv6.network_id, + "--ip-version", + str(self._subnet_ipv6.ip_version), + "--ipv6-ra-mode", + self._subnet_ipv6.ipv6_ra_mode, + "--ipv6-address-mode", + self._subnet_ipv6.ipv6_address_mode, + "--gateway", + self._subnet_ipv6.gateway_ip, "--dhcp", ] @@ -341,14 +429,22 @@ def test_create_options_subnet_range_ipv6(self): for host_route in self._subnet_ipv6.host_routes: arglist.append('--host-route') - value = 'gateway=' + host_route.get('nexthop', '') + \ - ',destination=' + host_route.get('destination', '') + value = ( + 'gateway=' + + host_route.get('nexthop', '') + + ',destination=' + + host_route.get('destination', '') + ) arglist.append(value) for pool in self._subnet_ipv6.allocation_pools: arglist.append('--allocation-pool') - value = 'start=' + pool.get('start', '') + \ - ',end=' + pool.get('end', '') + value = ( + 'start=' + + pool.get('start', '') + + ',end=' + + pool.get('end', '') + ) arglist.append(value) for service_type in self._subnet_ipv6.service_types: @@ -365,8 +461,12 @@ def test_create_options_subnet_range_ipv6(self): ('gateway', self._subnet_ipv6.gateway_ip), ('dns_nameservers', self._subnet_ipv6.dns_nameservers), ('dhcp', self._subnet_ipv6.enable_dhcp), - ('host_routes', subnet_v2.convert_entries_to_gateway( - self._subnet_ipv6.host_routes)), + ( + 'host_routes', + subnet_v2.convert_entries_to_gateway( + self._subnet_ipv6.host_routes + ), + ), ('allocation_pools', self._subnet_ipv6.allocation_pools), ('service_types', self._subnet_ipv6.service_types), ] @@ -374,32 +474,86 @@ def test_create_options_subnet_range_ipv6(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet.assert_called_once_with(**{ - 'cidr': self._subnet_ipv6.cidr, - 'dns_nameservers': self._subnet_ipv6.dns_nameservers, - 'enable_dhcp': self._subnet_ipv6.enable_dhcp, - 'gateway_ip': self._subnet_ipv6.gateway_ip, - 'host_routes': self._subnet_ipv6.host_routes, - 'ip_version': self._subnet_ipv6.ip_version, - 'ipv6_address_mode': self._subnet_ipv6.ipv6_address_mode, - 'ipv6_ra_mode': self._subnet_ipv6.ipv6_ra_mode, - 'name': self._subnet_ipv6.name, - 'network_id': self._subnet_ipv6.network_id, - 'allocation_pools': self._subnet_ipv6.allocation_pools, - 'service_types': self._subnet_ipv6.service_types, - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_subnet.assert_called_once_with( + **{ + 'cidr': self._subnet_ipv6.cidr, + 'dns_nameservers': self._subnet_ipv6.dns_nameservers, + 'enable_dhcp': self._subnet_ipv6.enable_dhcp, + 'gateway_ip': self._subnet_ipv6.gateway_ip, + 'host_routes': self._subnet_ipv6.host_routes, + 'ip_version': self._subnet_ipv6.ip_version, + 'ipv6_address_mode': self._subnet_ipv6.ipv6_address_mode, + 'ipv6_ra_mode': self._subnet_ipv6.ipv6_ra_mode, + 'name': self._subnet_ipv6.name, + 'network_id': self._subnet_ipv6.network_id, + 'allocation_pools': self._subnet_ipv6.allocation_pools, 
+ 'service_types': self._subnet_ipv6.service_types, + } + ) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data_ipv6, data) + def test_create_options_subnet_ipv6_pd(self): + # Mock SDK calls for this test. + self.network_client.create_subnet.return_value = self._subnet_ipv6_pd + self._network.id = self._subnet_ipv6_pd.network_id + + arglist = [ + self._subnet_ipv6_pd.name, + "--network", + self._subnet_ipv6_pd.network_id, + "--ip-version", + str(self._subnet_ipv6_pd.ip_version), + "--ipv6-ra-mode", + self._subnet_ipv6_pd.ipv6_ra_mode, + "--ipv6-address-mode", + self._subnet_ipv6_pd.ipv6_address_mode, + "--dhcp", + "--use-prefix-delegation", + ] + + verifylist = [ + ('name', self._subnet_ipv6_pd.name), + ('network', self._subnet_ipv6_pd.network_id), + ('ip_version', self._subnet_ipv6_pd.ip_version), + ('ipv6_ra_mode', self._subnet_ipv6_pd.ipv6_ra_mode), + ('ipv6_address_mode', self._subnet_ipv6_pd.ipv6_address_mode), + ('dhcp', self._subnet_ipv6_pd.enable_dhcp), + ('use_prefix_delegation', True), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + # Calling with --use-prefix-delegation will set the subnetpool_id + # to 'prefix_delegation' + self.network_client.create_subnet.assert_called_once_with( + **{ + 'enable_dhcp': self._subnet_ipv6_pd.enable_dhcp, + 'ip_version': self._subnet_ipv6_pd.ip_version, + 'ipv6_address_mode': self._subnet_ipv6_pd.ipv6_address_mode, + 'ipv6_ra_mode': self._subnet_ipv6_pd.ipv6_ra_mode, + 'name': self._subnet_ipv6_pd.name, + 'network_id': self._subnet_ipv6_pd.network_id, + 'subnetpool_id': self._subnet_ipv6_pd.subnetpool_id, + } + ) + self.assertFalse(self.network_client.set_tags.called) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data_ipv6_pd, data) + def test_create_with_network_segment(self): # Mock SDK calls for this test. 
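# A hypothetical reduction (not the actual CreateSubnet.take_action) of the
# behaviour the new prefix-delegation test above pins down: passing
# --use-prefix-delegation sends subnetpool_id='prefix_delegation' to
# create_subnet instead of a CIDR or a real subnet pool, matching Neutron's
# prefix-delegation convention. Option names mirror the test arglist;
# everything else is simplified.
import argparse


def build_subnet_attrs(argv):
    parser = argparse.ArgumentParser(prog='subnet create (sketch)')
    parser.add_argument('name')
    parser.add_argument('--network', required=True)
    parser.add_argument('--ip-version', type=int, default=4)
    parser.add_argument('--subnet-range')
    parser.add_argument('--use-prefix-delegation', action='store_true')
    args = parser.parse_args(argv)

    attrs = {
        'name': args.name,
        'network_id': args.network,
        'ip_version': args.ip_version,
    }
    if args.use_prefix_delegation:
        # The literal string is what Neutron recognises as "allocate me a
        # delegated IPv6 prefix"; no cidr or real subnetpool_id is sent.
        attrs['subnetpool_id'] = 'prefix_delegation'
    elif args.subnet_range:
        attrs['cidr'] = args.subnet_range
    return attrs


attrs = build_subnet_attrs(
    ['pd', '--network', 'net-1', '--ip-version', '6',
     '--use-prefix-delegation']
)
assert attrs['subnetpool_id'] == 'prefix_delegation'
assert 'cidr' not in attrs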
self._network.id = self._subnet.network_id arglist = [ - "--subnet-range", self._subnet.cidr, - "--network-segment", self._network_segment.id, - "--network", self._subnet.network_id, + "--subnet-range", + self._subnet.cidr, + "--network-segment", + self._network_segment.id, + "--network", + self._subnet.network_id, self._subnet.name, ] verifylist = [ @@ -409,20 +563,21 @@ def test_create_with_network_segment(self): ('network', self._subnet.network_id), ('ip_version', self._subnet.ip_version), ('gateway', 'auto'), - ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet.assert_called_once_with(**{ - 'cidr': self._subnet.cidr, - 'ip_version': self._subnet.ip_version, - 'name': self._subnet.name, - 'network_id': self._subnet.network_id, - 'segment_id': self._network_segment.id, - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_subnet.assert_called_once_with( + **{ + 'cidr': self._subnet.cidr, + 'ip_version': self._subnet.ip_version, + 'name': self._subnet.name, + 'network_id': self._subnet.network_id, + 'segment_id': self._network_segment.id, + } + ) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -431,9 +586,12 @@ def test_create_with_description(self): self._network.id = self._subnet.network_id arglist = [ - "--subnet-range", self._subnet.cidr, - "--network", self._subnet.network_id, - "--description", self._subnet.description, + "--subnet-range", + self._subnet.cidr, + "--network", + self._subnet.network_id, + "--description", + self._subnet.description, self._subnet.name, ] verifylist = [ @@ -443,27 +601,30 @@ def test_create_with_description(self): ('network', self._subnet.network_id), ('ip_version', self._subnet.ip_version), ('gateway', 'auto'), - ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet.assert_called_once_with(**{ - 'cidr': self._subnet.cidr, - 'ip_version': self._subnet.ip_version, - 'name': self._subnet.name, - 'network_id': self._subnet.network_id, - 'description': self._subnet.description, - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_subnet.assert_called_once_with( + **{ + 'cidr': self._subnet.cidr, + 'ip_version': self._subnet.ip_version, + 'name': self._subnet.name, + 'network_id': self._subnet.network_id, + 'description': self._subnet.description, + } + ) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def _test_create_with_dns(self, publish_dns=True): arglist = [ - "--subnet-range", self._subnet.cidr, - "--network", self._subnet.network_id, + "--subnet-range", + self._subnet.cidr, + "--network", + self._subnet.network_id, self._subnet.name, ] if publish_dns: @@ -480,9 +641,9 @@ def _test_create_with_dns(self, publish_dns=True): verifylist.append(('dns_publish_fixed_ip', publish_dns)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet.assert_called_once_with( + self.network_client.create_subnet.assert_called_once_with( cidr=self._subnet.cidr, ip_version=self._subnet.ip_version, name=self._subnet.name, @@ -500,8 +661,10 @@ def test_create_with_no_dns(self): def _test_create_with_tag(self, add_tags=True): 
arglist = [ - "--subnet-range", self._subnet.cidr, - "--network", self._subnet.network_id, + "--subnet-range", + self._subnet.cidr, + "--network", + self._subnet.network_id, self._subnet.name, ] if add_tags: @@ -521,19 +684,20 @@ def _test_create_with_tag(self, add_tags=True): verifylist.append(('no_tag', True)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet.assert_called_once_with( + self.network_client.create_subnet.assert_called_once_with( cidr=self._subnet.cidr, ip_version=self._subnet.ip_version, name=self._subnet.name, - network_id=self._subnet.network_id) + network_id=self._subnet.network_id, + ) if add_tags: - self.network.set_tags.assert_called_once_with( - self._subnet, - tests_utils.CompareBySet(['red', 'blue'])) + self.network_client.set_tags.assert_called_once_with( + self._subnet, tests_utils.CompareBySet(['red', 'blue']) + ) else: - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -545,20 +709,20 @@ def test_create_with_no_tag(self): class TestDeleteSubnet(TestSubnet): - # The subnets to delete. _subnets = network_fakes.FakeSubnet.create_subnets(count=2) def setUp(self): - super(TestDeleteSubnet, self).setUp() + super().setUp() - self.network.delete_subnet = mock.Mock(return_value=None) + self.network_client.delete_subnet.return_value = None - self.network.find_subnet = ( - network_fakes.FakeSubnet.get_subnets(self._subnets)) + self.network_client.find_subnet = network_fakes.FakeSubnet.get_subnets( + self._subnets + ) # Get the command object to test - self.cmd = subnet_v2.DeleteSubnet(self.app, self.namespace) + self.cmd = subnet_v2.DeleteSubnet(self.app, None) def test_subnet_delete(self): arglist = [ @@ -570,7 +734,9 @@ def test_subnet_delete(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.delete_subnet.assert_called_once_with(self._subnets[0]) + self.network_client.delete_subnet.assert_called_once_with( + self._subnets[0] + ) self.assertIsNone(result) def test_multi_subnets_delete(self): @@ -589,7 +755,7 @@ def test_multi_subnets_delete(self): calls = [] for s in self._subnets: calls.append(call(s)) - self.network.delete_subnet.assert_has_calls(calls) + self.network_client.delete_subnet.assert_has_calls(calls) self.assertIsNone(result) def test_multi_subnets_delete_with_exception(self): @@ -598,15 +764,12 @@ def test_multi_subnets_delete_with_exception(self): 'unexist_subnet', ] verifylist = [ - ('subnet', - [self._subnets[0].name, 'unexist_subnet']), + ('subnet', [self._subnets[0].name, 'unexist_subnet']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._subnets[0], exceptions.CommandError] - self.network.find_subnet = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_subnet.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -614,11 +777,13 @@ def test_multi_subnets_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 subnets failed to delete.', str(e)) - self.network.find_subnet.assert_any_call( - self._subnets[0].name, ignore_missing=False) - self.network.find_subnet.assert_any_call( - 'unexist_subnet', ignore_missing=False) - self.network.delete_subnet.assert_called_once_with( + 
self.network_client.find_subnet.assert_any_call( + self._subnets[0].name, ignore_missing=False + ) + self.network_client.find_subnet.assert_any_call( + 'unexist_subnet', ignore_missing=False + ) + self.network_client.delete_subnet.assert_called_once_with( self._subnets[0] ) @@ -647,38 +812,42 @@ class TestListSubnet(TestSubnet): data = [] for subnet in _subnet: - data.append(( - subnet.id, - subnet.name, - subnet.network_id, - subnet.cidr, - )) + data.append( + ( + subnet.id, + subnet.name, + subnet.network_id, + subnet.cidr, + ) + ) data_long = [] for subnet in _subnet: - data_long.append(( - subnet.id, - subnet.name, - subnet.network_id, - subnet.cidr, - subnet.project_id, - subnet.enable_dhcp, - format_columns.ListColumn(subnet.dns_nameservers), - subnet_v2.AllocationPoolsColumn(subnet.allocation_pools), - subnet_v2.HostRoutesColumn(subnet.host_routes), - subnet.ip_version, - subnet.gateway_ip, - format_columns.ListColumn(subnet.service_types), - format_columns.ListColumn(subnet.tags), - )) + data_long.append( + ( + subnet.id, + subnet.name, + subnet.network_id, + subnet.cidr, + subnet.project_id, + subnet.enable_dhcp, + subnet_v2.UnsortedListColumn(subnet.dns_nameservers), + subnet_v2.AllocationPoolsColumn(subnet.allocation_pools), + subnet_v2.HostRoutesColumn(subnet.host_routes), + subnet.ip_version, + subnet.gateway_ip, + format_columns.ListColumn(subnet.service_types), + format_columns.ListColumn(subnet.tags), + ) + ) def setUp(self): - super(TestListSubnet, self).setUp() + super().setUp() # Get the command object to test - self.cmd = subnet_v2.ListSubnet(self.app, self.namespace) + self.cmd = subnet_v2.ListSubnet(self.app, None) - self.network.subnets = mock.Mock(return_value=self._subnet) + self.network_client.subnets.return_value = self._subnet def test_subnet_list_no_options(self): arglist = [] @@ -689,7 +858,7 @@ def test_subnet_list_no_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.subnets.assert_called_once_with() + self.network_client.subnets.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -704,13 +873,14 @@ def test_subnet_list_long(self): columns, data = self.cmd.take_action(parsed_args) - self.network.subnets.assert_called_once_with() + self.network_client.subnets.assert_called_once_with() self.assertEqual(self.columns_long, columns) self.assertCountEqual(self.data_long, list(data)) def test_subnet_list_ip_version(self): arglist = [ - '--ip-version', str(4), + '--ip-version', + str(4), ] verifylist = [ ('ip_version', 4), @@ -720,7 +890,7 @@ def test_subnet_list_ip_version(self): columns, data = self.cmd.take_action(parsed_args) filters = {'ip_version': 4} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -736,7 +906,7 @@ def test_subnet_list_dhcp(self): columns, data = self.cmd.take_action(parsed_args) filters = {'enable_dhcp': True, 'is_dhcp_enabled': True} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -752,13 +922,14 @@ def test_subnet_list_no_dhcp(self): columns, data = self.cmd.take_action(parsed_args) filters = {'enable_dhcp': False, 'is_dhcp_enabled': False} - self.network.subnets.assert_called_once_with(**filters) + 
self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_list_service_type(self): arglist = [ - '--service-type', 'network:router_gateway', + '--service-type', + 'network:router_gateway', ] verifylist = [ ('service_types', ['network:router_gateway']), @@ -767,7 +938,7 @@ def test_subnet_list_service_type(self): columns, data = self.cmd.take_action(parsed_args) filters = {'service_types': ['network:router_gateway']} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -775,7 +946,8 @@ def test_subnet_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -785,25 +957,33 @@ def test_subnet_list_project(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_list_service_type_multiple(self): arglist = [ - '--service-type', 'network:router_gateway', - '--service-type', 'network:floatingip_agent_gateway', + '--service-type', + 'network:router_gateway', + '--service-type', + 'network:floatingip_agent_gateway', ] verifylist = [ - ('service_types', ['network:router_gateway', - 'network:floatingip_agent_gateway']), + ( + 'service_types', + ['network:router_gateway', 'network:floatingip_agent_gateway'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - filters = {'service_types': ['network:router_gateway', - 'network:floatingip_agent_gateway']} - self.network.subnets.assert_called_once_with(**filters) + filters = { + 'service_types': [ + 'network:router_gateway', + 'network:floatingip_agent_gateway', + ] + } + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -811,8 +991,10 @@ def test_subnet_list_project_domain(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -823,15 +1005,16 @@ def test_subnet_list_project_domain(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_list_network(self): network = network_fakes.create_one_network() - self.network.find_network = mock.Mock(return_value=network) + self.network_client.find_network.return_value = network arglist = [ - '--network', network.id, + '--network', + network.id, ] verifylist = [ ('network', network.id), @@ -841,15 +1024,16 @@ def test_subnet_list_network(self): columns, data = self.cmd.take_action(parsed_args) filters = {'network_id': network.id} - 
self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_list_gateway(self): subnet = network_fakes.FakeSubnet.create_one_subnet() - self.network.find_network = mock.Mock(return_value=subnet) + self.network_client.find_network.return_value = subnet arglist = [ - '--gateway', subnet.gateway_ip, + '--gateway', + subnet.gateway_ip, ] verifylist = [ ('gateway', subnet.gateway_ip), @@ -859,15 +1043,16 @@ def test_subnet_list_gateway(self): columns, data = self.cmd.take_action(parsed_args) filters = {'gateway_ip': subnet.gateway_ip} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_list_name(self): subnet = network_fakes.FakeSubnet.create_one_subnet() - self.network.find_network = mock.Mock(return_value=subnet) + self.network_client.find_network.return_value = subnet arglist = [ - '--name', subnet.name, + '--name', + subnet.name, ] verifylist = [ ('name', subnet.name), @@ -877,15 +1062,16 @@ def test_subnet_list_name(self): columns, data = self.cmd.take_action(parsed_args) filters = {'name': subnet.name} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_list_subnet_range(self): subnet = network_fakes.FakeSubnet.create_one_subnet() - self.network.find_network = mock.Mock(return_value=subnet) + self.network_client.find_network.return_value = subnet arglist = [ - '--subnet-range', subnet.cidr, + '--subnet-range', + subnet.cidr, ] verifylist = [ ('subnet_range', subnet.cidr), @@ -895,18 +1081,21 @@ def test_subnet_list_subnet_range(self): columns, data = self.cmd.take_action(parsed_args) filters = {'cidr': subnet.cidr} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_list_subnetpool_by_name(self): subnet_pool = network_fakes.FakeSubnetPool.create_one_subnet_pool() subnet = network_fakes.FakeSubnet.create_one_subnet( - {'subnetpool_id': subnet_pool.id}) - self.network.find_network = mock.Mock(return_value=subnet) - self.network.find_subnet_pool = mock.Mock(return_value=subnet_pool) + {'subnetpool_id': subnet_pool.id} + ) + self.network_client.find_network.return_value = subnet + self.network_client.find_subnet_pool.return_value = subnet_pool + arglist = [ - '--subnet-pool', subnet_pool.name, + '--subnet-pool', + subnet_pool.name, ] verifylist = [ ('subnet_pool', subnet_pool.name), @@ -916,18 +1105,21 @@ def test_subnet_list_subnetpool_by_name(self): columns, data = self.cmd.take_action(parsed_args) filters = {'subnetpool_id': subnet_pool.id} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_list_subnetpool_by_id(self): subnet_pool = network_fakes.FakeSubnetPool.create_one_subnet_pool() subnet = network_fakes.FakeSubnet.create_one_subnet( - {'subnetpool_id': subnet_pool.id}) - self.network.find_network = mock.Mock(return_value=subnet) - 
self.network.find_subnet_pool = mock.Mock(return_value=subnet_pool) + {'subnetpool_id': subnet_pool.id} + ) + self.network_client.find_network.return_value = subnet + self.network_client.find_subnet_pool.return_value = subnet_pool + arglist = [ - '--subnet-pool', subnet_pool.id, + '--subnet-pool', + subnet_pool.id, ] verifylist = [ ('subnet_pool', subnet_pool.id), @@ -937,16 +1129,20 @@ def test_subnet_list_subnetpool_by_id(self): columns, data = self.cmd.take_action(parsed_args) filters = {'subnetpool_id': subnet_pool.id} - self.network.subnets.assert_called_once_with(**filters) + self.network_client.subnets.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_list_with_tag_options(self): arglist = [ - '--tags', 'red,blue', - '--any-tags', 'red,green', - '--not-tags', 'orange,yellow', - '--not-any-tags', 'black,white', + '--tags', + 'red,blue', + '--any-tags', + 'red,green', + '--not-tags', + 'orange,yellow', + '--not-any-tags', + 'black,white', ] verifylist = [ ('tags', ['red', 'blue']), @@ -957,33 +1153,37 @@ def test_list_with_tag_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.subnets.assert_called_once_with( - **{'tags': 'red,blue', - 'any_tags': 'red,green', - 'not_tags': 'orange,yellow', - 'not_any_tags': 'black,white'} + self.network_client.subnets.assert_called_once_with( + **{ + 'tags': 'red,blue', + 'any_tags': 'red,green', + 'not_tags': 'orange,yellow', + 'not_any_tags': 'black,white', + } ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestSetSubnet(TestSubnet): - _subnet = network_fakes.FakeSubnet.create_one_subnet( - {'tags': ['green', 'red']}) + {'tags': ['green', 'red']} + ) def setUp(self): - super(TestSetSubnet, self).setUp() - self.network.update_subnet = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) - self.network.find_subnet = mock.Mock(return_value=self._subnet) - self.cmd = subnet_v2.SetSubnet(self.app, self.namespace) + super().setUp() + self.network_client.update_subnet.return_value = None + self.network_client.set_tags.return_value = None + self.network_client.find_subnet.return_value = self._subnet + self.cmd = subnet_v2.SetSubnet(self.app, None) def test_set_this(self): arglist = [ - "--name", "new_subnet", + "--name", + "new_subnet", "--dhcp", - "--gateway", self._subnet.gateway_ip, + "--gateway", + self._subnet.gateway_ip, self._subnet.name, ] verifylist = [ @@ -1000,14 +1200,18 @@ def test_set_this(self): 'gateway_ip': self._subnet.gateway_ip, 'name': "new_subnet", } - self.network.update_subnet.assert_called_with(self._subnet, **attrs) + self.network_client.update_subnet.assert_called_with( + self._subnet, **attrs + ) self.assertIsNone(result) def test_set_that(self): arglist = [ - "--name", "new_subnet", + "--name", + "new_subnet", "--no-dhcp", - "--gateway", "none", + "--gateway", + "none", self._subnet.name, ] verifylist = [ @@ -1024,28 +1228,37 @@ def test_set_that(self): 'gateway_ip': None, 'name': "new_subnet", } - self.network.update_subnet.assert_called_with(self._subnet, **attrs) + self.network_client.update_subnet.assert_called_with( + self._subnet, **attrs + ) self.assertIsNone(result) def test_set_nothing(self): - arglist = [self._subnet.name, ] + arglist = [ + self._subnet.name, + ] verifylist = [('subnet', self._subnet.name)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = 
self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_subnet.called) - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.update_subnet.called) + self.assertFalse(self.network_client.set_tags.called) self.assertIsNone(result) def test_append_options(self): _testsubnet = network_fakes.FakeSubnet.create_one_subnet( - {'dns_nameservers': ["10.0.0.1"], - 'service_types': ["network:router_gateway"]}) - self.network.find_subnet = mock.Mock(return_value=_testsubnet) + { + 'dns_nameservers': ["10.0.0.1"], + 'service_types': ["network:router_gateway"], + } + ) + self.network_client.find_subnet.return_value = _testsubnet arglist = [ - '--dns-nameserver', '10.0.0.2', - '--service-type', 'network:floatingip_agent_gateway', + '--dns-nameserver', + '10.0.0.2', + '--service-type', + 'network:floatingip_agent_gateway', _testsubnet.name, ] verifylist = [ @@ -1056,18 +1269,23 @@ def test_append_options(self): result = self.cmd.take_action(parsed_args) attrs = { 'dns_nameservers': ['10.0.0.2', '10.0.0.1'], - 'service_types': ['network:floatingip_agent_gateway', - 'network:router_gateway'], + 'service_types': [ + 'network:floatingip_agent_gateway', + 'network:router_gateway', + ], } - self.network.update_subnet.assert_called_once_with( - _testsubnet, **attrs) + self.network_client.update_subnet.assert_called_once_with( + _testsubnet, **attrs + ) self.assertIsNone(result) def test_set_non_append_options(self): arglist = [ - "--description", "new_description", + "--description", + "new_description", "--dhcp", - "--gateway", self._subnet.gateway_ip, + "--gateway", + self._subnet.gateway_ip, self._subnet.name, ] verifylist = [ @@ -1084,31 +1302,42 @@ def test_set_non_append_options(self): 'gateway_ip': self._subnet.gateway_ip, 'description': "new_description", } - self.network.update_subnet.assert_called_with(self._subnet, **attrs) + self.network_client.update_subnet.assert_called_with( + self._subnet, **attrs + ) self.assertIsNone(result) def test_overwrite_options(self): _testsubnet = network_fakes.FakeSubnet.create_one_subnet( - {'host_routes': [{'destination': '10.20.20.0/24', - 'nexthop': '10.20.20.1'}], - 'allocation_pools': [{'start': '8.8.8.200', - 'end': '8.8.8.250'}], - 'dns_nameservers': ["10.0.0.1"], }) - self.network.find_subnet = mock.Mock(return_value=_testsubnet) + { + 'host_routes': [ + {'destination': '10.20.20.0/24', 'nexthop': '10.20.20.1'} + ], + 'allocation_pools': [ + {'start': '8.8.8.200', 'end': '8.8.8.250'} + ], + 'dns_nameservers': ["10.0.0.1"], + } + ) + self.network_client.find_subnet.return_value = _testsubnet arglist = [ - '--host-route', 'destination=10.30.30.30/24,gateway=10.30.30.1', + '--host-route', + 'destination=10.30.30.30/24,gateway=10.30.30.1', '--no-host-route', - '--allocation-pool', 'start=8.8.8.100,end=8.8.8.150', + '--allocation-pool', + 'start=8.8.8.100,end=8.8.8.150', '--no-allocation-pool', - '--dns-nameserver', '10.1.10.1', + '--dns-nameserver', + '10.1.10.1', '--no-dns-nameservers', _testsubnet.name, ] verifylist = [ - ('host_routes', [{ - "destination": "10.30.30.30/24", "gateway": "10.30.30.1"}]), - ('allocation_pools', [{ - 'start': '8.8.8.100', 'end': '8.8.8.150'}]), + ( + 'host_routes', + [{"destination": "10.30.30.30/24", "gateway": "10.30.30.1"}], + ), + ('allocation_pools', [{'start': '8.8.8.100', 'end': '8.8.8.150'}]), ('dns_nameservers', ['10.1.10.1']), ('no_dns_nameservers', True), ('no_host_route', True), @@ -1117,23 +1346,30 @@ def test_overwrite_options(self): parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = { - 'host_routes': [{ - "destination": "10.30.30.30/24", "nexthop": "10.30.30.1"}], + 'host_routes': [ + {"destination": "10.30.30.30/24", "nexthop": "10.30.30.1"} + ], 'allocation_pools': [{'start': '8.8.8.100', 'end': '8.8.8.150'}], 'dns_nameservers': ["10.1.10.1"], } - self.network.update_subnet.assert_called_once_with( - _testsubnet, **attrs) + self.network_client.update_subnet.assert_called_once_with( + _testsubnet, **attrs + ) self.assertIsNone(result) def test_clear_options(self): _testsubnet = network_fakes.FakeSubnet.create_one_subnet( - {'host_routes': [{'destination': '10.20.20.0/24', - 'nexthop': '10.20.20.1'}], - 'allocation_pools': [{'start': '8.8.8.200', - 'end': '8.8.8.250'}], - 'dns_nameservers': ['10.0.0.1'], }) - self.network.find_subnet = mock.Mock(return_value=_testsubnet) + { + 'host_routes': [ + {'destination': '10.20.20.0/24', 'nexthop': '10.20.20.1'} + ], + 'allocation_pools': [ + {'start': '8.8.8.200', 'end': '8.8.8.250'} + ], + 'dns_nameservers': ['10.0.0.1'], + } + ) + self.network_client.find_subnet.return_value = _testsubnet arglist = [ '--no-host-route', '--no-allocation-pool', @@ -1152,8 +1388,9 @@ def test_clear_options(self): 'allocation_pools': [], 'dns_nameservers': [], } - self.network.update_subnet.assert_called_once_with( - _testsubnet, **attrs) + self.network_client.update_subnet.assert_called_once_with( + _testsubnet, **attrs + ) self.assertIsNone(result) def _test_set_tags(self, with_tags=True): @@ -1166,16 +1403,15 @@ def _test_set_tags(self, with_tags=True): verifylist = [('no_tag', True)] expected_args = [] arglist.append(self._subnet.name) - verifylist.append( - ('subnet', self._subnet.name)) + verifylist.append(('subnet', self._subnet.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_subnet.called) - self.network.set_tags.assert_called_once_with( - self._subnet, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_subnet.called) + self.network_client.set_tags.assert_called_once_with( + self._subnet, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_set_with_tags(self): @@ -1187,24 +1423,32 @@ def test_set_with_no_tag(self): def test_set_segment(self): _net = network_fakes.create_one_network() _segment = network_fakes.create_one_network_segment( - attrs={'network_id': _net.id}) + attrs={'network_id': _net.id} + ) _subnet = network_fakes.FakeSubnet.create_one_subnet( - {'host_routes': [{'destination': '10.20.20.0/24', - 'nexthop': '10.20.20.1'}], - 'allocation_pools': [{'start': '8.8.8.200', - 'end': '8.8.8.250'}], - 'dns_nameservers': ["10.0.0.1"], - 'network_id': _net.id, - 'segment_id': None}) - self.network.find_subnet = mock.Mock(return_value=_subnet) - self.network.find_segment = mock.Mock(return_value=_segment) + { + 'host_routes': [ + {'destination': '10.20.20.0/24', 'nexthop': '10.20.20.1'} + ], + 'allocation_pools': [ + {'start': '8.8.8.200', 'end': '8.8.8.250'} + ], + 'dns_nameservers': ["10.0.0.1"], + 'network_id': _net.id, + 'segment_id': None, + } + ) + self.network_client.find_subnet.return_value = _subnet + self.network_client.find_segment.return_value = _segment arglist = ['--network-segment', _segment.id, _subnet.name] verifylist = [('network_segment', _segment.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = 
self.cmd.take_action(parsed_args) attrs = {'segment_id': _segment.id} - self.network.update_subnet.assert_called_once_with(_subnet, **attrs) - self.network.update_subnet.assert_called_with(_subnet, **attrs) + self.network_client.update_subnet.assert_called_once_with( + _subnet, **attrs + ) + self.network_client.update_subnet.assert_called_with(_subnet, **attrs) self.assertIsNone(result) @@ -1237,7 +1481,7 @@ class TestShowSubnet(TestSubnet): subnet_v2.AllocationPoolsColumn(_subnet.allocation_pools), _subnet.cidr, _subnet.description, - format_columns.ListColumn(_subnet.dns_nameservers), + subnet_v2.UnsortedListColumn(_subnet.dns_nameservers), _subnet.enable_dhcp, _subnet.gateway_ip, subnet_v2.HostRoutesColumn(_subnet.host_routes), @@ -1255,12 +1499,12 @@ class TestShowSubnet(TestSubnet): ) def setUp(self): - super(TestShowSubnet, self).setUp() + super().setUp() # Get the command object to test - self.cmd = subnet_v2.ShowSubnet(self.app, self.namespace) + self.cmd = subnet_v2.ShowSubnet(self.app, None) - self.network.find_subnet = mock.Mock(return_value=self._subnet) + self.network_client.find_subnet.return_value = self._subnet def test_show_no_options(self): arglist = [] @@ -1268,8 +1512,13 @@ def test_show_no_options(self): # Testing that a call without the required argument will fail and # throw a "ParserExecption" - self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -1282,53 +1531,65 @@ def test_show_all_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.find_subnet.assert_called_once_with( - self._subnet.name, ignore_missing=False) + self.network_client.find_subnet.assert_called_once_with( + self._subnet.name, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) class TestUnsetSubnet(TestSubnet): - def setUp(self): - super(TestUnsetSubnet, self).setUp() + super().setUp() + # Add three dns_nameserver entries so we can verify ordering self._testsubnet = network_fakes.FakeSubnet.create_one_subnet( - {'dns_nameservers': ['8.8.8.8', - '8.8.8.4'], - 'host_routes': [{'destination': '10.20.20.0/24', - 'nexthop': '10.20.20.1'}, - {'destination': '10.30.30.30/24', - 'nexthop': '10.30.30.1'}], - 'allocation_pools': [{'start': '8.8.8.100', - 'end': '8.8.8.150'}, - {'start': '8.8.8.160', - 'end': '8.8.8.170'}], - 'service_types': ['network:router_gateway', - 'network:floatingip_agent_gateway'], - 'gateway_ip': 'fe80::a00a:0:c0de:0:1', - 'tags': ['green', 'red'], }) - self.network.find_subnet = mock.Mock(return_value=self._testsubnet) - self.network.update_subnet = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + { + 'dns_nameservers': ['8.8.8.8', '8.8.8.4', '8.8.4.4'], + 'host_routes': [ + {'destination': '10.20.20.0/24', 'nexthop': '10.20.20.1'}, + {'destination': '10.30.30.30/24', 'nexthop': '10.30.30.1'}, + ], + 'allocation_pools': [ + {'start': '8.8.8.100', 'end': '8.8.8.150'}, + {'start': '8.8.8.160', 'end': '8.8.8.170'}, + ], + 'service_types': [ + 'network:router_gateway', + 'network:floatingip_agent_gateway', + ], + 'gateway_ip': 'fe80::a00a:0:c0de:0:1', + 'tags': ['green', 'red'], + } + ) + self.network_client.find_subnet.return_value = self._testsubnet + + self.network_client.update_subnet.return_value = 
None + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = subnet_v2.UnsetSubnet(self.app, self.namespace) + self.cmd = subnet_v2.UnsetSubnet(self.app, None) def test_unset_subnet_params(self): + # Remove just the middle dns_nameserver entry, verify still in order arglist = [ - '--dns-nameserver', '8.8.8.8', - '--host-route', 'destination=10.30.30.30/24,gateway=10.30.30.1', - '--allocation-pool', 'start=8.8.8.100,end=8.8.8.150', - '--service-type', 'network:router_gateway', + '--dns-nameserver', + '8.8.8.4', + '--host-route', + 'destination=10.30.30.30/24,gateway=10.30.30.1', + '--allocation-pool', + 'start=8.8.8.100,end=8.8.8.150', + '--service-type', + 'network:router_gateway', '--gateway', self._testsubnet.name, ] verifylist = [ - ('dns_nameservers', ['8.8.8.8']), - ('host_routes', [{ - "destination": "10.30.30.30/24", "gateway": "10.30.30.1"}]), - ('allocation_pools', [{ - 'start': '8.8.8.100', 'end': '8.8.8.150'}]), + ('dns_nameservers', ['8.8.8.4']), + ( + 'host_routes', + [{"destination": "10.30.30.30/24", "gateway": "10.30.30.1"}], + ), + ('allocation_pools', [{'start': '8.8.8.100', 'end': '8.8.8.150'}]), ('service_types', ['network:router_gateway']), ('gateway', True), ] @@ -1337,94 +1598,117 @@ def test_unset_subnet_params(self): result = self.cmd.take_action(parsed_args) attrs = { - 'dns_nameservers': ['8.8.8.4'], - 'host_routes': [{ - "destination": "10.20.20.0/24", "nexthop": "10.20.20.1"}], + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 'host_routes': [ + {"destination": "10.20.20.0/24", "nexthop": "10.20.20.1"} + ], 'allocation_pools': [{'start': '8.8.8.160', 'end': '8.8.8.170'}], 'service_types': ['network:floatingip_agent_gateway'], 'gateway_ip': None, } - self.network.update_subnet.assert_called_once_with( - self._testsubnet, **attrs) + self.network_client.update_subnet.assert_called_once_with( + self._testsubnet, **attrs + ) self.assertIsNone(result) def test_unset_subnet_wrong_host_routes(self): arglist = [ - '--dns-nameserver', '8.8.8.8', - '--host-route', 'destination=10.30.30.30/24,gateway=10.30.30.2', - '--allocation-pool', 'start=8.8.8.100,end=8.8.8.150', + '--dns-nameserver', + '8.8.8.8', + '--host-route', + 'destination=10.30.30.30/24,gateway=10.30.30.2', + '--allocation-pool', + 'start=8.8.8.100,end=8.8.8.150', self._testsubnet.name, ] verifylist = [ ('dns_nameservers', ['8.8.8.8']), - ('host_routes', [{ - "destination": "10.30.30.30/24", "gateway": "10.30.30.2"}]), - ('allocation_pools', [{ - 'start': '8.8.8.100', 'end': '8.8.8.150'}]), + ( + 'host_routes', + [{"destination": "10.30.30.30/24", "gateway": "10.30.30.2"}], + ), + ('allocation_pools', [{'start': '8.8.8.100', 'end': '8.8.8.150'}]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_subnet_wrong_allocation_pool(self): arglist = [ - '--dns-nameserver', '8.8.8.8', - '--host-route', 'destination=10.30.30.30/24,gateway=10.30.30.1', - '--allocation-pool', 'start=8.8.8.100,end=8.8.8.156', + '--dns-nameserver', + '8.8.8.8', + '--host-route', + 'destination=10.30.30.30/24,gateway=10.30.30.1', + '--allocation-pool', + 'start=8.8.8.100,end=8.8.8.156', self._testsubnet.name, ] verifylist = [ ('dns_nameservers', ['8.8.8.8']), - ('host_routes', [{ - "destination": "10.30.30.30/24", "gateway": "10.30.30.1"}]), - ('allocation_pools', [{ - 'start': '8.8.8.100', 'end': 
'8.8.8.156'}]), + ( + 'host_routes', + [{"destination": "10.30.30.30/24", "gateway": "10.30.30.1"}], + ), + ('allocation_pools', [{'start': '8.8.8.100', 'end': '8.8.8.156'}]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_subnet_wrong_dns_nameservers(self): arglist = [ - '--dns-nameserver', '8.8.8.1', - '--host-route', 'destination=10.30.30.30/24,gateway=10.30.30.1', - '--allocation-pool', 'start=8.8.8.100,end=8.8.8.150', + '--dns-nameserver', + '8.8.8.1', + '--host-route', + 'destination=10.30.30.30/24,gateway=10.30.30.1', + '--allocation-pool', + 'start=8.8.8.100,end=8.8.8.150', self._testsubnet.name, ] verifylist = [ ('dns_nameservers', ['8.8.8.1']), - ('host_routes', [{ - "destination": "10.30.30.30/24", "gateway": "10.30.30.1"}]), - ('allocation_pools', [{ - 'start': '8.8.8.100', 'end': '8.8.8.150'}]), + ( + 'host_routes', + [{"destination": "10.30.30.30/24", "gateway": "10.30.30.1"}], + ), + ('allocation_pools', [{'start': '8.8.8.100', 'end': '8.8.8.150'}]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_unset_subnet_wrong_service_type(self): arglist = [ - '--dns-nameserver', '8.8.8.8', - '--host-route', 'destination=10.30.30.30/24,gateway=10.30.30.1', - '--allocation-pool', 'start=8.8.8.100,end=8.8.8.150', - '--service-type', 'network:dhcp', + '--dns-nameserver', + '8.8.8.8', + '--host-route', + 'destination=10.30.30.30/24,gateway=10.30.30.1', + '--allocation-pool', + 'start=8.8.8.100,end=8.8.8.150', + '--service-type', + 'network:dhcp', self._testsubnet.name, ] verifylist = [ ('dns_nameservers', ['8.8.8.8']), - ('host_routes', [{ - "destination": "10.30.30.30/24", "gateway": "10.30.30.1"}]), - ('allocation_pools', [{ - 'start': '8.8.8.100', 'end': '8.8.8.150'}]), + ( + 'host_routes', + [{"destination": "10.30.30.30/24", "gateway": "10.30.30.1"}], + ), + ('allocation_pools', [{'start': '8.8.8.100', 'end': '8.8.8.150'}]), ('service_types', ['network:dhcp']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def _test_unset_tags(self, with_tags=True): if with_tags: @@ -1436,16 +1720,15 @@ def _test_unset_tags(self, with_tags=True): verifylist = [('all_tag', True)] expected_args = [] arglist.append(self._testsubnet.name) - verifylist.append( - ('subnet', self._testsubnet.name)) + verifylist.append(('subnet', self._testsubnet.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_subnet.called) - self.network.set_tags.assert_called_once_with( - self._testsubnet, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_subnet.called) + self.network_client.set_tags.assert_called_once_with( + self._testsubnet, tests_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_unset_with_tags(self): diff --git a/openstackclient/tests/unit/network/v2/test_subnet_pool.py b/openstackclient/tests/unit/network/v2/test_subnet_pool.py index b24906b58b..013550ec1e 100644 --- 
a/openstackclient/tests/unit/network/v2/test_subnet_pool.py +++ b/openstackclient/tests/unit/network/v2/test_subnet_pool.py @@ -9,10 +9,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -import argparse -from unittest import mock from unittest.mock import call from osc_lib.cli import format_columns @@ -21,24 +18,20 @@ from openstackclient.network.v2 import subnet_pool from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3 from openstackclient.tests.unit.network.v2 import fakes as network_fakes -from openstackclient.tests.unit import utils as tests_utils +from openstackclient.tests.unit import utils as test_utils class TestSubnetPool(network_fakes.TestNetworkV2): - def setUp(self): - super(TestSubnetPool, self).setUp() + super().setUp() - # Get a shortcut to the network client - self.network = self.app.client_manager.network # Get a shortcut to the ProjectManager Mock - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects # Get a shortcut to the DomainManager Mock - self.domains_mock = self.app.client_manager.identity.domains + self.domains_mock = self.identity_client.domains class TestCreateSubnetPool(TestSubnetPool): - project = identity_fakes_v3.FakeProject.create_one_project() domain = identity_fakes_v3.FakeDomain.create_one_domain() # The new subnet pool to create. @@ -80,17 +73,18 @@ class TestCreateSubnetPool(TestSubnetPool): ) def setUp(self): - super(TestCreateSubnetPool, self).setUp() + super().setUp() + + self.network_client.create_subnet_pool.return_value = self._subnet_pool - self.network.create_subnet_pool = mock.Mock( - return_value=self._subnet_pool) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = subnet_pool.CreateSubnetPool(self.app, self.namespace) + self.cmd = subnet_pool.CreateSubnetPool(self.app, None) - self.network.find_address_scope = mock.Mock( - return_value=self._address_scope) + self.network_client.find_address_scope.return_value = ( + self._address_scope + ) self.projects_mock.get.return_value = self.project self.domains_mock.get.return_value = self.domain @@ -99,9 +93,14 @@ def test_create_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) - self.assertFalse(self.network.set_tags.called) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + self.assertFalse(self.network_client.set_tags.called) def test_create_no_pool_prefix(self): """Make sure --pool-prefix is a required argument""" @@ -111,12 +110,18 @@ def test_create_no_pool_prefix(self): verifylist = [ ('name', self._subnet_pool.name), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_default_options(self): arglist = [ - '--pool-prefix', '10.0.10.0/24', + '--pool-prefix', + '10.0.10.0/24', self._subnet_pool.name, ] verifylist = [ @@ -125,27 +130,35 @@ def test_create_default_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - 
self.network.create_subnet_pool.assert_called_once_with(**{ - 'prefixes': ['10.0.10.0/24'], - 'name': self._subnet_pool.name, - }) - self.assertFalse(self.network.set_tags.called) + self.network_client.create_subnet_pool.assert_called_once_with( + **{ + 'prefixes': ['10.0.10.0/24'], + 'name': self._subnet_pool.name, + } + ) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_prefixlen_options(self): arglist = [ - '--default-prefix-length', self._subnet_pool.default_prefixlen, - '--max-prefix-length', self._subnet_pool.max_prefixlen, - '--min-prefix-length', self._subnet_pool.min_prefixlen, - '--pool-prefix', '10.0.10.0/24', + '--default-prefix-length', + self._subnet_pool.default_prefixlen, + '--max-prefix-length', + self._subnet_pool.max_prefixlen, + '--min-prefix-length', + self._subnet_pool.min_prefixlen, + '--pool-prefix', + '10.0.10.0/24', self._subnet_pool.name, ] verifylist = [ - ('default_prefix_length', - int(self._subnet_pool.default_prefixlen)), + ( + 'default_prefix_length', + int(self._subnet_pool.default_prefixlen), + ), ('max_prefix_length', int(self._subnet_pool.max_prefixlen)), ('min_prefix_length', int(self._subnet_pool.min_prefixlen)), ('name', self._subnet_pool.name), @@ -153,36 +166,47 @@ def test_create_prefixlen_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet_pool.assert_called_once_with(**{ - 'default_prefixlen': int(self._subnet_pool.default_prefixlen), - 'max_prefixlen': int(self._subnet_pool.max_prefixlen), - 'min_prefixlen': int(self._subnet_pool.min_prefixlen), - 'prefixes': ['10.0.10.0/24'], - 'name': self._subnet_pool.name, - }) + self.network_client.create_subnet_pool.assert_called_once_with( + **{ + 'default_prefixlen': int(self._subnet_pool.default_prefixlen), + 'max_prefixlen': int(self._subnet_pool.max_prefixlen), + 'min_prefixlen': int(self._subnet_pool.min_prefixlen), + 'prefixes': ['10.0.10.0/24'], + 'name': self._subnet_pool.name, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_len_negative(self): arglist = [ self._subnet_pool.name, - '--min-prefix-length', '-16', + '--min-prefix-length', + '-16', ] verifylist = [ ('subnet_pool', self._subnet_pool.name), ('min_prefix_length', '-16'), ] - self.assertRaises(argparse.ArgumentTypeError, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_create_project_domain(self): arglist = [ - '--pool-prefix', '10.0.10.0/24', - "--project", self.project.name, - "--project-domain", self.domain.name, + '--pool-prefix', + '10.0.10.0/24', + "--project", + self.project.name, + "--project-domain", + self.domain.name, self._subnet_pool.name, ] verifylist = [ @@ -193,20 +217,24 @@ def test_create_project_domain(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet_pool.assert_called_once_with(**{ - 'prefixes': ['10.0.10.0/24'], - 'project_id': self.project.id, - 'name': self._subnet_pool.name, - }) + self.network_client.create_subnet_pool.assert_called_once_with( + **{ + 'prefixes': ['10.0.10.0/24'], + 'project_id': self.project.id, + 
'name': self._subnet_pool.name, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_address_scope_option(self): arglist = [ - '--pool-prefix', '10.0.10.0/24', - '--address-scope', self._address_scope.id, + '--pool-prefix', + '10.0.10.0/24', + '--address-scope', + self._address_scope.id, self._subnet_pool.name, ] verifylist = [ @@ -216,19 +244,22 @@ def test_create_address_scope_option(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet_pool.assert_called_once_with(**{ - 'prefixes': ['10.0.10.0/24'], - 'address_scope_id': self._address_scope.id, - 'name': self._subnet_pool.name, - }) + self.network_client.create_subnet_pool.assert_called_once_with( + **{ + 'prefixes': ['10.0.10.0/24'], + 'address_scope_id': self._address_scope.id, + 'name': self._subnet_pool.name, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_default_and_shared_options(self): arglist = [ - '--pool-prefix', '10.0.10.0/24', + '--pool-prefix', + '10.0.10.0/24', '--default', '--share', self._subnet_pool.name, @@ -241,21 +272,25 @@ def test_create_default_and_shared_options(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet_pool.assert_called_once_with(**{ - 'is_default': True, - 'name': self._subnet_pool.name, - 'prefixes': ['10.0.10.0/24'], - 'shared': True, - }) + self.network_client.create_subnet_pool.assert_called_once_with( + **{ + 'is_default': True, + 'name': self._subnet_pool.name, + 'prefixes': ['10.0.10.0/24'], + 'shared': True, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_description(self): arglist = [ - '--pool-prefix', '10.0.10.0/24', - '--description', self._subnet_pool.description, + '--pool-prefix', + '10.0.10.0/24', + '--description', + self._subnet_pool.description, self._subnet_pool.name, ] verifylist = [ @@ -265,20 +300,24 @@ def test_create_with_description(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet_pool.assert_called_once_with(**{ - 'name': self._subnet_pool.name, - 'prefixes': ['10.0.10.0/24'], - 'description': self._subnet_pool.description, - }) + self.network_client.create_subnet_pool.assert_called_once_with( + **{ + 'name': self._subnet_pool.name, + 'prefixes': ['10.0.10.0/24'], + 'description': self._subnet_pool.description, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_create_with_default_quota(self): arglist = [ - '--pool-prefix', '10.0.10.0/24', - '--default-quota', '10', + '--pool-prefix', + '10.0.10.0/24', + '--default-quota', + '10', self._subnet_pool.name, ] verifylist = [ @@ -287,18 +326,21 @@ def test_create_with_default_quota(self): ('name', self._subnet_pool.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) - self.network.create_subnet_pool.assert_called_once_with(**{ - 'name': self._subnet_pool.name, - 'prefixes': ['10.0.10.0/24'], - 'default_quota': 10, - }) + columns, data = self.cmd.take_action(parsed_args) + 
self.network_client.create_subnet_pool.assert_called_once_with( + **{ + 'name': self._subnet_pool.name, + 'prefixes': ['10.0.10.0/24'], + 'default_quota': 10, + } + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def _test_create_with_tag(self, add_tags=True): arglist = [ - '--pool-prefix', '10.0.10.0/24', + '--pool-prefix', + '10.0.10.0/24', self._subnet_pool.name, ] if add_tags: @@ -315,18 +357,17 @@ def _test_create_with_tag(self, add_tags=True): verifylist.append(('no_tag', True)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = (self.cmd.take_action(parsed_args)) + columns, data = self.cmd.take_action(parsed_args) - self.network.create_subnet_pool.assert_called_once_with( - prefixes=['10.0.10.0/24'], - name=self._subnet_pool.name + self.network_client.create_subnet_pool.assert_called_once_with( + prefixes=['10.0.10.0/24'], name=self._subnet_pool.name ) if add_tags: - self.network.set_tags.assert_called_once_with( - self._subnet_pool, - tests_utils.CompareBySet(['red', 'blue'])) + self.network_client.set_tags.assert_called_once_with( + self._subnet_pool, test_utils.CompareBySet(['red', 'blue']) + ) else: - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.set_tags.called) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) @@ -338,21 +379,20 @@ def test_create_with_no_tag(self): class TestDeleteSubnetPool(TestSubnetPool): - # The subnet pools to delete. _subnet_pools = network_fakes.FakeSubnetPool.create_subnet_pools(count=2) def setUp(self): - super(TestDeleteSubnetPool, self).setUp() + super().setUp() - self.network.delete_subnet_pool = mock.Mock(return_value=None) + self.network_client.delete_subnet_pool.return_value = None - self.network.find_subnet_pool = ( + self.network_client.find_subnet_pool = ( network_fakes.FakeSubnetPool.get_subnet_pools(self._subnet_pools) ) # Get the command object to test - self.cmd = subnet_pool.DeleteSubnetPool(self.app, self.namespace) + self.cmd = subnet_pool.DeleteSubnetPool(self.app, None) def test_subnet_pool_delete(self): arglist = [ @@ -365,8 +405,9 @@ def test_subnet_pool_delete(self): result = self.cmd.take_action(parsed_args) - self.network.delete_subnet_pool.assert_called_once_with( - self._subnet_pools[0]) + self.network_client.delete_subnet_pool.assert_called_once_with( + self._subnet_pools[0] + ) self.assertIsNone(result) def test_multi_subnet_pools_delete(self): @@ -385,7 +426,7 @@ def test_multi_subnet_pools_delete(self): calls = [] for s in self._subnet_pools: calls.append(call(s)) - self.network.delete_subnet_pool.assert_has_calls(calls) + self.network_client.delete_subnet_pool.assert_has_calls(calls) self.assertIsNone(result) def test_multi_subnet_pools_delete_with_exception(self): @@ -394,15 +435,15 @@ def test_multi_subnet_pools_delete_with_exception(self): 'unexist_subnet_pool', ] verifylist = [ - ('subnet_pool', - [self._subnet_pools[0].name, 'unexist_subnet_pool']), + ( + 'subnet_pool', + [self._subnet_pools[0].name, 'unexist_subnet_pool'], + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self._subnet_pools[0], exceptions.CommandError] - self.network.find_subnet_pool = ( - mock.Mock(side_effect=find_mock_result) - ) + self.network_client.find_subnet_pool.side_effect = find_mock_result try: self.cmd.take_action(parsed_args) @@ -410,11 +451,13 @@ def test_multi_subnet_pools_delete_with_exception(self): except exceptions.CommandError as e: self.assertEqual('1 of 2 
subnet pools failed to delete.', str(e)) - self.network.find_subnet_pool.assert_any_call( - self._subnet_pools[0].name, ignore_missing=False) - self.network.find_subnet_pool.assert_any_call( - 'unexist_subnet_pool', ignore_missing=False) - self.network.delete_subnet_pool.assert_called_once_with( + self.network_client.find_subnet_pool.assert_any_call( + self._subnet_pools[0].name, ignore_missing=False + ) + self.network_client.find_subnet_pool.assert_any_call( + 'unexist_subnet_pool', ignore_missing=False + ) + self.network_client.delete_subnet_pool.assert_called_once_with( self._subnet_pools[0] ) @@ -438,32 +481,36 @@ class TestListSubnetPool(TestSubnetPool): data = [] for pool in _subnet_pools: - data.append(( - pool.id, - pool.name, - format_columns.ListColumn(pool.prefixes), - )) + data.append( + ( + pool.id, + pool.name, + format_columns.ListColumn(pool.prefixes), + ) + ) data_long = [] for pool in _subnet_pools: - data_long.append(( - pool.id, - pool.name, - format_columns.ListColumn(pool.prefixes), - pool.default_prefixlen, - pool.address_scope_id, - pool.is_default, - pool.shared, - format_columns.ListColumn(pool.tags), - )) + data_long.append( + ( + pool.id, + pool.name, + format_columns.ListColumn(pool.prefixes), + pool.default_prefixlen, + pool.address_scope_id, + pool.is_default, + pool.shared, + format_columns.ListColumn(pool.tags), + ) + ) def setUp(self): - super(TestListSubnetPool, self).setUp() + super().setUp() # Get the command object to test - self.cmd = subnet_pool.ListSubnetPool(self.app, self.namespace) + self.cmd = subnet_pool.ListSubnetPool(self.app, None) - self.network.subnet_pools = mock.Mock(return_value=self._subnet_pools) + self.network_client.subnet_pools.return_value = self._subnet_pools def test_subnet_pool_list_no_option(self): arglist = [] @@ -474,7 +521,7 @@ def test_subnet_pool_list_no_option(self): columns, data = self.cmd.take_action(parsed_args) - self.network.subnet_pools.assert_called_once_with() + self.network_client.subnet_pools.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -489,7 +536,7 @@ def test_subnet_pool_list_long(self): columns, data = self.cmd.take_action(parsed_args) - self.network.subnet_pools.assert_called_once_with() + self.network_client.subnet_pools.assert_called_once_with() self.assertEqual(self.columns_long, columns) self.assertCountEqual(self.data_long, list(data)) @@ -505,7 +552,7 @@ def test_subnet_pool_list_no_share(self): columns, data = self.cmd.take_action(parsed_args) filters = {'shared': False, 'is_shared': False} - self.network.subnet_pools.assert_called_once_with(**filters) + self.network_client.subnet_pools.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -521,7 +568,7 @@ def test_subnet_pool_list_share(self): columns, data = self.cmd.take_action(parsed_args) filters = {'shared': True, 'is_shared': True} - self.network.subnet_pools.assert_called_once_with(**filters) + self.network_client.subnet_pools.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -537,7 +584,7 @@ def test_subnet_pool_list_no_default(self): columns, data = self.cmd.take_action(parsed_args) filters = {'is_default': False} - self.network.subnet_pools.assert_called_once_with(**filters) + self.network_client.subnet_pools.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ 
-553,7 +600,7 @@ def test_subnet_pool_list_default(self): columns, data = self.cmd.take_action(parsed_args) filters = {'is_default': True} - self.network.subnet_pools.assert_called_once_with(**filters) + self.network_client.subnet_pools.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -561,7 +608,8 @@ def test_subnet_pool_list_project(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, + '--project', + project.id, ] verifylist = [ ('project', project.id), @@ -571,7 +619,7 @@ def test_subnet_pool_list_project(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.subnet_pools.assert_called_once_with(**filters) + self.network_client.subnet_pools.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -579,8 +627,10 @@ def test_subnet_pool_list_project_domain(self): project = identity_fakes_v3.FakeProject.create_one_project() self.projects_mock.get.return_value = project arglist = [ - '--project', project.id, - '--project-domain', project.domain_id, + '--project', + project.id, + '--project-domain', + project.domain_id, ] verifylist = [ ('project', project.id), @@ -591,15 +641,16 @@ def test_subnet_pool_list_project_domain(self): columns, data = self.cmd.take_action(parsed_args) filters = {'project_id': project.id} - self.network.subnet_pools.assert_called_once_with(**filters) + self.network_client.subnet_pools.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_pool_list_name(self): subnet_pool = network_fakes.FakeSubnetPool.create_one_subnet_pool() - self.network.find_network = mock.Mock(return_value=subnet_pool) + self.network_client.find_network.return_value = subnet_pool arglist = [ - '--name', subnet_pool.name, + '--name', + subnet_pool.name, ] verifylist = [ ('name', subnet_pool.name), @@ -609,15 +660,17 @@ def test_subnet_pool_list_name(self): columns, data = self.cmd.take_action(parsed_args) filters = {'name': subnet_pool.name} - self.network.subnet_pools.assert_called_once_with(**filters) + self.network_client.subnet_pools.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_subnet_pool_list_address_scope(self): addr_scope = network_fakes.create_one_address_scope() - self.network.find_address_scope = mock.Mock(return_value=addr_scope) + self.network_client.find_address_scope.return_value = addr_scope + arglist = [ - '--address-scope', addr_scope.id, + '--address-scope', + addr_scope.id, ] verifylist = [ ('address_scope', addr_scope.id), @@ -627,16 +680,20 @@ def test_subnet_pool_list_address_scope(self): columns, data = self.cmd.take_action(parsed_args) filters = {'address_scope_id': addr_scope.id} - self.network.subnet_pools.assert_called_once_with(**filters) + self.network_client.subnet_pools.assert_called_once_with(**filters) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_list_with_tag_options(self): arglist = [ - '--tags', 'red,blue', - '--any-tags', 'red,green', - '--not-tags', 'orange,yellow', - '--not-any-tags', 'black,white', + '--tags', + 'red,blue', + '--any-tags', + 'red,green', + '--not-tags', + 'orange,yellow', + '--not-any-tags', + 'black,white', ] verifylist = [ ('tags', 
['red', 'blue']), @@ -647,46 +704,49 @@ def test_list_with_tag_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.network.subnet_pools.assert_called_once_with( - **{'tags': 'red,blue', - 'any_tags': 'red,green', - 'not_tags': 'orange,yellow', - 'not_any_tags': 'black,white'} + self.network_client.subnet_pools.assert_called_once_with( + **{ + 'tags': 'red,blue', + 'any_tags': 'red,green', + 'not_tags': 'orange,yellow', + 'not_any_tags': 'black,white', + } ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) class TestSetSubnetPool(TestSubnetPool): - # The subnet_pool to set. _subnet_pool = network_fakes.FakeSubnetPool.create_one_subnet_pool( - {'default_quota': 10, - 'tags': ['green', 'red']} + {'default_quota': 10, 'tags': ['green', 'red']} ) _address_scope = network_fakes.create_one_address_scope() def setUp(self): - super(TestSetSubnetPool, self).setUp() + super().setUp() - self.network.update_subnet_pool = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + self.network_client.update_subnet_pool.return_value = None + self.network_client.set_tags.return_value = None - self.network.find_subnet_pool = mock.Mock( - return_value=self._subnet_pool) + self.network_client.find_subnet_pool.return_value = self._subnet_pool - self.network.find_address_scope = mock.Mock( - return_value=self._address_scope) + self.network_client.find_address_scope.return_value = ( + self._address_scope + ) # Get the command object to test - self.cmd = subnet_pool.SetSubnetPool(self.app, self.namespace) + self.cmd = subnet_pool.SetSubnetPool(self.app, None) def test_set_this(self): arglist = [ - '--name', 'noob', - '--default-prefix-length', '8', - '--min-prefix-length', '8', + '--name', + 'noob', + '--default-prefix-length', + '8', + '--min-prefix-length', + '8', self._subnet_pool.name, ] verifylist = [ @@ -704,15 +764,19 @@ def test_set_this(self): 'default_prefixlen': 8, 'min_prefixlen': 8, } - self.network.update_subnet_pool.assert_called_once_with( - self._subnet_pool, **attrs) + self.network_client.update_subnet_pool.assert_called_once_with( + self._subnet_pool, **attrs + ) self.assertIsNone(result) def test_set_that(self): arglist = [ - '--pool-prefix', '10.0.1.0/24', - '--pool-prefix', '10.0.2.0/24', - '--max-prefix-length', '16', + '--pool-prefix', + '10.0.1.0/24', + '--pool-prefix', + '10.0.2.0/24', + '--max-prefix-length', + '16', self._subnet_pool.name, ] verifylist = [ @@ -730,24 +794,30 @@ def test_set_that(self): 'prefixes': prefixes, 'max_prefixlen': 16, } - self.network.update_subnet_pool.assert_called_once_with( - self._subnet_pool, **attrs) + self.network_client.update_subnet_pool.assert_called_once_with( + self._subnet_pool, **attrs + ) self.assertIsNone(result) def test_set_nothing(self): - arglist = [self._subnet_pool.name, ] - verifylist = [('subnet_pool', self._subnet_pool.name), ] + arglist = [ + self._subnet_pool.name, + ] + verifylist = [ + ('subnet_pool', self._subnet_pool.name), + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_subnet_pool.called) - self.assertFalse(self.network.set_tags.called) + self.assertFalse(self.network_client.update_subnet_pool.called) + self.assertFalse(self.network_client.set_tags.called) self.assertIsNone(result) def test_set_len_negative(self): arglist = [ - '--max-prefix-length', '-16', + '--max-prefix-length', + 
'-16', self._subnet_pool.name, ] verifylist = [ @@ -755,12 +825,18 @@ def test_set_len_negative(self): ('subnet_pool', self._subnet_pool.name), ] - self.assertRaises(argparse.ArgumentTypeError, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_set_address_scope(self): arglist = [ - '--address-scope', self._address_scope.id, + '--address-scope', + self._address_scope.id, self._subnet_pool.name, ] verifylist = [ @@ -774,8 +850,9 @@ def test_set_address_scope(self): attrs = { 'address_scope_id': self._address_scope.id, } - self.network.update_subnet_pool.assert_called_once_with( - self._subnet_pool, **attrs) + self.network_client.update_subnet_pool.assert_called_once_with( + self._subnet_pool, **attrs + ) self.assertIsNone(result) def test_set_no_address_scope(self): @@ -794,13 +871,15 @@ def test_set_no_address_scope(self): attrs = { 'address_scope_id': None, } - self.network.update_subnet_pool.assert_called_once_with( - self._subnet_pool, **attrs) + self.network_client.update_subnet_pool.assert_called_once_with( + self._subnet_pool, **attrs + ) self.assertIsNone(result) def test_set_no_address_scope_conflict(self): arglist = [ - '--address-scope', self._address_scope.id, + '--address-scope', + self._address_scope.id, '--no-address-scope', self._subnet_pool.name, ] @@ -811,8 +890,13 @@ def test_set_no_address_scope_conflict(self): ] # Exclusive arguments will conflict here. - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_set_default(self): arglist = [ @@ -827,11 +911,10 @@ def test_set_default(self): result = self.cmd.take_action(parsed_args) - attrs = { - 'is_default': True - } - self.network.update_subnet_pool.assert_called_once_with( - self._subnet_pool, **attrs) + attrs = {'is_default': True} + self.network_client.update_subnet_pool.assert_called_once_with( + self._subnet_pool, **attrs + ) self.assertIsNone(result) def test_set_no_default(self): @@ -850,8 +933,9 @@ def test_set_no_default(self): attrs = { 'is_default': False, } - self.network.update_subnet_pool.assert_called_once_with( - self._subnet_pool, **attrs) + self.network_client.update_subnet_pool.assert_called_once_with( + self._subnet_pool, **attrs + ) self.assertIsNone(result) def test_set_no_default_conflict(self): @@ -867,12 +951,18 @@ def test_set_no_default_conflict(self): ] # Exclusive arguments will conflict here. 
- self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_set_description(self): arglist = [ - '--description', 'new_description', + '--description', + 'new_description', self._subnet_pool.name, ] verifylist = [ @@ -886,13 +976,15 @@ def test_set_description(self): attrs = { 'description': "new_description", } - self.network.update_subnet_pool.assert_called_once_with( - self._subnet_pool, **attrs) + self.network_client.update_subnet_pool.assert_called_once_with( + self._subnet_pool, **attrs + ) self.assertIsNone(result) def test_set_with_default_quota(self): arglist = [ - '--default-quota', '20', + '--default-quota', + '20', self._subnet_pool.name, ] verifylist = [ @@ -901,9 +993,11 @@ def test_set_with_default_quota(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.network.update_subnet_pool.assert_called_once_with( + self.network_client.update_subnet_pool.assert_called_once_with( self._subnet_pool, - **{'default_quota': 20, } + **{ + 'default_quota': 20, + }, ) self.assertIsNone(result) @@ -917,16 +1011,15 @@ def _test_set_tags(self, with_tags=True): verifylist = [('no_tag', True)] expected_args = [] arglist.append(self._subnet_pool.name) - verifylist.append( - ('subnet_pool', self._subnet_pool.name)) + verifylist.append(('subnet_pool', self._subnet_pool.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_subnet_pool.called) - self.network.set_tags.assert_called_once_with( - self._subnet_pool, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_subnet_pool.called) + self.network_client.set_tags.assert_called_once_with( + self._subnet_pool, test_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_set_with_tags(self): @@ -937,7 +1030,6 @@ def test_set_with_no_tag(self): class TestShowSubnetPool(TestSubnetPool): - # The subnet_pool to set. 
_subnet_pool = network_fakes.FakeSubnetPool.create_one_subnet_pool() @@ -976,21 +1068,24 @@ class TestShowSubnetPool(TestSubnetPool): ) def setUp(self): - super(TestShowSubnetPool, self).setUp() + super().setUp() - self.network.find_subnet_pool = mock.Mock( - return_value=self._subnet_pool - ) + self.network_client.find_subnet_pool.return_value = self._subnet_pool # Get the command object to test - self.cmd = subnet_pool.ShowSubnetPool(self.app, self.namespace) + self.cmd = subnet_pool.ShowSubnetPool(self.app, None) def test_show_no_options(self): arglist = [] verifylist = [] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_show_all_options(self): arglist = [ @@ -1003,26 +1098,25 @@ def test_show_all_options(self): columns, data = self.cmd.take_action(parsed_args) - self.network.find_subnet_pool.assert_called_once_with( - self._subnet_pool.name, - ignore_missing=False + self.network_client.find_subnet_pool.assert_called_once_with( + self._subnet_pool.name, ignore_missing=False ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) class TestUnsetSubnetPool(TestSubnetPool): - def setUp(self): - super(TestUnsetSubnetPool, self).setUp() + super().setUp() self._subnetpool = network_fakes.FakeSubnetPool.create_one_subnet_pool( - {'tags': ['green', 'red']}) - self.network.find_subnet_pool = mock.Mock( - return_value=self._subnetpool) - self.network.update_subnet_pool = mock.Mock(return_value=None) - self.network.set_tags = mock.Mock(return_value=None) + {'tags': ['green', 'red']} + ) + self.network_client.find_subnet_pool.return_value = self._subnetpool + + self.network_client.update_subnet_pool.return_value = None + self.network_client.set_tags.return_value = None # Get the command object to test - self.cmd = subnet_pool.UnsetSubnetPool(self.app, self.namespace) + self.cmd = subnet_pool.UnsetSubnetPool(self.app, None) def _test_unset_tags(self, with_tags=True): if with_tags: @@ -1034,16 +1128,15 @@ def _test_unset_tags(self, with_tags=True): verifylist = [('all_tag', True)] expected_args = [] arglist.append(self._subnetpool.name) - verifylist.append( - ('subnet_pool', self._subnetpool.name)) + verifylist.append(('subnet_pool', self._subnetpool.name)) parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.assertFalse(self.network.update_subnet_pool.called) - self.network.set_tags.assert_called_once_with( - self._subnetpool, - tests_utils.CompareBySet(expected_args)) + self.assertFalse(self.network_client.update_subnet_pool.called) + self.network_client.set_tags.assert_called_once_with( + self._subnetpool, test_utils.CompareBySet(expected_args) + ) self.assertIsNone(result) def test_unset_with_tags(self): diff --git a/openstackclient/tests/unit/object/v1/fakes.py b/openstackclient/tests/unit/object/v1/fakes.py index 1808d5b7d9..eedccdc0e8 100644 --- a/openstackclient/tests/unit/object/v1/fakes.py +++ b/openstackclient/tests/unit/object/v1/fakes.py @@ -81,9 +81,8 @@ class TestObjectv1(utils.TestCommand): - def setUp(self): - super(TestObjectv1, self).setUp() + super().setUp() self.app.client_manager.session = session.Session() self.app.client_manager.object_store = object_store.APIv1( diff --git a/openstackclient/tests/unit/object/v1/test_container.py b/openstackclient/tests/unit/object/v1/test_container.py index 7d3cc8d840..9143df9c9a 100644 
--- a/openstackclient/tests/unit/object/v1/test_container.py +++ b/openstackclient/tests/unit/object/v1/test_container.py @@ -25,19 +25,17 @@ AUTH_URL = "http://0.0.0.0" -class FakeClient(object): - +class FakeClient: def __init__(self, endpoint=None, **kwargs): self.endpoint = AUTH_URL self.token = AUTH_TOKEN class TestContainer(object_fakes.TestObjectv1): - columns = ('Name',) def setUp(self): - super(TestContainer, self).setUp() + super().setUp() self.app.client_manager.object_store = object_store.APIv1( session=mock.Mock(), service_type="object-store", @@ -49,9 +47,8 @@ def setUp(self): @mock.patch('openstackclient.api.object_store_v1.APIv1.object_list') @mock.patch('openstackclient.api.object_store_v1.APIv1.container_delete') class TestContainerDelete(TestContainer): - def setUp(self): - super(TestContainerDelete, self).setUp() + super().setUp() # Get the command object to test self.cmd = container.DeleteContainer(self.app, None) @@ -72,8 +69,7 @@ def test_container_delete(self, c_mock, o_list_mock, o_delete_mock): kwargs = {} c_mock.assert_called_with( - container=object_fakes.container_name, - **kwargs + container=object_fakes.container_name, **kwargs ) self.assertFalse(o_list_mock.called) self.assertFalse(o_delete_mock.called) @@ -97,8 +93,7 @@ def test_recursive_delete(self, c_mock, o_list_mock, o_delete_mock): kwargs = {} c_mock.assert_called_with( - container=object_fakes.container_name, - **kwargs + container=object_fakes.container_name, **kwargs ) o_list_mock.assert_called_with(container=object_fakes.container_name) o_delete_mock.assert_called_with( @@ -125,8 +120,7 @@ def test_r_delete(self, c_mock, o_list_mock, o_delete_mock): kwargs = {} c_mock.assert_called_with( - container=object_fakes.container_name, - **kwargs + container=object_fakes.container_name, **kwargs ) o_list_mock.assert_called_with(container=object_fakes.container_name) o_delete_mock.assert_called_with( @@ -135,13 +129,10 @@ def test_r_delete(self, c_mock, o_list_mock, o_delete_mock): ) -@mock.patch( - 'openstackclient.api.object_store_v1.APIv1.container_list' -) +@mock.patch('openstackclient.api.object_store_v1.APIv1.container_list') class TestContainerList(TestContainer): - def setUp(self): - super(TestContainerList, self).setUp() + super().setUp() # Get the command object to test self.cmd = container.ListContainer(self.app, None) @@ -163,17 +154,14 @@ def test_object_list_containers_no_options(self, c_mock): columns, data = self.cmd.take_action(parsed_args) # Set expected values - kwargs = { - } - c_mock.assert_called_with( - **kwargs - ) + kwargs = {} + c_mock.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( - (object_fakes.container_name, ), - (object_fakes.container_name_3, ), - (object_fakes.container_name_2, ), + (object_fakes.container_name,), + (object_fakes.container_name_3,), + (object_fakes.container_name_2,), ) self.assertEqual(datalist, tuple(data)) @@ -184,7 +172,8 @@ def test_object_list_containers_prefix(self, c_mock): ] arglist = [ - '--prefix', 'bit', + '--prefix', + 'bit', ] verifylist = [ ('prefix', 'bit'), @@ -200,14 +189,12 @@ def test_object_list_containers_prefix(self, c_mock): kwargs = { 'prefix': 'bit', } - c_mock.assert_called_with( - **kwargs - ) + c_mock.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( - (object_fakes.container_name, ), - (object_fakes.container_name_3, ), + (object_fakes.container_name,), + (object_fakes.container_name_3,), ) self.assertEqual(datalist, tuple(data)) @@ -218,8 +205,10 @@ def 
test_object_list_containers_marker(self, c_mock): ] arglist = [ - '--marker', object_fakes.container_name, - '--end-marker', object_fakes.container_name_3, + '--marker', + object_fakes.container_name, + '--end-marker', + object_fakes.container_name_3, ] verifylist = [ ('marker', object_fakes.container_name), @@ -237,14 +226,12 @@ def test_object_list_containers_marker(self, c_mock): 'marker': object_fakes.container_name, 'end_marker': object_fakes.container_name_3, } - c_mock.assert_called_with( - **kwargs - ) + c_mock.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( - (object_fakes.container_name, ), - (object_fakes.container_name_3, ), + (object_fakes.container_name,), + (object_fakes.container_name_3,), ) self.assertEqual(datalist, tuple(data)) @@ -255,7 +242,8 @@ def test_object_list_containers_limit(self, c_mock): ] arglist = [ - '--limit', '2', + '--limit', + '2', ] verifylist = [ ('limit', 2), @@ -271,14 +259,12 @@ def test_object_list_containers_limit(self, c_mock): kwargs = { 'limit': 2, } - c_mock.assert_called_with( - **kwargs - ) + c_mock.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( - (object_fakes.container_name, ), - (object_fakes.container_name_3, ), + (object_fakes.container_name,), + (object_fakes.container_name_3,), ) self.assertEqual(datalist, tuple(data)) @@ -302,11 +288,8 @@ def test_object_list_containers_long(self, c_mock): columns, data = self.cmd.take_action(parsed_args) # Set expected values - kwargs = { - } - c_mock.assert_called_with( - **kwargs - ) + kwargs = {} + c_mock.assert_called_with(**kwargs) collist = ('Name', 'Bytes', 'Count') self.assertEqual(collist, columns) @@ -348,26 +331,21 @@ def test_object_list_containers_all(self, c_mock): kwargs = { 'full_listing': True, } - c_mock.assert_called_with( - **kwargs - ) + c_mock.assert_called_with(**kwargs) self.assertEqual(self.columns, columns) datalist = ( - (object_fakes.container_name, ), - (object_fakes.container_name_2, ), - (object_fakes.container_name_3, ), + (object_fakes.container_name,), + (object_fakes.container_name_2,), + (object_fakes.container_name_3,), ) self.assertEqual(datalist, tuple(data)) -@mock.patch( - 'openstackclient.api.object_store_v1.APIv1.container_show' -) +@mock.patch('openstackclient.api.object_store_v1.APIv1.container_show') class TestContainerShow(TestContainer): - def setUp(self): - super(TestContainerShow, self).setUp() + super().setUp() # Get the command object to test self.cmd = container.ShowContainer(self.app, None) @@ -389,12 +367,10 @@ def test_container_show(self, c_mock): columns, data = self.cmd.take_action(parsed_args) # Set expected values - kwargs = { - } + kwargs = {} # lib.container.show_container(api, url, container) c_mock.assert_called_with( - container=object_fakes.container_name, - **kwargs + container=object_fakes.container_name, **kwargs ) collist = ('bytes', 'count', 'name') diff --git a/openstackclient/tests/unit/object/v1/test_container_all.py b/openstackclient/tests/unit/object/v1/test_container_all.py index 654cfbc740..0a795dd860 100644 --- a/openstackclient/tests/unit/object/v1/test_container_all.py +++ b/openstackclient/tests/unit/object/v1/test_container_all.py @@ -20,15 +20,13 @@ class TestContainerAll(object_fakes.TestObjectv1): - def setUp(self): - super(TestContainerAll, self).setUp() + super().setUp() self.requests_mock = self.useFixture(fixture.Fixture()) class TestContainerCreate(TestContainerAll): - columns = ( 'account', 'container', @@ -36,7 +34,7 @@ class 
TestContainerCreate(TestContainerAll): ) def setUp(self): - super(TestContainerCreate, self).setUp() + super().setUp() # Get the command object to test self.cmd = container_cmds.CreateContainer(self.app, None) @@ -52,9 +50,12 @@ def test_object_create_container_single(self): arglist = [ 'ernie', ] - verifylist = [( - 'containers', ['ernie'], - )] + verifylist = [ + ( + 'containers', + ['ernie'], + ) + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class ShowOne in cliff, abstract method take_action() @@ -63,32 +64,27 @@ def test_object_create_container_single(self): columns, data = self.cmd.take_action(parsed_args) self.assertEqual(self.columns, columns) - datalist = [( - object_fakes.ACCOUNT_ID, - 'ernie', - '314159', - )] + datalist = [ + ( + object_fakes.ACCOUNT_ID, + 'ernie', + '314159', + ) + ] self.assertEqual(datalist, list(data)) def test_object_create_container_storage_policy(self): self.requests_mock.register_uri( 'PUT', object_fakes.ENDPOINT + '/ernie', - headers={ - 'x-trans-id': '314159', - 'x-storage-policy': 'o1--sr-r3' - }, + headers={'x-trans-id': '314159', 'x-storage-policy': 'o1--sr-r3'}, status_code=200, ) - arglist = [ - 'ernie', - '--storage-policy', - 'o1--sr-r3' - ] + arglist = ['ernie', '--storage-policy', 'o1--sr-r3'] verifylist = [ ('containers', ['ernie']), - ('storage_policy', 'o1--sr-r3') + ('storage_policy', 'o1--sr-r3'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -98,11 +94,13 @@ def test_object_create_container_storage_policy(self): columns, data = self.cmd.take_action(parsed_args) self.assertEqual(self.columns, columns) - datalist = [( - object_fakes.ACCOUNT_ID, - 'ernie', - '314159', - )] + datalist = [ + ( + object_fakes.ACCOUNT_ID, + 'ernie', + '314159', + ) + ] self.assertEqual(datalist, list(data)) def test_object_create_container_public(self): @@ -111,19 +109,13 @@ def test_object_create_container_public(self): object_fakes.ENDPOINT + '/ernie', headers={ 'x-trans-id': '314159', - 'x-container-read': '.r:*,.rlistings' + 'x-container-read': '.r:*,.rlistings', }, status_code=200, ) - arglist = [ - 'ernie', - '--public' - ] - verifylist = [ - ('containers', ['ernie']), - ('public', True) - ] + arglist = ['ernie', '--public'] + verifylist = [('containers', ['ernie']), ('public', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class ShowOne in cliff, abstract method take_action() @@ -132,11 +124,13 @@ def test_object_create_container_public(self): columns, data = self.cmd.take_action(parsed_args) self.assertEqual(self.columns, columns) - datalist = [( - object_fakes.ACCOUNT_ID, - 'ernie', - '314159', - )] + datalist = [ + ( + object_fakes.ACCOUNT_ID, + 'ernie', + '314159', + ) + ] self.assertEqual(datalist, list(data)) def test_object_create_container_more(self): @@ -157,9 +151,12 @@ def test_object_create_container_more(self): 'ernie', 'bert', ] - verifylist = [( - 'containers', ['ernie', 'bert'], - )] + verifylist = [ + ( + 'containers', + ['ernie', 'bert'], + ) + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class ShowOne in cliff, abstract method take_action() @@ -184,9 +181,8 @@ def test_object_create_container_more(self): class TestContainerDelete(TestContainerAll): - def setUp(self): - super(TestContainerDelete, self).setUp() + super().setUp() # Get the command object to test self.cmd = container_cmds.DeleteContainer(self.app, None) @@ -201,9 +197,12 @@ def test_object_delete_container_single(self): arglist = [ 
'ernie', ] - verifylist = [( - 'containers', ['ernie'], - )] + verifylist = [ + ( + 'containers', + ['ernie'], + ) + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # Command.take_action() returns None @@ -226,9 +225,12 @@ def test_object_delete_container_more(self): 'ernie', 'bert', ] - verifylist = [( - 'containers', ['ernie', 'bert'], - )] + verifylist = [ + ( + 'containers', + ['ernie', 'bert'], + ) + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # Command.take_action() returns None @@ -237,11 +239,10 @@ def test_object_delete_container_more(self): class TestContainerList(TestContainerAll): - columns = ('Name',) def setUp(self): - super(TestContainerList, self).setUp() + super().setUp() # Get the command object to test self.cmd = container_cmds.ListContainer(self.app, None) @@ -268,9 +269,9 @@ def test_object_list_containers_no_options(self): self.assertEqual(self.columns, columns) datalist = [ - (object_fakes.container_name, ), - (object_fakes.container_name_3, ), - (object_fakes.container_name_2, ), + (object_fakes.container_name,), + (object_fakes.container_name_3,), + (object_fakes.container_name_2,), ] self.assertEqual(datalist, list(data)) @@ -287,7 +288,8 @@ def test_object_list_containers_prefix(self): ) arglist = [ - '--prefix', 'bit', + '--prefix', + 'bit', ] verifylist = [ ('prefix', 'bit'), @@ -299,20 +301,20 @@ def test_object_list_containers_prefix(self): self.assertEqual(self.columns, columns) datalist = [ - (object_fakes.container_name, ), - (object_fakes.container_name_3, ), + (object_fakes.container_name,), + (object_fakes.container_name_3,), ] self.assertEqual(datalist, list(data)) class TestContainerSave(TestContainerAll): - def setUp(self): - super(TestContainerSave, self).setUp() + super().setUp() # Get the command object to test self.cmd = container_cmds.SaveContainer(self.app, None) + # TODO(dtroyer): need to mock out object_lib.save_object() to test this # def test_object_save_container(self): # return_body = [ @@ -354,9 +356,8 @@ def setUp(self): class TestContainerShow(TestContainerAll): - def setUp(self): - super(TestContainerShow, self).setUp() + super().setUp() # Get the command object to test self.cmd = container_cmds.ShowContainer(self.app, None) @@ -369,7 +370,7 @@ def test_object_show_container(self): 'x-container-write': 'wsx', 'x-container-sync-to': 'edc', 'x-container-sync-key': 'rfv', - 'x-storage-policy': 'o1--sr-r3' + 'x-storage-policy': 'o1--sr-r3', } self.requests_mock.register_uri( 'HEAD', @@ -381,9 +382,12 @@ def test_object_show_container(self): arglist = [ 'ernie', ] - verifylist = [( - 'container', 'ernie', - )] + verifylist = [ + ( + 'container', + 'ernie', + ) + ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class ShowOne in cliff, abstract method take_action() diff --git a/openstackclient/tests/unit/object/v1/test_object.py b/openstackclient/tests/unit/object/v1/test_object.py index fc3073c8bc..f1777f963c 100644 --- a/openstackclient/tests/unit/object/v1/test_object.py +++ b/openstackclient/tests/unit/object/v1/test_object.py @@ -26,9 +26,8 @@ class TestObject(object_fakes.TestObjectv1): - def setUp(self): - super(TestObject, self).setUp() + super().setUp() self.app.client_manager.object_store = object_store.APIv1( session=mock.Mock(), service_type="object-store", @@ -36,20 +35,13 @@ def setUp(self): self.api = self.app.client_manager.object_store -@mock.patch( - 'openstackclient.api.object_store_v1.APIv1.object_list' -) 
+@mock.patch('openstackclient.api.object_store_v1.APIv1.object_list') class TestObjectList(TestObject): - columns = ('Name',) - datalist = ( - ( - object_fakes.object_name_2, - ), - ) + datalist = ((object_fakes.object_name_2,),) def setUp(self): - super(TestObjectList, self).setUp() + super().setUp() # Get the command object to test self.cmd = obj.ListObject(self.app, None) @@ -79,8 +71,8 @@ def test_object_list_objects_no_options(self, o_mock): self.assertEqual(self.columns, columns) datalist = ( - (object_fakes.object_name_1, ), - (object_fakes.object_name_2, ), + (object_fakes.object_name_1,), + (object_fakes.object_name_2,), ) self.assertEqual(datalist, tuple(data)) @@ -90,7 +82,8 @@ def test_object_list_objects_prefix(self, o_mock): ] arglist = [ - '--prefix', 'floppy', + '--prefix', + 'floppy', object_fakes.container_name_2, ] verifylist = [ @@ -109,8 +102,7 @@ def test_object_list_objects_prefix(self, o_mock): 'prefix': 'floppy', } o_mock.assert_called_with( - container=object_fakes.container_name_2, - **kwargs + container=object_fakes.container_name_2, **kwargs ) self.assertEqual(self.columns, columns) @@ -122,7 +114,8 @@ def test_object_list_objects_delimiter(self, o_mock): ] arglist = [ - '--delimiter', '=', + '--delimiter', + '=', object_fakes.container_name_2, ] verifylist = [ @@ -141,8 +134,7 @@ def test_object_list_objects_delimiter(self, o_mock): 'delimiter': '=', } o_mock.assert_called_with( - container=object_fakes.container_name_2, - **kwargs + container=object_fakes.container_name_2, **kwargs ) self.assertEqual(self.columns, columns) @@ -154,7 +146,8 @@ def test_object_list_objects_marker(self, o_mock): ] arglist = [ - '--marker', object_fakes.object_name_2, + '--marker', + object_fakes.object_name_2, object_fakes.container_name_2, ] verifylist = [ @@ -173,8 +166,7 @@ def test_object_list_objects_marker(self, o_mock): 'marker': object_fakes.object_name_2, } o_mock.assert_called_with( - container=object_fakes.container_name_2, - **kwargs + container=object_fakes.container_name_2, **kwargs ) self.assertEqual(self.columns, columns) @@ -186,7 +178,8 @@ def test_object_list_objects_end_marker(self, o_mock): ] arglist = [ - '--end-marker', object_fakes.object_name_2, + '--end-marker', + object_fakes.object_name_2, object_fakes.container_name_2, ] verifylist = [ @@ -205,8 +198,7 @@ def test_object_list_objects_end_marker(self, o_mock): 'end_marker': object_fakes.object_name_2, } o_mock.assert_called_with( - container=object_fakes.container_name_2, - **kwargs + container=object_fakes.container_name_2, **kwargs ) self.assertEqual(self.columns, columns) @@ -218,7 +210,8 @@ def test_object_list_objects_limit(self, o_mock): ] arglist = [ - '--limit', '2', + '--limit', + '2', object_fakes.container_name_2, ] verifylist = [ @@ -237,8 +230,7 @@ def test_object_list_objects_limit(self, o_mock): 'limit': 2, } o_mock.assert_called_with( - container=object_fakes.container_name_2, - **kwargs + container=object_fakes.container_name_2, **kwargs ) self.assertEqual(self.columns, columns) @@ -266,11 +258,9 @@ def test_object_list_objects_long(self, o_mock): columns, data = self.cmd.take_action(parsed_args) # Set expected values - kwargs = { - } + kwargs = {} o_mock.assert_called_with( - container=object_fakes.container_name, - **kwargs + container=object_fakes.container_name, **kwargs ) collist = ('Name', 'Bytes', 'Hash', 'Content Type', 'Last Modified') @@ -319,25 +309,21 @@ def test_object_list_objects_all(self, o_mock): 'full_listing': True, } o_mock.assert_called_with( - 
container=object_fakes.container_name, - **kwargs + container=object_fakes.container_name, **kwargs ) self.assertEqual(self.columns, columns) datalist = ( - (object_fakes.object_name_1, ), - (object_fakes.object_name_2, ), + (object_fakes.object_name_1,), + (object_fakes.object_name_2,), ) self.assertEqual(datalist, tuple(data)) -@mock.patch( - 'openstackclient.api.object_store_v1.APIv1.object_show' -) +@mock.patch('openstackclient.api.object_store_v1.APIv1.object_show') class TestObjectShow(TestObject): - def setUp(self): - super(TestObjectShow, self).setUp() + super().setUp() # Get the command object to test self.cmd = obj.ShowObject(self.app, None) @@ -361,13 +347,12 @@ def test_object_show(self, c_mock): columns, data = self.cmd.take_action(parsed_args) # Set expected values - kwargs = { - } + kwargs = {} # lib.container.show_container(api, url, container) c_mock.assert_called_with( container=object_fakes.container_name, object=object_fakes.object_name_1, - **kwargs + **kwargs, ) collist = ('bytes', 'content_type', 'hash', 'last_modified', 'name') diff --git a/openstackclient/tests/unit/object/v1/test_object_all.py b/openstackclient/tests/unit/object/v1/test_object_all.py index 7e88409f71..968667b68e 100644 --- a/openstackclient/tests/unit/object/v1/test_object_all.py +++ b/openstackclient/tests/unit/object/v1/test_object_all.py @@ -23,17 +23,15 @@ class TestObjectAll(object_fakes.TestObjectv1): - def setUp(self): - super(TestObjectAll, self).setUp() + super().setUp() self.requests_mock = self.useFixture(fixture.Fixture()) class TestObjectCreate(TestObjectAll): - def setUp(self): - super(TestObjectCreate, self).setUp() + super().setUp() # Get the command object to test self.cmd = object_cmds.CreateObject(self.app, None) @@ -43,29 +41,31 @@ def test_multiple_object_create_with_object_name(self): object_fakes.container_name, object_fakes.object_name_1, object_fakes.object_name_2, - '--name', object_fakes.object_upload_name, + '--name', + object_fakes.object_upload_name, ] verifylist = [ ('container', object_fakes.container_name), - ('objects', [object_fakes.object_name_1, - object_fakes.object_name_2]), + ( + 'objects', + [object_fakes.object_name_1, object_fakes.object_name_2], + ), ('name', object_fakes.object_upload_name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) class TestObjectList(TestObjectAll): - columns = ('Name',) def setUp(self): - super(TestObjectList, self).setUp() + super().setUp() # Get the command object to test self.cmd = object_cmds.ListObject(self.app, None) @@ -77,10 +77,10 @@ def test_object_list_objects_no_options(self): ] self.requests_mock.register_uri( 'GET', - object_fakes.ENDPOINT + - '/' + - object_fakes.container_name + - '?format=json', + object_fakes.ENDPOINT + + '/' + + object_fakes.container_name + + '?format=json', json=return_body, status_code=200, ) @@ -98,8 +98,8 @@ def test_object_list_objects_no_options(self): self.assertEqual(self.columns, columns) datalist = [ - (object_fakes.object_name_1, ), - (object_fakes.object_name_2, ), + (object_fakes.object_name_1,), + (object_fakes.object_name_2,), ] self.assertEqual(datalist, list(data)) @@ -109,16 +109,17 @@ def test_object_list_objects_prefix(self): ] self.requests_mock.register_uri( 'GET', - object_fakes.ENDPOINT + - '/' + - object_fakes.container_name_2 + - '?prefix=floppy&format=json', + 
object_fakes.ENDPOINT + + '/' + + object_fakes.container_name_2 + + '?prefix=floppy&format=json', json=return_body, status_code=200, ) arglist = [ - '--prefix', 'floppy', + '--prefix', + 'floppy', object_fakes.container_name_2, ] verifylist = [ @@ -133,16 +134,13 @@ def test_object_list_objects_prefix(self): columns, data = self.cmd.take_action(parsed_args) self.assertEqual(self.columns, columns) - datalist = ( - (object_fakes.object_name_2, ), - ) + datalist = ((object_fakes.object_name_2,),) self.assertEqual(datalist, tuple(data)) class TestObjectShow(TestObjectAll): - def setUp(self): - super(TestObjectShow, self).setUp() + super().setUp() # Get the command object to test self.cmd = object_cmds.ShowObject(self.app, None) @@ -158,11 +156,13 @@ def test_object_show(self): } self.requests_mock.register_uri( 'HEAD', - '/'.join([ - object_fakes.ENDPOINT, - object_fakes.container_name, - object_fakes.object_name_1, - ]), + '/'.join( + [ + object_fakes.ENDPOINT, + object_fakes.container_name, + object_fakes.object_name_1, + ] + ), headers=headers, status_code=200, ) @@ -207,9 +207,8 @@ def test_object_show(self): class TestObjectSave(TestObjectAll): - def setUp(self): - super(TestObjectSave, self).setUp() + super().setUp() # Get the command object to test self.cmd = object_cmds.SaveObject(self.app, None) @@ -217,20 +216,20 @@ def setUp(self): def test_save_to_stdout(self): self.requests_mock.register_uri( 'GET', - object_fakes.ENDPOINT + - '/' + - object_fakes.container_name + - '/' + - object_fakes.object_name_1, + object_fakes.ENDPOINT + + '/' + + object_fakes.container_name + + '/' + + object_fakes.object_name_1, status_code=200, - content=object_fakes.object_1_content + content=object_fakes.object_1_content, ) arglist = [ object_fakes.container_name, object_fakes.object_name_1, '--file', - '-' + '-', ] verifylist = [ @@ -253,13 +252,18 @@ def __enter__(self): def __exit__(self, *a): self.context_manager_calls.append('__exit__') - with mock.patch('sys.stdout') as fake_stdout, mock.patch( - 'os.fdopen', return_value=FakeStdout()) as fake_fdopen: + with ( + mock.patch('sys.stdout') as fake_stdout, + mock.patch('os.fdopen', return_value=FakeStdout()) as fake_fdopen, + ): fake_stdout.fileno.return_value = 123 self.cmd.take_action(parsed_args) - self.assertEqual(fake_fdopen.return_value.getvalue(), - object_fakes.object_1_content) + self.assertEqual( + fake_fdopen.return_value.getvalue(), object_fakes.object_1_content + ) self.assertEqual(fake_fdopen.mock_calls, [mock.call(123, 'wb')]) - self.assertEqual(fake_fdopen.return_value.context_manager_calls, - ['__enter__', '__exit__']) + self.assertEqual( + fake_fdopen.return_value.context_manager_calls, + ['__enter__', '__exit__'], + ) diff --git a/openstackclient/tests/unit/test_shell.py b/openstackclient/tests/unit/test_shell.py index bee2b40149..628e362bba 100644 --- a/openstackclient/tests/unit/test_shell.py +++ b/openstackclient/tests/unit/test_shell.py @@ -96,7 +96,7 @@ '--os-cacert': ('/dev/null', True, True), '--timing': (True, True, False), '--os-profile': ('SECRET_KEY', True, False), - '--os-interface': (DEFAULT_INTERFACE, True, True) + '--os-interface': (DEFAULT_INTERFACE, True, True), } @@ -138,16 +138,11 @@ def make_shell_wrapper(func, inst, args, kwargs): class TestShell(osc_lib_test_utils.TestShell): - # Full name of the OpenStackShell class to test (cliff.app.App subclass) shell_class_name = "openstackclient.shell.OpenStackShell" - # TODO(dtroyer): remove this once the shell_class_patch patch is released - # in osc-lib - app_patch = 
shell_class_name - def setUp(self): - super(TestShell, self).setUp() + super().setUp() # TODO(dtroyer): remove this once the shell_class_patch patch is # released in osc-lib mod_str, _sep, class_str = self.shell_class_name.rpartition('.') @@ -163,7 +158,6 @@ def _assert_admin_token_auth(self, cmd_options, default_args): ) _cmd = cmd_options + " list role" osc_lib_test_utils.fake_execute(_shell, _cmd) - print("_shell: %s" % _shell) self.app.assert_called_with(["list", "role"]) self.assertEqual( @@ -179,32 +173,29 @@ def _assert_admin_token_auth(self, cmd_options, default_args): def _assert_token_auth(self, cmd_options, default_args): with mock.patch( - self.app_patch + ".initialize_app", - self.app, + self.shell_class_name + ".initialize_app", + self.app, ): _shell = osc_lib_test_utils.make_shell( shell_class=self.shell_class, ) _cmd = cmd_options + " list role" osc_lib_test_utils.fake_execute(_shell, _cmd) - print("_shell: %s" % _shell) self.app.assert_called_with(["list", "role"]) - self.assertEqual( - default_args.get("token", ''), - _shell.options.token, - "token" - ) - self.assertEqual( - default_args.get("auth_url", ''), - _shell.options.auth_url, - "auth_url" - ) + + if default_args.get('token'): + self.assertEqual(default_args['token'], _shell.options.token) + + if default_args.get('auth_url'): + self.assertEqual( + default_args['auth_url'], _shell.options.auth_url + ) def _assert_cli(self, cmd_options, default_args): with mock.patch( - self.shell_class_name + ".initialize_app", - self.app, + self.shell_class_name + ".initialize_app", + self.app, ): _shell = osc_lib_test_utils.make_shell( shell_class=self.shell_class, @@ -213,22 +204,34 @@ def _assert_cli(self, cmd_options, default_args): osc_lib_test_utils.fake_execute(_shell, _cmd) self.app.assert_called_with(["list", "server"]) - self.assertEqual(default_args["compute_api_version"], - _shell.options.os_compute_api_version) - self.assertEqual(default_args["identity_api_version"], - _shell.options.os_identity_api_version) - self.assertEqual(default_args["image_api_version"], - _shell.options.os_image_api_version) - self.assertEqual(default_args["volume_api_version"], - _shell.options.os_volume_api_version) - self.assertEqual(default_args["network_api_version"], - _shell.options.os_network_api_version) + # TODO(stephenfin): Remove "or ''" when we bump osc-lib minimum to + # a version that includes I1d26133c9d9ed299d1035f207059aa8fe463a001 + self.assertEqual( + default_args["compute_api_version"], + _shell.options.os_compute_api_version or '', + ) + self.assertEqual( + default_args["identity_api_version"], + _shell.options.os_identity_api_version or '', + ) + self.assertEqual( + default_args["image_api_version"], + _shell.options.os_image_api_version or '', + ) + self.assertEqual( + default_args["volume_api_version"], + _shell.options.os_volume_api_version or '', + ) + self.assertEqual( + default_args["network_api_version"], + _shell.options.os_network_api_version or '', + ) -class TestShellOptions(TestShell): +class TestShellOptions(TestShell): def setUp(self): - super(TestShellOptions, self).setUp() + super().setUp() self.useFixture(osc_lib_test_utils.EnvFixture()) def _test_options_init_app(self, test_opts): @@ -289,9 +292,8 @@ def _test_env_get_one_cloud(self, test_opts): class TestShellTokenAuthEnv(TestShell): - def setUp(self): - super(TestShellTokenAuthEnv, self).setUp() + super().setUp() env = { "OS_TOKEN": DEFAULT_TOKEN, "OS_AUTH_URL": DEFAULT_AUTH_URL, @@ -333,9 +335,8 @@ def test_empty_auth(self): class 
TestShellTokenEndpointAuthEnv(TestShell): - def setUp(self): - super(TestShellTokenEndpointAuthEnv, self).setUp() + super().setUp() env = { "OS_TOKEN": DEFAULT_TOKEN, "OS_ENDPOINT": DEFAULT_SERVICE_URL, @@ -377,9 +378,8 @@ def test_empty_auth(self): class TestShellCli(TestShell): - def setUp(self): - super(TestShellCli, self).setUp() + super().setUp() env = { "OS_COMPUTE_API_VERSION": DEFAULT_COMPUTE_API_VERSION, "OS_IDENTITY_API_VERSION": DEFAULT_IDENTITY_API_VERSION, @@ -408,6 +408,6 @@ def test_empty_env(self): "identity_api_version": LIB_IDENTITY_API_VERSION, "image_api_version": LIB_IMAGE_API_VERSION, "volume_api_version": LIB_VOLUME_API_VERSION, - "network_api_version": LIB_NETWORK_API_VERSION + "network_api_version": LIB_NETWORK_API_VERSION, } self._assert_cli(flag, kwargs) diff --git a/openstackclient/tests/unit/utils.py b/openstackclient/tests/unit/utils.py index 39cb561474..607047f14e 100644 --- a/openstackclient/tests/unit/utils.py +++ b/openstackclient/tests/unit/utils.py @@ -12,9 +12,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -from io import StringIO +import argparse +import io import os import fixtures @@ -29,31 +29,40 @@ class ParserException(Exception): class CompareBySet(list): """Class to compare value using set.""" + def __eq__(self, other): return set(self) == set(other) class TestCase(testtools.TestCase): + # provide additional context for failures + maxDiff = None def setUp(self): testtools.TestCase.setUp(self) - if (os.environ.get("OS_STDOUT_CAPTURE") == "True" or - os.environ.get("OS_STDOUT_CAPTURE") == "1"): + if ( + os.environ.get("OS_STDOUT_CAPTURE") == "True" + or os.environ.get("OS_STDOUT_CAPTURE") == "1" + ): stdout = self.useFixture(fixtures.StringStream("stdout")).stream self.useFixture(fixtures.MonkeyPatch("sys.stdout", stdout)) - if (os.environ.get("OS_STDERR_CAPTURE") == "True" or - os.environ.get("OS_STDERR_CAPTURE") == "1"): + if ( + os.environ.get("OS_STDERR_CAPTURE") == "True" + or os.environ.get("OS_STDERR_CAPTURE") == "1" + ): stderr = self.useFixture(fixtures.StringStream("stderr")).stream self.useFixture(fixtures.MonkeyPatch("sys.stderr", stderr)) + self.log = self.useFixture(fixtures.LoggerFixture()) + def assertNotCalled(self, m, msg=None): """Assert a function was not called""" if m.called: if not msg: - msg = 'method %s should not have been called' % m + msg = f'method {m} should not have been called' self.fail(msg) @@ -61,7 +70,7 @@ class TestCommand(TestCase): """Test cliff command classes""" def setUp(self): - super(TestCommand, self).setUp() + super().setUp() # Build up a fake app self.fake_stdout = fakes.FakeStdout() self.fake_log = fakes.FakeLog() @@ -71,16 +80,27 @@ def setUp(self): def check_parser(self, cmd, args, verify_args): cmd_parser = cmd.get_parser('check_parser') - stderr = StringIO() + stderr = io.StringIO() with fixtures.MonkeyPatch('sys.stderr', stderr): try: parsed_args = cmd_parser.parse_args(args) - except SystemExit: - raise ParserException("Argument parse failed: %s" % - stderr.getvalue()) + except ( + SystemExit, + argparse.ArgumentTypeError, + argparse.ArgumentError, + ): + raise ParserException( + f"Argument parse failed: {stderr.getvalue()}" + ) for av in verify_args: - attr, value = av + attr, expected_value = av if attr: + actual_value = getattr(parsed_args, attr) self.assertIn(attr, parsed_args) - self.assertEqual(value, getattr(parsed_args, attr)) + self.assertEqual( + 
expected_value, + actual_value, + f'args.{attr}: expected: {expected_value}, got: ' + f'{actual_value}', + ) return parsed_args diff --git a/openstackclient/tests/unit/volume/test_find_resource.py b/openstackclient/tests/unit/volume/test_find_resource.py index 208f55b94e..614fa9a518 100644 --- a/openstackclient/tests/unit/volume/test_find_resource.py +++ b/openstackclient/tests/unit/volume/test_find_resource.py @@ -21,61 +21,56 @@ from osc_lib import utils from openstackclient.tests.unit import utils as test_utils -from openstackclient.volume import client # noqa - - -# Monkey patch for v1 cinderclient -# NOTE(dtroyer): Do here because openstackclient.volume.client -# doesn't do it until the client object is created now. -volumes.Volume.NAME_ATTR = 'display_name' -volume_snapshots.Snapshot.NAME_ATTR = 'display_name' - ID = '1after909' NAME = 'PhilSpector' class TestFindResourceVolumes(test_utils.TestCase): - def setUp(self): - super(TestFindResourceVolumes, self).setUp() + super().setUp() api = mock.Mock() api.client = mock.Mock() api.client.get = mock.Mock() resp = mock.Mock() - body = {"volumes": [{"id": ID, 'display_name': NAME}]} - api.client.get.side_effect = [Exception("Not found"), - (resp, body)] + body = {"volumes": [{"id": ID, 'name': NAME}]} + api.client.get.side_effect = [Exception("Not found"), (resp, body)] self.manager = volumes.VolumeManager(api) def test_find(self): result = utils.find_resource(self.manager, NAME) self.assertEqual(ID, result.id) - self.assertEqual(NAME, result.display_name) + self.assertEqual(NAME, result.name) def test_not_find(self): - self.assertRaises(exceptions.CommandError, utils.find_resource, - self.manager, 'GeorgeMartin') + self.assertRaises( + exceptions.CommandError, + utils.find_resource, + self.manager, + 'GeorgeMartin', + ) class TestFindResourceVolumeSnapshots(test_utils.TestCase): - def setUp(self): - super(TestFindResourceVolumeSnapshots, self).setUp() + super().setUp() api = mock.Mock() api.client = mock.Mock() api.client.get = mock.Mock() resp = mock.Mock() - body = {"snapshots": [{"id": ID, 'display_name': NAME}]} - api.client.get.side_effect = [Exception("Not found"), - (resp, body)] + body = {"snapshots": [{"id": ID, 'name': NAME}]} + api.client.get.side_effect = [Exception("Not found"), (resp, body)] self.manager = volume_snapshots.SnapshotManager(api) def test_find(self): result = utils.find_resource(self.manager, NAME) self.assertEqual(ID, result.id) - self.assertEqual(NAME, result.display_name) + self.assertEqual(NAME, result.name) def test_not_find(self): - self.assertRaises(exceptions.CommandError, utils.find_resource, - self.manager, 'GeorgeMartin') + self.assertRaises( + exceptions.CommandError, + utils.find_resource, + self.manager, + 'GeorgeMartin', + ) diff --git a/openstackclient/tests/unit/volume/v1/fakes.py b/openstackclient/tests/unit/volume/v1/fakes.py deleted file mode 100644 index 76b208b2f1..0000000000 --- a/openstackclient/tests/unit/volume/v1/fakes.py +++ /dev/null @@ -1,611 +0,0 @@ -# Copyright 2013 Nebula Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import copy -import random -from unittest import mock -import uuid - -from openstackclient.tests.unit import fakes -from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes -from openstackclient.tests.unit import utils - - -class FakeVolumev1Client: - def __init__(self, **kwargs): - self.volumes = mock.Mock() - self.volumes.resource_class = fakes.FakeResource(None, {}) - self.services = mock.Mock() - self.services.resource_class = fakes.FakeResource(None, {}) - self.extensions = mock.Mock() - self.extensions.resource_class = fakes.FakeResource(None, {}) - self.qos_specs = mock.Mock() - self.qos_specs.resource_class = fakes.FakeResource(None, {}) - self.volume_types = mock.Mock() - self.volume_types.resource_class = fakes.FakeResource(None, {}) - self.volume_encryption_types = mock.Mock() - self.volume_encryption_types.resource_class = fakes.FakeResource( - None, {} - ) - self.transfers = mock.Mock() - self.transfers.resource_class = fakes.FakeResource(None, {}) - self.volume_snapshots = mock.Mock() - self.volume_snapshots.resource_class = fakes.FakeResource(None, {}) - self.backups = mock.Mock() - self.backups.resource_class = fakes.FakeResource(None, {}) - self.restores = mock.Mock() - self.restores.resource_class = fakes.FakeResource(None, {}) - self.auth_token = kwargs['token'] - self.management_url = kwargs['endpoint'] - - -class TestVolumev1(utils.TestCommand): - def setUp(self): - super().setUp() - - self.app.client_manager.volume = FakeVolumev1Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - - self.app.client_manager.identity = identity_fakes.FakeIdentityv2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - - # avoid circular imports - from openstackclient.tests.unit.image.v1 import fakes as image_fakes - - self.app.client_manager.image = image_fakes.FakeImagev1Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, - ) - - -def create_one_transfer(attrs=None): - """Create a fake transfer. - - :param Dictionary attrs: - A dictionary with all attributes of Transfer Request - :return: - A FakeResource object with volume_id, name, id. - """ - # Set default attribute - transfer_info = { - 'volume_id': 'volume-id-' + uuid.uuid4().hex, - 'name': 'fake_transfer_name', - 'id': 'id-' + uuid.uuid4().hex, - 'links': 'links-' + uuid.uuid4().hex, - } - - # Overwrite default attributes if there are some attributes set - attrs = attrs or {} - - transfer_info.update(attrs) - - transfer = fakes.FakeResource(None, transfer_info, loaded=True) - - return transfer - - -def create_transfers(attrs=None, count=2): - """Create multiple fake transfers. - - :param Dictionary attrs: - A dictionary with all attributes of transfer - :param Integer count: - The number of transfers to be faked - :return: - A list of FakeResource objects - """ - transfers = [] - for n in range(0, count): - transfers.append(create_one_transfer(attrs)) - - return transfers - - -def get_transfers(transfers=None, count=2): - """Get an iterable MagicMock object with a list of faked transfers. - - If transfers list is provided, then initialize the Mock object with the - list. Otherwise create one. 
- - :param List transfers: - A list of FakeResource objects faking transfers - :param Integer count: - The number of transfers to be faked - :return - An iterable Mock object with side_effect set to a list of faked - transfers - """ - if transfers is None: - transfers = create_transfers(count) - - return mock.Mock(side_effect=transfers) - - -def create_one_service(attrs=None): - """Create a fake service. - - :param Dictionary attrs: - A dictionary with all attributes of service - :return: - A FakeResource object with host, status, etc. - """ - # Set default attribute - service_info = { - 'host': 'host_test', - 'binary': 'cinder_test', - 'status': 'enabled', - 'disabled_reason': 'LongHoliday-GoldenWeek', - 'zone': 'fake_zone', - 'updated_at': 'fake_date', - 'state': 'fake_state', - } - - # Overwrite default attributes if there are some attributes set - attrs = attrs or {} - - service_info.update(attrs) - - service = fakes.FakeResource(None, service_info, loaded=True) - - return service - - -def create_services(attrs=None, count=2): - """Create multiple fake services. - - :param Dictionary attrs: - A dictionary with all attributes of service - :param Integer count: - The number of services to be faked - :return: - A list of FakeResource objects - """ - services = [] - for n in range(0, count): - services.append(create_one_service(attrs)) - - return services - - -def get_services(services=None, count=2): - """Get an iterable MagicMock object with a list of faked services. - - If services list is provided, then initialize the Mock object with the - list. Otherwise create one. - - :param List services: - A list of FakeResource objects faking services - :param Integer count: - The number of services to be faked - :return - An iterable Mock object with side_effect set to a list of faked - services - """ - if services is None: - services = create_services(count) - - return mock.Mock(side_effect=services) - - -def create_one_qos(attrs=None): - """Create a fake Qos specification. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with id, name, consumer, etc. - """ - attrs = attrs or {} - - # Set default attributes. - qos_info = { - "id": 'qos-id-' + uuid.uuid4().hex, - "name": 'qos-name-' + uuid.uuid4().hex, - "consumer": 'front-end', - "specs": {"foo": "bar", "iops": "9001"}, - } - - # Overwrite default attributes. - qos_info.update(attrs) - - qos = fakes.FakeResource(info=copy.deepcopy(qos_info), loaded=True) - return qos - - -def create_one_qos_association(attrs=None): - """Create a fake Qos specification association. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with id, name, association_type, etc. - """ - attrs = attrs or {} - - # Set default attributes. - qos_association_info = { - "id": 'type-id-' + uuid.uuid4().hex, - "name": 'type-name-' + uuid.uuid4().hex, - "association_type": 'volume_type', - } - - # Overwrite default attributes. - qos_association_info.update(attrs) - - qos_association = fakes.FakeResource( - info=copy.deepcopy(qos_association_info), loaded=True - ) - return qos_association - - -def create_qoses(attrs=None, count=2): - """Create multiple fake Qos specifications. 
- - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of Qos specifications to fake - :return: - A list of FakeResource objects faking the Qos specifications - """ - qoses = [] - for i in range(0, count): - qos = create_one_qos(attrs) - qoses.append(qos) - - return qoses - - -def get_qoses(qoses=None, count=2): - """Get an iterable MagicMock object with a list of faked qoses. - - If qoses list is provided, then initialize the Mock object with the - list. Otherwise create one. - - :param List volumes: - A list of FakeResource objects faking qoses - :param Integer count: - The number of qoses to be faked - :return - An iterable Mock object with side_effect set to a list of faked - qoses - """ - if qoses is None: - qoses = create_qoses(count) - - return mock.Mock(side_effect=qoses) - - -def create_one_volume(attrs=None): - """Create a fake volume. - - :param Dictionary attrs: - A dictionary with all attributes of volume - :return: - A FakeResource object with id, name, status, etc. - """ - attrs = attrs or {} - - # Set default attribute - volume_info = { - 'id': 'volume-id' + uuid.uuid4().hex, - 'display_name': 'volume-name' + uuid.uuid4().hex, - 'display_description': 'description' + uuid.uuid4().hex, - 'status': 'available', - 'size': 10, - 'volume_type': random.choice(['fake_lvmdriver-1', 'fake_lvmdriver-2']), - 'bootable': 'true', - 'metadata': { - 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex, - 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex, - 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex, - }, - 'snapshot_id': 'snapshot-id-' + uuid.uuid4().hex, - 'availability_zone': 'zone' + uuid.uuid4().hex, - 'attachments': [ - { - 'device': '/dev/' + uuid.uuid4().hex, - 'server_id': uuid.uuid4().hex, - }, - ], - 'created_at': 'time-' + uuid.uuid4().hex, - } - - # Overwrite default attributes if there are some attributes set - volume_info.update(attrs) - - volume = fakes.FakeResource(None, volume_info, loaded=True) - return volume - - -def create_volumes(attrs=None, count=2): - """Create multiple fake volumes. - - :param Dictionary attrs: - A dictionary with all attributes of volume - :param Integer count: - The number of volumes to be faked - :return: - A list of FakeResource objects - """ - volumes = [] - for n in range(0, count): - volumes.append(create_one_volume(attrs)) - - return volumes - - -def get_volumes(volumes=None, count=2): - """Get an iterable MagicMock object with a list of faked volumes. - - If volumes list is provided, then initialize the Mock object with the - list. Otherwise create one. - - :param List volumes: - A list of FakeResource objects faking volumes - :param Integer count: - The number of volumes to be faked - :return - An iterable Mock object with side_effect set to a list of faked - volumes - """ - if volumes is None: - volumes = create_volumes(count) - - return mock.Mock(side_effect=volumes) - - -def create_one_volume_type(attrs=None, methods=None): - """Create a fake volume type. - - :param Dictionary attrs: - A dictionary with all attributes - :param Dictionary methods: - A dictionary with all methods - :return: - A FakeResource object with id, name, description, etc. - """ - attrs = attrs or {} - methods = methods or {} - - # Set default attributes. - volume_type_info = { - "id": 'type-id-' + uuid.uuid4().hex, - "name": 'type-name-' + uuid.uuid4().hex, - "description": 'type-description-' + uuid.uuid4().hex, - "extra_specs": {"foo": "bar"}, - "is_public": True, - } - - # Overwrite default attributes. 
- volume_type_info.update(attrs) - - volume_type = fakes.FakeResource( - info=copy.deepcopy(volume_type_info), methods=methods, loaded=True - ) - return volume_type - - -def create_volume_types(attrs=None, count=2): - """Create multiple fake types. - - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of types to fake - :return: - A list of FakeResource objects faking the types - """ - volume_types = [] - for i in range(0, count): - volume_type = create_one_volume_type(attrs) - volume_types.append(volume_type) - - return volume_types - - -def get_volume_types(volume_types=None, count=2): - """Get an iterable MagicMock object with a list of faked types. - - If types list is provided, then initialize the Mock object with the - list. Otherwise create one. - - :param List volume_types: - A list of FakeResource objects faking types - :param Integer count: - The number of types to be faked - :return - An iterable Mock object with side_effect set to a list of faked - types - """ - if volume_types is None: - volume_types = create_volume_types(count) - - return mock.Mock(side_effect=volume_types) - - -def create_one_encryption_volume_type(attrs=None): - """Create a fake encryption volume type. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with volume_type_id etc. - """ - attrs = attrs or {} - - # Set default attributes. - encryption_info = { - "volume_type_id": 'type-id-' + uuid.uuid4().hex, - 'provider': 'LuksEncryptor', - 'cipher': None, - 'key_size': None, - 'control_location': 'front-end', - } - - # Overwrite default attributes. - encryption_info.update(attrs) - - encryption_type = fakes.FakeResource( - info=copy.deepcopy(encryption_info), loaded=True - ) - return encryption_type - - -def create_one_snapshot(attrs=None): - """Create a fake snapshot. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with id, name, description, etc. - """ - attrs = attrs or {} - - # Set default attributes. - snapshot_info = { - "id": 'snapshot-id-' + uuid.uuid4().hex, - "display_name": 'snapshot-name-' + uuid.uuid4().hex, - "display_description": 'snapshot-description-' + uuid.uuid4().hex, - "size": 10, - "status": "available", - "metadata": {"foo": "bar"}, - "created_at": "2015-06-03T18:49:19.000000", - "volume_id": 'vloume-id-' + uuid.uuid4().hex, - } - - # Overwrite default attributes. - snapshot_info.update(attrs) - - snapshot_method = {'update': None} - - snapshot = fakes.FakeResource( - info=copy.deepcopy(snapshot_info), - methods=copy.deepcopy(snapshot_method), - loaded=True, - ) - return snapshot - - -def create_snapshots(attrs=None, count=2): - """Create multiple fake snapshots. - - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of snapshots to fake - :return: - A list of FakeResource objects faking the snapshots - """ - snapshots = [] - for i in range(0, count): - snapshot = create_one_snapshot(attrs) - snapshots.append(snapshot) - - return snapshots - - -def get_snapshots(snapshots=None, count=2): - """Get an iterable MagicMock object with a list of faked snapshots. - - If snapshots list is provided, then initialize the Mock object with the - list. Otherwise create one. 
- - :param List volumes: - A list of FakeResource objects faking snapshots - :param Integer count: - The number of snapshots to be faked - :return - An iterable Mock object with side_effect set to a list of faked - snapshots - """ - if snapshots is None: - snapshots = create_snapshots(count) - - return mock.Mock(side_effect=snapshots) - - -def create_one_backup(attrs=None): - """Create a fake backup. - - :param Dictionary attrs: - A dictionary with all attributes - :return: - A FakeResource object with id, name, volume_id, etc. - """ - attrs = attrs or {} - - # Set default attributes. - backup_info = { - "id": 'backup-id-' + uuid.uuid4().hex, - "name": 'backup-name-' + uuid.uuid4().hex, - "volume_id": 'volume-id-' + uuid.uuid4().hex, - "snapshot_id": 'snapshot-id' + uuid.uuid4().hex, - "description": 'description-' + uuid.uuid4().hex, - "object_count": None, - "container": 'container-' + uuid.uuid4().hex, - "size": random.randint(1, 20), - "status": "error", - "availability_zone": 'zone' + uuid.uuid4().hex, - "links": 'links-' + uuid.uuid4().hex, - } - - # Overwrite default attributes. - backup_info.update(attrs) - - backup = fakes.FakeResource(info=copy.deepcopy(backup_info), loaded=True) - return backup - - -def create_backups(attrs=None, count=2): - """Create multiple fake backups. - - :param Dictionary attrs: - A dictionary with all attributes - :param int count: - The number of backups to fake - :return: - A list of FakeResource objects faking the backups - """ - backups = [] - for i in range(0, count): - backup = create_one_backup(attrs) - backups.append(backup) - - return backups - - -def get_backups(backups=None, count=2): - """Get an iterable MagicMock object with a list of faked backups. - - If backups list is provided, then initialize the Mock object with the - list. Otherwise create one. - - :param List volumes: - A list of FakeResource objects faking backups - :param Integer count: - The number of backups to be faked - :return - An iterable Mock object with side_effect set to a list of faked - backups - """ - if backups is None: - backups = create_backups(count) - - return mock.Mock(side_effect=backups) diff --git a/openstackclient/tests/unit/volume/v1/test_qos_specs.py b/openstackclient/tests/unit/volume/v1/test_qos_specs.py deleted file mode 100644 index f5b35143a6..0000000000 --- a/openstackclient/tests/unit/volume/v1/test_qos_specs.py +++ /dev/null @@ -1,502 +0,0 @@ -# Copyright 2015 iWeb Technologies Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import copy -from unittest import mock -from unittest.mock import call - -from osc_lib.cli import format_columns -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes -from openstackclient.volume.v1 import qos_specs - - -class TestQos(volume_fakes.TestVolumev1): - - def setUp(self): - super().setUp() - - self.qos_mock = self.app.client_manager.volume.qos_specs - self.qos_mock.reset_mock() - - self.types_mock = self.app.client_manager.volume.volume_types - self.types_mock.reset_mock() - - -class TestQosAssociate(TestQos): - - volume_type = volume_fakes.create_one_volume_type() - qos_spec = volume_fakes.create_one_qos() - - def setUp(self): - super().setUp() - - self.qos_mock.get.return_value = self.qos_spec - self.types_mock.get.return_value = self.volume_type - # Get the command object to test - self.cmd = qos_specs.AssociateQos(self.app, None) - - def test_qos_associate(self): - arglist = [ - self.qos_spec.id, - self.volume_type.id - ] - verifylist = [ - ('qos_spec', self.qos_spec.id), - ('volume_type', self.volume_type.id) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.qos_mock.associate.assert_called_with( - self.qos_spec.id, - self.volume_type.id - ) - self.assertIsNone(result) - - -class TestQosCreate(TestQos): - - columns = ( - 'consumer', - 'id', - 'name', - 'properties' - ) - - def setUp(self): - super().setUp() - self.new_qos_spec = volume_fakes.create_one_qos() - self.datalist = ( - self.new_qos_spec.consumer, - self.new_qos_spec.id, - self.new_qos_spec.name, - format_columns.DictColumn(self.new_qos_spec.specs) - ) - self.qos_mock.create.return_value = self.new_qos_spec - # Get the command object to test - self.cmd = qos_specs.CreateQos(self.app, None) - - def test_qos_create_without_properties(self): - arglist = [ - self.new_qos_spec.name, - ] - verifylist = [ - ('name', self.new_qos_spec.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.qos_mock.create.assert_called_with( - self.new_qos_spec.name, - {'consumer': 'both'} - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_qos_create_with_consumer(self): - arglist = [ - '--consumer', self.new_qos_spec.consumer, - self.new_qos_spec.name, - ] - verifylist = [ - ('consumer', self.new_qos_spec.consumer), - ('name', self.new_qos_spec.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.qos_mock.create.assert_called_with( - self.new_qos_spec.name, - {'consumer': self.new_qos_spec.consumer} - ) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_qos_create_with_properties(self): - arglist = [ - '--consumer', self.new_qos_spec.consumer, - '--property', 'foo=bar', - '--property', 'iops=9001', - self.new_qos_spec.name, - ] - verifylist = [ - ('consumer', self.new_qos_spec.consumer), - ('property', self.new_qos_spec.specs), - ('name', self.new_qos_spec.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.new_qos_spec.specs.update( - {'consumer': self.new_qos_spec.consumer}) - self.qos_mock.create.assert_called_with( - self.new_qos_spec.name, - self.new_qos_spec.specs - ) - - self.assertEqual(self.columns, columns) - 
self.assertCountEqual(self.datalist, data) - - -class TestQosDelete(TestQos): - - qos_specs = volume_fakes.create_qoses(count=2) - - def setUp(self): - super().setUp() - - self.qos_mock.get = ( - volume_fakes.get_qoses(self.qos_specs)) - # Get the command object to test - self.cmd = qos_specs.DeleteQos(self.app, None) - - def test_qos_delete_with_id(self): - arglist = [ - self.qos_specs[0].id - ] - verifylist = [ - ('qos_specs', [self.qos_specs[0].id]) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.qos_mock.delete.assert_called_with(self.qos_specs[0].id, False) - self.assertIsNone(result) - - def test_qos_delete_with_name(self): - arglist = [ - self.qos_specs[0].name - ] - verifylist = [ - ('qos_specs', [self.qos_specs[0].name]) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.qos_mock.delete.assert_called_with(self.qos_specs[0].id, False) - self.assertIsNone(result) - - def test_qos_delete_with_force(self): - arglist = [ - '--force', - self.qos_specs[0].id - ] - verifylist = [ - ('force', True), - ('qos_specs', [self.qos_specs[0].id]) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.qos_mock.delete.assert_called_with(self.qos_specs[0].id, True) - self.assertIsNone(result) - - def test_delete_multiple_qoses(self): - arglist = [] - for q in self.qos_specs: - arglist.append(q.id) - verifylist = [ - ('qos_specs', arglist), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - - calls = [] - for q in self.qos_specs: - calls.append(call(q.id, False)) - self.qos_mock.delete.assert_has_calls(calls) - self.assertIsNone(result) - - def test_delete_multiple_qoses_with_exception(self): - arglist = [ - self.qos_specs[0].id, - 'unexist_qos', - ] - verifylist = [ - ('qos_specs', arglist), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - find_mock_result = [self.qos_specs[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual( - '1 of 2 QoS specifications failed to delete.', str(e)) - - find_mock.assert_any_call(self.qos_mock, self.qos_specs[0].id) - find_mock.assert_any_call(self.qos_mock, 'unexist_qos') - - self.assertEqual(2, find_mock.call_count) - self.qos_mock.delete.assert_called_once_with( - self.qos_specs[0].id, False - ) - - -class TestQosDisassociate(TestQos): - - volume_type = volume_fakes.create_one_volume_type() - qos_spec = volume_fakes.create_one_qos() - - def setUp(self): - super().setUp() - - self.qos_mock.get.return_value = self.qos_spec - self.types_mock.get.return_value = self.volume_type - # Get the command object to test - self.cmd = qos_specs.DisassociateQos(self.app, None) - - def test_qos_disassociate_with_volume_type(self): - arglist = [ - '--volume-type', self.volume_type.id, - self.qos_spec.id, - ] - verifylist = [ - ('volume_type', self.volume_type.id), - ('qos_spec', self.qos_spec.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.qos_mock.disassociate.assert_called_with( - self.qos_spec.id, - self.volume_type.id - ) - self.assertIsNone(result) - - def 
test_qos_disassociate_with_all_volume_types(self): - arglist = [ - '--all', - self.qos_spec.id, - ] - verifylist = [ - ('qos_spec', self.qos_spec.id) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.qos_mock.disassociate_all.assert_called_with(self.qos_spec.id) - self.assertIsNone(result) - - -class TestQosList(TestQos): - - qos_specs = volume_fakes.create_qoses(count=2) - qos_association = volume_fakes.create_one_qos_association() - - columns = ( - 'ID', - 'Name', - 'Consumer', - 'Associations', - 'Properties', - ) - data = [] - for q in qos_specs: - data.append(( - q.id, - q.name, - q.consumer, - format_columns.ListColumn([qos_association.name]), - format_columns.DictColumn(q.specs), - )) - - def setUp(self): - super().setUp() - - self.qos_mock.list.return_value = self.qos_specs - self.qos_mock.get_associations.return_value = [self.qos_association] - - # Get the command object to test - self.cmd = qos_specs.ListQos(self.app, None) - - def test_qos_list(self): - arglist = [] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.qos_mock.list.assert_called_with() - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, list(data)) - - def test_qos_list_no_association(self): - self.qos_mock.reset_mock() - self.qos_mock.get_associations.side_effect = [ - [self.qos_association], - exceptions.NotFound("NotFound"), - ] - - arglist = [] - verifylist = [] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.qos_mock.list.assert_called_with() - - self.assertEqual(self.columns, columns) - - ex_data = copy.deepcopy(self.data) - ex_data[1] = ( - self.qos_specs[1].id, - self.qos_specs[1].name, - self.qos_specs[1].consumer, - format_columns.ListColumn(None), - format_columns.DictColumn(self.qos_specs[1].specs), - ) - self.assertCountEqual(ex_data, list(data)) - - -class TestQosSet(TestQos): - - qos_spec = volume_fakes.create_one_qos() - - def setUp(self): - super().setUp() - - self.qos_mock.get.return_value = self.qos_spec - # Get the command object to test - self.cmd = qos_specs.SetQos(self.app, None) - - def test_qos_set_with_properties_with_id(self): - arglist = [ - '--property', 'foo=bar', - '--property', 'iops=9001', - self.qos_spec.id, - ] - verifylist = [ - ('property', self.qos_spec.specs), - ('qos_spec', self.qos_spec.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.qos_mock.set_keys.assert_called_with( - self.qos_spec.id, - self.qos_spec.specs - ) - self.assertIsNone(result) - - -class TestQosShow(TestQos): - - qos_spec = volume_fakes.create_one_qos() - qos_association = volume_fakes.create_one_qos_association() - - def setUp(self): - super().setUp() - self.qos_mock.get.return_value = self.qos_spec - self.qos_mock.get_associations.return_value = [self.qos_association] - # Get the command object to test - self.cmd = qos_specs.ShowQos(self.app, None) - - def test_qos_show(self): - arglist = [ - self.qos_spec.id - ] - verifylist = [ - ('qos_spec', self.qos_spec.id) - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.qos_mock.get.assert_called_with( - self.qos_spec.id - ) - - collist = ( - 'associations', - 'consumer', - 'id', - 'name', - 'properties' - ) - 
self.assertEqual(collist, columns) - datalist = ( - format_columns.ListColumn([self.qos_association.name]), - self.qos_spec.consumer, - self.qos_spec.id, - self.qos_spec.name, - format_columns.DictColumn(self.qos_spec.specs), - ) - self.assertCountEqual(datalist, tuple(data)) - - -class TestQosUnset(TestQos): - - qos_spec = volume_fakes.create_one_qos() - - def setUp(self): - super().setUp() - - self.qos_mock.get.return_value = self.qos_spec - # Get the command object to test - self.cmd = qos_specs.UnsetQos(self.app, None) - - def test_qos_unset_with_properties(self): - arglist = [ - '--property', 'iops', - '--property', 'foo', - self.qos_spec.id, - ] - verifylist = [ - ('property', ['iops', 'foo']), - ('qos_spec', self.qos_spec.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.qos_mock.unset_keys.assert_called_with( - self.qos_spec.id, - ['iops', 'foo'] - ) - self.assertIsNone(result) - - def test_qos_unset_nothing(self): - arglist = [ - self.qos_spec.id, - ] - - verifylist = [ - ('qos_spec', self.qos_spec.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.assertIsNone(result) diff --git a/openstackclient/tests/unit/volume/v1/test_service.py b/openstackclient/tests/unit/volume/v1/test_service.py deleted file mode 100644 index a199c91349..0000000000 --- a/openstackclient/tests/unit/volume/v1/test_service.py +++ /dev/null @@ -1,286 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from osc_lib import exceptions - -from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes -from openstackclient.volume.v1 import service - - -class TestService(volume_fakes.TestVolumev1): - - def setUp(self): - super().setUp() - - # Get a shortcut to the ServiceManager Mock - self.service_mock = self.app.client_manager.volume.services - self.service_mock.reset_mock() - - -class TestServiceList(TestService): - - # The service to be listed - services = volume_fakes.create_one_service() - - def setUp(self): - super().setUp() - - self.service_mock.list.return_value = [self.services] - - # Get the command object to test - self.cmd = service.ListService(self.app, None) - - def test_service_list(self): - arglist = [ - '--host', self.services.host, - '--service', self.services.binary, - ] - verifylist = [ - ('host', self.services.host), - ('service', self.services.binary), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class Lister in cliff, abstract method take_action() - # returns a tuple containing the column names and an iterable - # containing the data to be listed. - columns, data = self.cmd.take_action(parsed_args) - - expected_columns = [ - 'Binary', - 'Host', - 'Zone', - 'Status', - 'State', - 'Updated At', - ] - - # confirming if all expected columns are present in the result. 
- self.assertEqual(expected_columns, columns) - - datalist = (( - self.services.binary, - self.services.host, - self.services.zone, - self.services.status, - self.services.state, - self.services.updated_at, - ), ) - - # confirming if all expected values are present in the result. - self.assertEqual(datalist, tuple(data)) - - # checking if proper call was made to list services - self.service_mock.list.assert_called_with( - self.services.host, - self.services.binary, - ) - - # checking if prohibited columns are present in output - self.assertNotIn("Disabled Reason", columns) - self.assertNotIn(self.services.disabled_reason, - tuple(data)) - - def test_service_list_with_long_option(self): - arglist = [ - '--host', self.services.host, - '--service', self.services.binary, - '--long' - ] - verifylist = [ - ('host', self.services.host), - ('service', self.services.binary), - ('long', True) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class Lister in cliff, abstract method take_action() - # returns a tuple containing the column names and an iterable - # containing the data to be listed. - columns, data = self.cmd.take_action(parsed_args) - - expected_columns = [ - 'Binary', - 'Host', - 'Zone', - 'Status', - 'State', - 'Updated At', - 'Disabled Reason' - ] - - # confirming if all expected columns are present in the result. - self.assertEqual(expected_columns, columns) - - datalist = (( - self.services.binary, - self.services.host, - self.services.zone, - self.services.status, - self.services.state, - self.services.updated_at, - self.services.disabled_reason, - ), ) - - # confirming if all expected values are present in the result. - self.assertEqual(datalist, tuple(data)) - - self.service_mock.list.assert_called_with( - self.services.host, - self.services.binary, - ) - - -class TestServiceSet(TestService): - - service = volume_fakes.create_one_service() - - def setUp(self): - super().setUp() - - self.service_mock.enable.return_value = self.service - self.service_mock.disable.return_value = self.service - self.service_mock.disable_log_reason.return_value = self.service - - self.cmd = service.SetService(self.app, None) - - def test_service_set_nothing(self): - arglist = [ - self.service.host, - self.service.binary, - ] - verifylist = [ - ('host', self.service.host), - ('service', self.service.binary), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - - self.service_mock.enable.assert_not_called() - self.service_mock.disable.assert_not_called() - self.service_mock.disable_log_reason.assert_not_called() - self.assertIsNone(result) - - def test_service_set_enable(self): - arglist = [ - '--enable', - self.service.host, - self.service.binary, - ] - verifylist = [ - ('enable', True), - ('host', self.service.host), - ('service', self.service.binary), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.service_mock.enable.assert_called_with( - self.service.host, - self.service.binary - ) - self.service_mock.disable.assert_not_called() - self.service_mock.disable_log_reason.assert_not_called() - self.assertIsNone(result) - - def test_service_set_disable(self): - arglist = [ - '--disable', - self.service.host, - self.service.binary, - ] - verifylist = [ - ('disable', True), - ('host', self.service.host), - ('service', self.service.binary), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = 
self.cmd.take_action(parsed_args) - - self.service_mock.disable.assert_called_with( - self.service.host, - self.service.binary - ) - self.service_mock.enable.assert_not_called() - self.service_mock.disable_log_reason.assert_not_called() - self.assertIsNone(result) - - def test_service_set_disable_with_reason(self): - reason = 'earthquake' - arglist = [ - '--disable', - '--disable-reason', reason, - self.service.host, - self.service.binary, - ] - verifylist = [ - ('disable', True), - ('disable_reason', reason), - ('host', self.service.host), - ('service', self.service.binary), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.service_mock.disable_log_reason.assert_called_with( - self.service.host, - self.service.binary, - reason - ) - self.assertIsNone(result) - - def test_service_set_only_with_disable_reason(self): - reason = 'earthquake' - arglist = [ - '--disable-reason', reason, - self.service.host, - self.service.binary, - ] - verifylist = [ - ('disable_reason', reason), - ('host', self.service.host), - ('service', self.service.binary), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - try: - self.cmd.take_action(parsed_args) - self.fail("CommandError should be raised.") - except exceptions.CommandError as e: - self.assertEqual("Cannot specify option --disable-reason without " - "--disable specified.", str(e)) - - def test_service_set_enable_with_disable_reason(self): - reason = 'earthquake' - arglist = [ - '--enable', - '--disable-reason', reason, - self.service.host, - self.service.binary, - ] - verifylist = [ - ('enable', True), - ('disable_reason', reason), - ('host', self.service.host), - ('service', self.service.binary), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - try: - self.cmd.take_action(parsed_args) - self.fail("CommandError should be raised.") - except exceptions.CommandError as e: - self.assertEqual("Cannot specify option --disable-reason without " - "--disable specified.", str(e)) diff --git a/openstackclient/tests/unit/volume/v1/test_type.py b/openstackclient/tests/unit/volume/v1/test_type.py deleted file mode 100644 index c878824901..0000000000 --- a/openstackclient/tests/unit/volume/v1/test_type.py +++ /dev/null @@ -1,624 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from unittest import mock -from unittest.mock import call - -from osc_lib.cli import format_columns -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.tests.unit import utils as tests_utils -from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes -from openstackclient.volume.v1 import volume_type - - -class TestType(volume_fakes.TestVolumev1): - - def setUp(self): - super().setUp() - - self.types_mock = self.app.client_manager.volume.volume_types - self.types_mock.reset_mock() - - self.encryption_types_mock = ( - self.app.client_manager.volume.volume_encryption_types) - self.encryption_types_mock.reset_mock() - - -class TestTypeCreate(TestType): - - columns = ( - 'description', - 'id', - 'is_public', - 'name', - ) - - def setUp(self): - super().setUp() - - self.new_volume_type = volume_fakes.create_one_volume_type( - methods={'set_keys': {'myprop': 'myvalue'}}, - ) - self.data = ( - self.new_volume_type.description, - self.new_volume_type.id, - True, - self.new_volume_type.name, - ) - - self.types_mock.create.return_value = self.new_volume_type - # Get the command object to test - self.cmd = volume_type.CreateVolumeType(self.app, None) - - def test_type_create(self): - arglist = [ - self.new_volume_type.name, - ] - verifylist = [ - ("name", self.new_volume_type.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.types_mock.create.assert_called_with( - self.new_volume_type.name, - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, data) - - def test_type_create_with_encryption(self): - encryption_info = { - 'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': '128', - 'control_location': 'front-end', - } - encryption_type = volume_fakes.create_one_encryption_volume_type( - attrs=encryption_info, - ) - self.new_volume_type = volume_fakes.create_one_volume_type( - attrs={'encryption': encryption_info}, - ) - self.types_mock.create.return_value = self.new_volume_type - self.encryption_types_mock.create.return_value = encryption_type - encryption_columns = ( - 'description', - 'encryption', - 'id', - 'is_public', - 'name', - ) - encryption_data = ( - self.new_volume_type.description, - format_columns.DictColumn(encryption_info), - self.new_volume_type.id, - True, - self.new_volume_type.name, - ) - arglist = [ - '--encryption-provider', 'LuksEncryptor', - '--encryption-cipher', 'aes-xts-plain64', - '--encryption-key-size', '128', - '--encryption-control-location', 'front-end', - self.new_volume_type.name, - ] - verifylist = [ - ('encryption_provider', 'LuksEncryptor'), - ('encryption_cipher', 'aes-xts-plain64'), - ('encryption_key_size', 128), - ('encryption_control_location', 'front-end'), - ('name', self.new_volume_type.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.types_mock.create.assert_called_with( - self.new_volume_type.name, - ) - body = { - 'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end', - } - self.encryption_types_mock.create.assert_called_with( - self.new_volume_type, - body, - ) - self.assertEqual(encryption_columns, columns) - self.assertCountEqual(encryption_data, data) - - -class TestTypeDelete(TestType): - - volume_types = volume_fakes.create_volume_types(count=2) - - def setUp(self): - super().setUp() - - self.types_mock.get = 
volume_fakes.get_volume_types(self.volume_types) - self.types_mock.delete.return_value = None - - # Get the command object to mock - self.cmd = volume_type.DeleteVolumeType(self.app, None) - - def test_type_delete(self): - arglist = [ - self.volume_types[0].id - ] - verifylist = [ - ("volume_types", [self.volume_types[0].id]) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.types_mock.delete.assert_called_with(self.volume_types[0]) - self.assertIsNone(result) - - def test_delete_multiple_types(self): - arglist = [] - for t in self.volume_types: - arglist.append(t.id) - verifylist = [ - ('volume_types', arglist), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - - calls = [] - for t in self.volume_types: - calls.append(call(t)) - self.types_mock.delete.assert_has_calls(calls) - self.assertIsNone(result) - - def test_delete_multiple_types_with_exception(self): - arglist = [ - self.volume_types[0].id, - 'unexist_type', - ] - verifylist = [ - ('volume_types', arglist), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - find_mock_result = [self.volume_types[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('1 of 2 volume types failed to delete.', - str(e)) - - find_mock.assert_any_call( - self.types_mock, self.volume_types[0].id) - find_mock.assert_any_call(self.types_mock, 'unexist_type') - - self.assertEqual(2, find_mock.call_count) - self.types_mock.delete.assert_called_once_with( - self.volume_types[0] - ) - - -class TestTypeList(TestType): - - volume_types = volume_fakes.create_volume_types() - - columns = [ - "ID", - "Name", - "Is Public", - ] - columns_long = [ - "ID", - "Name", - "Is Public", - "Properties" - ] - - data = [] - for t in volume_types: - data.append(( - t.id, - t.name, - t.is_public, - )) - data_long = [] - for t in volume_types: - data_long.append(( - t.id, - t.name, - t.is_public, - format_columns.DictColumn(t.extra_specs), - )) - - def setUp(self): - super().setUp() - - self.types_mock.list.return_value = self.volume_types - self.encryption_types_mock.create.return_value = None - self.encryption_types_mock.update.return_value = None - # get the command to test - self.cmd = volume_type.ListVolumeType(self.app, None) - - def test_type_list_without_options(self): - arglist = [] - verifylist = [ - ("long", False), - ("encryption_type", False), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.types_mock.list.assert_called_once_with() - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, list(data)) - - def test_type_list_with_options(self): - arglist = [ - "--long", - ] - verifylist = [ - ("long", True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.types_mock.list.assert_called_once_with() - self.assertEqual(self.columns_long, columns) - self.assertCountEqual(self.data_long, list(data)) - - def test_type_list_with_encryption(self): - encryption_type = volume_fakes.create_one_encryption_volume_type( - attrs={'volume_type_id': self.volume_types[0].id}, - ) - encryption_info = { - 'provider': 'LuksEncryptor', - 
'cipher': None, - 'key_size': None, - 'control_location': 'front-end', - } - encryption_columns = self.columns + [ - "Encryption", - ] - encryption_data = [] - encryption_data.append(( - self.volume_types[0].id, - self.volume_types[0].name, - self.volume_types[0].is_public, - volume_type.EncryptionInfoColumn( - self.volume_types[0].id, - {self.volume_types[0].id: encryption_info}), - )) - encryption_data.append(( - self.volume_types[1].id, - self.volume_types[1].name, - self.volume_types[1].is_public, - volume_type.EncryptionInfoColumn( - self.volume_types[1].id, {}), - )) - - self.encryption_types_mock.list.return_value = [encryption_type] - arglist = [ - "--encryption-type", - ] - verifylist = [ - ("encryption_type", True), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.encryption_types_mock.list.assert_called_once_with() - self.types_mock.list.assert_called_once_with() - self.assertEqual(encryption_columns, columns) - self.assertCountEqual(encryption_data, list(data)) - - -class TestTypeSet(TestType): - - volume_type = volume_fakes.create_one_volume_type( - methods={'set_keys': None}, - ) - - def setUp(self): - super().setUp() - - self.types_mock.get.return_value = self.volume_type - - # Get the command object to test - self.cmd = volume_type.SetVolumeType(self.app, None) - - def test_type_set_nothing(self): - arglist = [ - self.volume_type.id, - ] - verifylist = [ - ('volume_type', self.volume_type.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.assertIsNone(result) - - def test_type_set_property(self): - arglist = [ - '--property', 'myprop=myvalue', - self.volume_type.id, - ] - verifylist = [ - ('property', {'myprop': 'myvalue'}), - ('volume_type', self.volume_type.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.volume_type.set_keys.assert_called_once_with( - {'myprop': 'myvalue'}) - self.assertIsNone(result) - - def test_type_set_new_encryption(self): - arglist = [ - '--encryption-provider', 'LuksEncryptor', - '--encryption-cipher', 'aes-xts-plain64', - '--encryption-key-size', '128', - '--encryption-control-location', 'front-end', - self.volume_type.id, - ] - verifylist = [ - ('encryption_provider', 'LuksEncryptor'), - ('encryption_cipher', 'aes-xts-plain64'), - ('encryption_key_size', 128), - ('encryption_control_location', 'front-end'), - ('volume_type', self.volume_type.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - body = { - 'provider': 'LuksEncryptor', - 'cipher': 'aes-xts-plain64', - 'key_size': 128, - 'control_location': 'front-end', - } - self.encryption_types_mock.create.assert_called_with( - self.volume_type, - body, - ) - self.assertIsNone(result) - - def test_type_set_new_encryption_without_provider(self): - arglist = [ - '--encryption-cipher', 'aes-xts-plain64', - '--encryption-key-size', '128', - '--encryption-control-location', 'front-end', - self.volume_type.id, - ] - verifylist = [ - ('encryption_cipher', 'aes-xts-plain64'), - ('encryption_key_size', 128), - ('encryption_control_location', 'front-end'), - ('volume_type', self.volume_type.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - 
self.assertEqual("Command Failed: One or more of" - " the operations failed", - str(e)) - self.encryption_types_mock.create.assert_not_called() - self.encryption_types_mock.update.assert_not_called() - - -class TestTypeShow(TestType): - - columns = ( - 'description', - 'id', - 'is_public', - 'name', - 'properties', - ) - - def setUp(self): - super().setUp() - - self.volume_type = volume_fakes.create_one_volume_type() - self.data = ( - self.volume_type.description, - self.volume_type.id, - True, - self.volume_type.name, - format_columns.DictColumn(self.volume_type.extra_specs) - ) - - self.types_mock.get.return_value = self.volume_type - - # Get the command object to test - self.cmd = volume_type.ShowVolumeType(self.app, None) - - def test_type_show(self): - arglist = [ - self.volume_type.id - ] - verifylist = [ - ("volume_type", self.volume_type.id), - ("encryption_type", False), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.types_mock.get.assert_called_with(self.volume_type.id) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, data) - - def test_type_show_with_encryption(self): - encryption_type = volume_fakes.create_one_encryption_volume_type() - encryption_info = { - 'provider': 'LuksEncryptor', - 'cipher': None, - 'key_size': None, - 'control_location': 'front-end', - } - self.volume_type = volume_fakes.create_one_volume_type( - attrs={'encryption': encryption_info}, - ) - self.types_mock.get.return_value = self.volume_type - self.encryption_types_mock.get.return_value = encryption_type - encryption_columns = ( - 'description', - 'encryption', - 'id', - 'is_public', - 'name', - 'properties', - ) - encryption_data = ( - self.volume_type.description, - format_columns.DictColumn(encryption_info), - self.volume_type.id, - True, - self.volume_type.name, - format_columns.DictColumn(self.volume_type.extra_specs) - ) - arglist = [ - '--encryption-type', - self.volume_type.id - ] - verifylist = [ - ('encryption_type', True), - ("volume_type", self.volume_type.id) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.types_mock.get.assert_called_with(self.volume_type.id) - self.encryption_types_mock.get.assert_called_with(self.volume_type.id) - self.assertEqual(encryption_columns, columns) - self.assertCountEqual(encryption_data, data) - - -class TestTypeUnset(TestType): - - volume_type = volume_fakes.create_one_volume_type( - methods={'unset_keys': None}, - ) - - def setUp(self): - super().setUp() - - self.types_mock.get.return_value = self.volume_type - - # Get the command object to test - self.cmd = volume_type.UnsetVolumeType(self.app, None) - - def test_type_unset_property(self): - arglist = [ - '--property', 'property', - '--property', 'multi_property', - self.volume_type.id, - ] - verifylist = [ - ('encryption_type', False), - ('property', ['property', 'multi_property']), - ('volume_type', self.volume_type.id), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.volume_type.unset_keys.assert_called_once_with( - ['property', 'multi_property']) - self.encryption_types_mock.delete.assert_not_called() - self.assertIsNone(result) - - def test_type_unset_failed_with_missing_volume_type_argument(self): - arglist = [ - '--property', 'property', - '--property', 'multi_property', - ] - verifylist = [ - ('property', ['property', 
'multi_property']), - ] - - self.assertRaises(tests_utils.ParserException, - self.check_parser, - self.cmd, - arglist, - verifylist) - - def test_type_unset_nothing(self): - arglist = [ - self.volume_type.id, - ] - verifylist = [ - ('volume_type', self.volume_type.id), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.assertIsNone(result) - - def test_type_unset_encryption_type(self): - arglist = [ - '--encryption-type', - self.volume_type.id, - ] - verifylist = [ - ('encryption_type', True), - ('volume_type', self.volume_type.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.encryption_types_mock.delete.assert_called_with(self.volume_type) - self.assertIsNone(result) - - -class TestColumns(TestType): - - def test_encryption_info_column_with_info(self): - fake_volume_type = volume_fakes.create_one_volume_type() - type_id = fake_volume_type.id - - encryption_info = { - 'provider': 'LuksEncryptor', - 'cipher': None, - 'key_size': None, - 'control_location': 'front-end', - } - col = volume_type.EncryptionInfoColumn(type_id, - {type_id: encryption_info}) - self.assertEqual(utils.format_dict(encryption_info), - col.human_readable()) - self.assertEqual(encryption_info, col.machine_readable()) - - def test_encryption_info_column_without_info(self): - fake_volume_type = volume_fakes.create_one_volume_type() - type_id = fake_volume_type.id - - col = volume_type.EncryptionInfoColumn(type_id, {}) - self.assertEqual('-', col.human_readable()) - self.assertIsNone(col.machine_readable()) diff --git a/openstackclient/tests/unit/volume/v1/test_volume.py b/openstackclient/tests/unit/volume/v1/test_volume.py deleted file mode 100644 index b46a608d15..0000000000 --- a/openstackclient/tests/unit/volume/v1/test_volume.py +++ /dev/null @@ -1,1414 +0,0 @@ -# Copyright 2013 Nebula Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import argparse -from unittest import mock -from unittest.mock import call - -from osc_lib.cli import format_columns -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes -from openstackclient.tests.unit.image.v1 import fakes as image_fakes -from openstackclient.tests.unit import utils as tests_utils -from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes -from openstackclient.volume.v1 import volume - - -class TestVolume(volume_fakes.TestVolumev1): - - def setUp(self): - super().setUp() - - # Get a shortcut to the VolumeManager Mock - self.volumes_mock = self.app.client_manager.volume.volumes - self.volumes_mock.reset_mock() - - # Get a shortcut to the TenantManager Mock - self.projects_mock = self.app.client_manager.identity.tenants - self.projects_mock.reset_mock() - - # Get a shortcut to the UserManager Mock - self.users_mock = self.app.client_manager.identity.users - self.users_mock.reset_mock() - - # Get a shortcut to the ImageManager Mock - self.images_mock = self.app.client_manager.image.images - self.images_mock.reset_mock() - - def setup_volumes_mock(self, count): - volumes = volume_fakes.create_volumes(count=count) - - self.volumes_mock.get = volume_fakes.get_volumes(volumes, 0) - return volumes - - -class TestVolumeCreate(TestVolume): - - project = identity_fakes.FakeProject.create_one_project() - user = identity_fakes.FakeUser.create_one_user() - - columns = ( - 'attachments', - 'availability_zone', - 'bootable', - 'created_at', - 'display_description', - 'id', - 'name', - 'properties', - 'size', - 'snapshot_id', - 'status', - 'type', - ) - - def setUp(self): - super().setUp() - self.new_volume = volume_fakes.create_one_volume() - self.datalist = ( - self.new_volume.attachments, - self.new_volume.availability_zone, - self.new_volume.bootable, - self.new_volume.created_at, - self.new_volume.display_description, - self.new_volume.id, - self.new_volume.display_name, - format_columns.DictColumn(self.new_volume.metadata), - self.new_volume.size, - self.new_volume.snapshot_id, - self.new_volume.status, - self.new_volume.volume_type, - ) - self.volumes_mock.create.return_value = self.new_volume - - # Get the command object to test - self.cmd = volume.CreateVolume(self.app, None) - - def test_volume_create_min_options(self): - arglist = [ - '--size', str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
- columns, data = self.cmd.take_action(parsed_args) - - # VolumeManager.create(size, snapshot_id=, source_volid=, - # display_name=, display_description=, - # volume_type=, user_id=, - # project_id=, availability_zone=, - # metadata=, imageRef=) - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - None, - None, - None, - None, - None, - ) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_options(self): - arglist = [ - '--size', str(self.new_volume.size), - '--description', self.new_volume.display_description, - '--type', self.new_volume.volume_type, - '--availability-zone', self.new_volume.availability_zone, - self.new_volume.display_name, - ] - verifylist = [ - ('size', self.new_volume.size), - ('description', self.new_volume.display_description), - ('type', self.new_volume.volume_type), - ('availability_zone', self.new_volume.availability_zone), - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. - columns, data = self.cmd.take_action(parsed_args) - - # VolumeManager.create(size, snapshot_id=, source_volid=, - # display_name=, display_description=, - # volume_type=, user_id=, - # project_id=, availability_zone=, - # metadata=, imageRef=) - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - self.new_volume.display_description, - self.new_volume.volume_type, - None, - None, - self.new_volume.availability_zone, - None, - None, - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_user_project_id(self): - # Return a project - self.projects_mock.get.return_value = self.project - # Return a user - self.users_mock.get.return_value = self.user - - arglist = [ - '--size', str(self.new_volume.size), - '--project', self.project.id, - '--user', self.user.id, - self.new_volume.display_name, - ] - verifylist = [ - ('size', self.new_volume.size), - ('project', self.project.id), - ('user', self.user.id), - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
- columns, data = self.cmd.take_action(parsed_args) - - # VolumeManager.create(size, snapshot_id=, source_volid=, - # display_name=, display_description=, - # volume_type=, user_id=, - # project_id=, availability_zone=, - # metadata=, imageRef=) - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - self.user.id, - self.project.id, - None, - None, - None, - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_user_project_name(self): - # Return a project - self.projects_mock.get.return_value = self.project - # Return a user - self.users_mock.get.return_value = self.user - - arglist = [ - '--size', str(self.new_volume.size), - '--project', self.project.name, - '--user', self.user.name, - self.new_volume.display_name, - ] - verifylist = [ - ('size', self.new_volume.size), - ('project', self.project.name), - ('user', self.user.name), - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. - columns, data = self.cmd.take_action(parsed_args) - - # VolumeManager.create(size, snapshot_id=, source_volid=, - # display_name=, display_description=, - # volume_type=, user_id=, - # project_id=, availability_zone=, - # metadata=, imageRef=) - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - self.user.id, - self.project.id, - None, - None, - None, - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_properties(self): - arglist = [ - '--property', 'Alpha=a', - '--property', 'Beta=b', - '--size', str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('property', {'Alpha': 'a', 'Beta': 'b'}), - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. - columns, data = self.cmd.take_action(parsed_args) - - # VolumeManager.create(size, snapshot_id=, source_volid=, - # display_name=, display_description=, - # volume_type=, user_id=, - # project_id=, availability_zone=, - # metadata=, imageRef=) - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - None, - None, - None, - {'Alpha': 'a', 'Beta': 'b'}, - None, - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_image_id(self): - image = image_fakes.create_one_image() - self.images_mock.get.return_value = image - - arglist = [ - '--image', image.id, - '--size', str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('image', image.id), - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
- columns, data = self.cmd.take_action(parsed_args) - - # VolumeManager.create(size, snapshot_id=, source_volid=, - # display_name=, display_description=, - # volume_type=, user_id=, - # project_id=, availability_zone=, - # metadata=, imageRef=) - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - None, - None, - None, - None, - image.id, - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_image_name(self): - image = image_fakes.create_one_image() - self.images_mock.get.return_value = image - - arglist = [ - '--image', image.name, - '--size', str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('image', image.name), - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. - columns, data = self.cmd.take_action(parsed_args) - - # VolumeManager.create(size, snapshot_id=, source_volid=, - # display_name=, display_description=, - # volume_type=, user_id=, - # project_id=, availability_zone=, - # metadata=, imageRef=) - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - None, - None, - None, - None, - image.id, - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_with_source(self): - self.volumes_mock.get.return_value = self.new_volume - arglist = [ - '--source', self.new_volume.id, - self.new_volume.display_name, - ] - verifylist = [ - ('source', self.new_volume.id), - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.create.assert_called_with( - None, - None, - self.new_volume.id, - self.new_volume.display_name, - None, - None, - None, - None, - None, - None, - None, - ) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - @mock.patch.object(utils, 'wait_for_status', return_value=True) - def test_volume_create_with_bootable_and_readonly(self, mock_wait): - arglist = [ - '--bootable', - '--read-only', - '--size', str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('bootable', True), - ('non_bootable', False), - ('read_only', True), - ('read_write', False), - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - - parsed_args = self.check_parser( - self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - None, - None, - None, - None, - None, - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - self.volumes_mock.set_bootable.assert_called_with( - self.new_volume.id, True) - self.volumes_mock.update_readonly_flag.assert_called_with( - self.new_volume.id, True) - - @mock.patch.object(utils, 'wait_for_status', return_value=True) - def test_volume_create_with_nonbootable_and_readwrite(self, mock_wait): - arglist = [ - '--non-bootable', - '--read-write', - '--size', 
str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('bootable', False), - ('non_bootable', True), - ('read_only', False), - ('read_write', True), - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - - parsed_args = self.check_parser( - self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - None, - None, - None, - None, - None, - ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - self.volumes_mock.set_bootable.assert_called_with( - self.new_volume.id, False) - self.volumes_mock.update_readonly_flag.assert_called_with( - self.new_volume.id, False) - - @mock.patch.object(volume.LOG, 'error') - @mock.patch.object(utils, 'wait_for_status', return_value=True) - def test_volume_create_with_bootable_and_readonly_fail( - self, mock_wait, mock_error): - - self.volumes_mock.set_bootable.side_effect = ( - exceptions.CommandError()) - - self.volumes_mock.update_readonly_flag.side_effect = ( - exceptions.CommandError()) - - arglist = [ - '--bootable', - '--read-only', - '--size', str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('bootable', True), - ('non_bootable', False), - ('read_only', True), - ('read_write', False), - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - - parsed_args = self.check_parser( - self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - None, - None, - None, - None, - None, - ) - - self.assertEqual(2, mock_error.call_count) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - self.volumes_mock.set_bootable.assert_called_with( - self.new_volume.id, True) - self.volumes_mock.update_readonly_flag.assert_called_with( - self.new_volume.id, True) - - @mock.patch.object(volume.LOG, 'error') - @mock.patch.object(utils, 'wait_for_status', return_value=False) - def test_volume_create_non_available_with_readonly( - self, mock_wait, mock_error): - arglist = [ - '--non-bootable', - '--read-only', - '--size', str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('bootable', False), - ('non_bootable', True), - ('read_only', True), - ('read_write', False), - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - - parsed_args = self.check_parser( - self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - None, - None, - None, - None, - None, - ) - - self.assertEqual(2, mock_error.call_count) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_without_size(self): - arglist = [ - self.new_volume.display_name, - ] - verifylist = [ - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) - - def test_volume_create_with_multi_source(self): - arglist = [ - '--image', 'source_image', - '--source', 'source_volume', - '--snapshot', 'source_snapshot', - 
'--size', str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('image', 'source_image'), - ('source', 'source_volume'), - ('snapshot', 'source_snapshot'), - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) - - def test_volume_create_backward_compatibility(self): - arglist = [ - '-c', 'display_name', - '--size', str(self.new_volume.size), - self.new_volume.display_name, - ] - verifylist = [ - ('columns', ['display_name']), - ('size', self.new_volume.size), - ('name', self.new_volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.create.assert_called_with( - self.new_volume.size, - None, - None, - self.new_volume.display_name, - None, - None, - None, - None, - None, - None, - None, - ) - self.assertIn('display_name', columns) - self.assertNotIn('name', columns) - self.assertIn(self.new_volume.display_name, data) - - -class TestVolumeDelete(TestVolume): - - def setUp(self): - super().setUp() - - self.volumes_mock.delete.return_value = None - - # Get the command object to mock - self.cmd = volume.DeleteVolume(self.app, None) - - def test_volume_delete_one_volume(self): - volumes = self.setup_volumes_mock(count=1) - - arglist = [ - volumes[0].id - ] - verifylist = [ - ("force", False), - ("volumes", [volumes[0].id]), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.volumes_mock.delete.assert_called_once_with(volumes[0].id) - self.assertIsNone(result) - - def test_volume_delete_multi_volumes(self): - volumes = self.setup_volumes_mock(count=3) - - arglist = [v.id for v in volumes] - verifylist = [ - ('force', False), - ('volumes', arglist), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - calls = [call(v.id) for v in volumes] - self.volumes_mock.delete.assert_has_calls(calls) - self.assertIsNone(result) - - def test_volume_delete_multi_volumes_with_exception(self): - volumes = self.setup_volumes_mock(count=2) - - arglist = [ - volumes[0].id, - 'unexist_volume', - ] - verifylist = [ - ('force', False), - ('volumes', arglist), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - find_mock_result = [volumes[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('1 of 2 volumes failed to delete.', - str(e)) - - find_mock.assert_any_call(self.volumes_mock, volumes[0].id) - find_mock.assert_any_call(self.volumes_mock, 'unexist_volume') - - self.assertEqual(2, find_mock.call_count) - self.volumes_mock.delete.assert_called_once_with(volumes[0].id) - - def test_volume_delete_with_force(self): - volumes = self.setup_volumes_mock(count=1) - - arglist = [ - '--force', - volumes[0].id, - ] - verifylist = [ - ('force', True), - ('volumes', [volumes[0].id]), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.volumes_mock.force_delete.assert_called_once_with(volumes[0].id) - self.assertIsNone(result) - - -class TestVolumeList(TestVolume): - - _volume = volume_fakes.create_one_volume() - columns 
= ( - 'ID', - 'Name', - 'Status', - 'Size', - 'Attached to', - ) - datalist = ( - ( - _volume.id, - _volume.display_name, - _volume.status, - _volume.size, - volume.AttachmentsColumn(_volume.attachments), - ), - ) - - def setUp(self): - super().setUp() - - self.volumes_mock.list.return_value = [self._volume] - - # Get the command object to test - self.cmd = volume.ListVolume(self.app, None) - - def test_volume_list_no_options(self): - arglist = [] - verifylist = [ - ('long', False), - ('all_projects', False), - ('name', None), - ('status', None), - ('limit', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, tuple(data)) - - def test_volume_list_name(self): - arglist = [ - '--name', self._volume.display_name, - ] - verifylist = [ - ('long', False), - ('all_projects', False), - ('name', self._volume.display_name), - ('status', None), - ('limit', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.assertEqual(self.columns, tuple(columns)) - self.assertCountEqual(self.datalist, tuple(data)) - - def test_volume_list_status(self): - arglist = [ - '--status', self._volume.status, - ] - verifylist = [ - ('long', False), - ('all_projects', False), - ('name', None), - ('status', self._volume.status), - ('limit', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.assertEqual(self.columns, tuple(columns)) - self.assertCountEqual(self.datalist, tuple(data)) - - def test_volume_list_all_projects(self): - arglist = [ - '--all-projects', - ] - verifylist = [ - ('long', False), - ('all_projects', True), - ('name', None), - ('status', None), - ('limit', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.assertEqual(self.columns, tuple(columns)) - self.assertCountEqual(self.datalist, tuple(data)) - - def test_volume_list_long(self): - arglist = [ - '--long', - ] - verifylist = [ - ('long', True), - ('all_projects', False), - ('name', None), - ('status', None), - ('limit', None), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - collist = ( - 'ID', - 'Name', - 'Status', - 'Size', - 'Type', - 'Bootable', - 'Attached to', - 'Properties', - ) - self.assertEqual(collist, columns) - - datalist = (( - self._volume.id, - self._volume.display_name, - self._volume.status, - self._volume.size, - self._volume.volume_type, - self._volume.bootable, - volume.AttachmentsColumn(self._volume.attachments), - format_columns.DictColumn(self._volume.metadata), - ), ) - self.assertCountEqual(datalist, tuple(data)) - - def test_volume_list_with_limit_and_offset(self): - arglist = [ - '--limit', '2', - '--offset', '5', - ] - verifylist = [ - ('long', False), - ('all_projects', False), - ('name', None), - ('status', None), - ('limit', 2), - ('offset', 5), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.list.assert_called_once_with( - limit=2, - search_opts={ - 'offset': 5, - 'status': None, - 'display_name': None, - 'all_tenants': False, - }, - ) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, tuple(data)) - - def 
test_volume_list_negative_limit(self): - arglist = [ - "--limit", "-2", - ] - verifylist = [ - ("limit", -2), - ] - self.assertRaises(argparse.ArgumentTypeError, self.check_parser, - self.cmd, arglist, verifylist) - - def test_volume_list_backward_compatibility(self): - arglist = [ - '-c', 'Display Name', - ] - verifylist = [ - ('columns', ['Display Name']), - ('long', False), - ('all_projects', False), - ('name', None), - ('status', None), - ('limit', None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.assertIn('Display Name', columns) - self.assertNotIn('Name', columns) - for each_volume in data: - self.assertIn(self._volume.display_name, each_volume) - - -class TestVolumeMigrate(TestVolume): - - _volume = volume_fakes.create_one_volume() - - def setUp(self): - super().setUp() - - self.volumes_mock.get.return_value = self._volume - self.volumes_mock.migrate_volume.return_value = None - # Get the command object to test - self.cmd = volume.MigrateVolume(self.app, None) - - def test_volume_migrate(self): - arglist = [ - "--host", "host@backend-name#pool", - self._volume.id, - ] - verifylist = [ - ("force_host_copy", False), - ("host", "host@backend-name#pool"), - ("volume", self._volume.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.volumes_mock.get.assert_called_once_with(self._volume.id) - self.volumes_mock.migrate_volume.assert_called_once_with( - self._volume.id, "host@backend-name#pool", False) - self.assertIsNone(result) - - def test_volume_migrate_with_option(self): - arglist = [ - "--force-host-copy", - "--host", "host@backend-name#pool", - self._volume.id, - ] - verifylist = [ - ("force_host_copy", True), - ("host", "host@backend-name#pool"), - ("volume", self._volume.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.volumes_mock.get.assert_called_once_with(self._volume.id) - self.volumes_mock.migrate_volume.assert_called_once_with( - self._volume.id, "host@backend-name#pool", True) - self.assertIsNone(result) - - def test_volume_migrate_without_host(self): - arglist = [ - self._volume.id, - ] - verifylist = [ - ("force_host_copy", False), - ("volume", self._volume.id), - ] - - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) - - -class TestVolumeSet(TestVolume): - - _volume = volume_fakes.create_one_volume() - - def setUp(self): - super().setUp() - - self.volumes_mock.get.return_value = self._volume - - self.volumes_mock.update.return_value = self._volume - # Get the command object to test - self.cmd = volume.SetVolume(self.app, None) - - def test_volume_set_no_options(self): - arglist = [ - self._volume.display_name, - ] - verifylist = [ - ('name', None), - ('description', None), - ('size', None), - ('property', None), - ('volume', self._volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.assertIsNone(result) - - def test_volume_set_name(self): - arglist = [ - '--name', 'qwerty', - self._volume.display_name, - ] - verifylist = [ - ('name', 'qwerty'), - ('description', None), - ('size', None), - ('property', None), - ('volume', self._volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - # Set expected values - 
kwargs = { - 'display_name': 'qwerty', - } - self.volumes_mock.update.assert_called_with( - self._volume.id, - **kwargs - ) - self.assertIsNone(result) - - def test_volume_set_description(self): - arglist = [ - '--description', 'new desc', - self._volume.display_name, - ] - verifylist = [ - ('name', None), - ('description', 'new desc'), - ('size', None), - ('property', None), - ('volume', self._volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = { - 'display_description': 'new desc', - } - self.volumes_mock.update.assert_called_with( - self._volume.id, - **kwargs - ) - self.assertIsNone(result) - - def test_volume_set_size(self): - arglist = [ - '--size', '130', - self._volume.display_name, - ] - verifylist = [ - ('name', None), - ('description', None), - ('size', 130), - ('property', None), - ('volume', self._volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - # Set expected values - size = 130 - self.volumes_mock.extend.assert_called_with( - self._volume.id, - size - ) - self.assertIsNone(result) - - def test_volume_set_size_smaller(self): - self._volume.status = 'available' - arglist = [ - '--size', '1', - self._volume.display_name, - ] - verifylist = [ - ('name', None), - ('description', None), - ('size', 1), - ('property', None), - ('volume', self._volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) - - def test_volume_set_size_not_available(self): - self._volume.status = 'error' - arglist = [ - '--size', '130', - self._volume.display_name, - ] - verifylist = [ - ('name', None), - ('description', None), - ('size', 130), - ('property', None), - ('volume', self._volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) - - def test_volume_set_property(self): - arglist = [ - '--no-property', - '--property', 'myprop=myvalue', - self._volume.display_name, - ] - verifylist = [ - ('read_only', False), - ('read_write', False), - ('name', None), - ('description', None), - ('size', None), - ('no_property', True), - ('property', {'myprop': 'myvalue'}), - ('volume', self._volume.display_name), - ('bootable', False), - ('non_bootable', False) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - # Set expected values - metadata = { - 'myprop': 'myvalue' - } - self.volumes_mock.set_metadata.assert_called_with( - self._volume.id, - metadata - ) - self.volumes_mock.delete_metadata.assert_called_with( - self._volume.id, - self._volume.metadata.keys() - ) - self.volumes_mock.update_readonly_flag.assert_not_called() - self.assertIsNone(result) - - def test_volume_set_bootable(self): - arglist = [ - ['--bootable', self._volume.id], - ['--non-bootable', self._volume.id] - ] - verifylist = [ - [ - ('bootable', True), - ('non_bootable', False), - ('volume', self._volume.id) - ], - [ - ('bootable', False), - ('non_bootable', True), - ('volume', self._volume.id) - ] - ] - for index in range(len(arglist)): - parsed_args = self.check_parser( - self.cmd, arglist[index], verifylist[index]) - - self.cmd.take_action(parsed_args) - self.volumes_mock.set_bootable.assert_called_with( - self._volume.id, 
verifylist[index][0][1]) - - def test_volume_set_readonly(self): - arglist = [ - '--read-only', - self._volume.id - ] - verifylist = [ - ('read_only', True), - ('read_write', False), - ('volume', self._volume.id) - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.volumes_mock.update_readonly_flag.assert_called_once_with( - self._volume.id, - True) - self.assertIsNone(result) - - def test_volume_set_read_write(self): - arglist = [ - '--read-write', - self._volume.id - ] - verifylist = [ - ('read_only', False), - ('read_write', True), - ('volume', self._volume.id) - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.volumes_mock.update_readonly_flag.assert_called_once_with( - self._volume.id, - False) - self.assertIsNone(result) - - -class TestVolumeShow(TestVolume): - - columns = ( - 'attachments', - 'availability_zone', - 'bootable', - 'created_at', - 'display_description', - 'id', - 'name', - 'properties', - 'size', - 'snapshot_id', - 'status', - 'type', - ) - - def setUp(self): - super().setUp() - self._volume = volume_fakes.create_one_volume() - self.datalist = ( - self._volume.attachments, - self._volume.availability_zone, - self._volume.bootable, - self._volume.created_at, - self._volume.display_description, - self._volume.id, - self._volume.display_name, - format_columns.DictColumn(self._volume.metadata), - self._volume.size, - self._volume.snapshot_id, - self._volume.status, - self._volume.volume_type, - ) - self.volumes_mock.get.return_value = self._volume - # Get the command object to test - self.cmd = volume.ShowVolume(self.app, None) - - def test_volume_show(self): - arglist = [ - self._volume.id - ] - verifylist = [ - ("volume", self._volume.id) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.get.assert_called_with(self._volume.id) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_show_backward_compatibility(self): - arglist = [ - '-c', 'display_name', - self._volume.id, - ] - verifylist = [ - ('columns', ['display_name']), - ('volume', self._volume.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.get.assert_called_with(self._volume.id) - - self.assertIn('display_name', columns) - self.assertNotIn('name', columns) - self.assertIn(self._volume.display_name, data) - - -class TestVolumeUnset(TestVolume): - - _volume = volume_fakes.create_one_volume() - - def setUp(self): - super().setUp() - - self.volumes_mock.get.return_value = self._volume - - self.volumes_mock.delete_metadata.return_value = None - # Get the command object to test - self.cmd = volume.UnsetVolume(self.app, None) - - def test_volume_unset_no_options(self): - arglist = [ - self._volume.display_name, - ] - verifylist = [ - ('property', None), - ('volume', self._volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.assertIsNone(result) - - def test_volume_unset_property(self): - arglist = [ - '--property', 'myprop', - self._volume.display_name, - ] - verifylist = [ - ('property', ['myprop']), - ('volume', self._volume.display_name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = 
self.cmd.take_action(parsed_args) - - self.volumes_mock.delete_metadata.assert_called_with( - self._volume.id, ['myprop'] - ) - self.assertIsNone(result) - - -class TestColumns(TestVolume): - - def test_attachments_column_without_server_cache(self): - _volume = volume_fakes.create_one_volume() - server_id = _volume.attachments[0]['server_id'] - device = _volume.attachments[0]['device'] - - col = volume.AttachmentsColumn(_volume.attachments, {}) - self.assertEqual('Attached to %s on %s ' % (server_id, device), - col.human_readable()) - self.assertEqual(_volume.attachments, col.machine_readable()) - - def test_attachments_column_with_server_cache(self): - _volume = volume_fakes.create_one_volume() - - server_id = _volume.attachments[0]['server_id'] - device = _volume.attachments[0]['device'] - fake_server = mock.Mock() - fake_server.name = 'fake-server-name' - server_cache = {server_id: fake_server} - - col = volume.AttachmentsColumn(_volume.attachments, server_cache) - self.assertEqual( - 'Attached to %s on %s ' % ('fake-server-name', device), - col.human_readable()) - self.assertEqual(_volume.attachments, col.machine_readable()) diff --git a/openstackclient/tests/unit/volume/v1/test_volume_backup.py b/openstackclient/tests/unit/volume/v1/test_volume_backup.py deleted file mode 100644 index b705b4b9b9..0000000000 --- a/openstackclient/tests/unit/volume/v1/test_volume_backup.py +++ /dev/null @@ -1,437 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from unittest import mock -from unittest.mock import call - -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes -from openstackclient.volume.v1 import volume_backup - - -class TestBackup(volume_fakes.TestVolumev1): - - def setUp(self): - super().setUp() - - self.backups_mock = self.app.client_manager.volume.backups - self.backups_mock.reset_mock() - self.volumes_mock = self.app.client_manager.volume.volumes - self.volumes_mock.reset_mock() - self.snapshots_mock = self.app.client_manager.volume.volume_snapshots - self.snapshots_mock.reset_mock() - self.restores_mock = self.app.client_manager.volume.restores - self.restores_mock.reset_mock() - - -class TestBackupCreate(TestBackup): - - volume = volume_fakes.create_one_volume() - - columns = ( - 'availability_zone', - 'container', - 'description', - 'id', - 'name', - 'object_count', - 'size', - 'snapshot_id', - 'status', - 'volume_id', - ) - - def setUp(self): - super().setUp() - self.new_backup = volume_fakes.create_one_backup( - attrs={'volume_id': self.volume.id}, - ) - self.data = ( - self.new_backup.availability_zone, - self.new_backup.container, - self.new_backup.description, - self.new_backup.id, - self.new_backup.name, - self.new_backup.object_count, - self.new_backup.size, - self.new_backup.snapshot_id, - self.new_backup.status, - self.new_backup.volume_id, - ) - self.volumes_mock.get.return_value = self.volume - self.backups_mock.create.return_value = self.new_backup - - # Get the command object to test - self.cmd = volume_backup.CreateVolumeBackup(self.app, None) - - def test_backup_create(self): - arglist = [ - "--name", self.new_backup.name, - "--description", self.new_backup.description, - "--container", self.new_backup.container, - self.new_backup.volume_id, - ] - verifylist = [ - ("name", self.new_backup.name), - ("description", self.new_backup.description), - ("container", self.new_backup.container), - ("volume", self.new_backup.volume_id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.backups_mock.create.assert_called_with( - self.new_backup.volume_id, - self.new_backup.container, - self.new_backup.name, - self.new_backup.description, - ) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, data) - - def test_backup_create_without_name(self): - arglist = [ - "--description", self.new_backup.description, - "--container", self.new_backup.container, - self.new_backup.volume_id, - ] - verifylist = [ - ("description", self.new_backup.description), - ("container", self.new_backup.container), - ("volume", self.new_backup.volume_id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.backups_mock.create.assert_called_with( - self.new_backup.volume_id, - self.new_backup.container, - None, - self.new_backup.description, - ) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, data) - - -class TestBackupDelete(TestBackup): - - backups = volume_fakes.create_backups(count=2) - - def setUp(self): - super().setUp() - - self.backups_mock.get = volume_fakes.get_backups(self.backups) - self.backups_mock.delete.return_value = None - - # Get the command object to mock - self.cmd = volume_backup.DeleteVolumeBackup(self.app, None) - - def test_backup_delete(self): - arglist = [ - self.backups[0].id - ] - verifylist = [ - ("backups", 
[self.backups[0].id]) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - self.backups_mock.delete.assert_called_with( - self.backups[0].id) - self.assertIsNone(result) - - def test_delete_multiple_backups(self): - arglist = [] - for b in self.backups: - arglist.append(b.id) - verifylist = [ - ('backups', arglist), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - - calls = [] - for b in self.backups: - calls.append(call(b.id)) - self.backups_mock.delete.assert_has_calls(calls) - self.assertIsNone(result) - - def test_delete_multiple_backups_with_exception(self): - arglist = [ - self.backups[0].id, - 'unexist_backup', - ] - verifylist = [ - ('backups', arglist), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - find_mock_result = [self.backups[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('1 of 2 backups failed to delete.', - str(e)) - - find_mock.assert_any_call(self.backups_mock, self.backups[0].id) - find_mock.assert_any_call(self.backups_mock, 'unexist_backup') - - self.assertEqual(2, find_mock.call_count) - self.backups_mock.delete.assert_called_once_with( - self.backups[0].id, - ) - - -class TestBackupList(TestBackup): - - volume = volume_fakes.create_one_volume() - backups = volume_fakes.create_backups( - attrs={'volume_id': volume.display_name}, - count=3, - ) - - columns = [ - 'ID', - 'Name', - 'Description', - 'Status', - 'Size', - ] - columns_long = columns + [ - 'Availability Zone', - 'Volume', - 'Container', - ] - - data = [] - for b in backups: - data.append(( - b.id, - b.name, - b.description, - b.status, - b.size, - )) - data_long = [] - for b in backups: - data_long.append(( - b.id, - b.name, - b.description, - b.status, - b.size, - b.availability_zone, - volume_backup.VolumeIdColumn(b.volume_id), - b.container, - )) - - def setUp(self): - super().setUp() - - self.volumes_mock.list.return_value = [self.volume] - self.backups_mock.list.return_value = self.backups - self.volumes_mock.get.return_value = self.volume - # Get the command to test - self.cmd = volume_backup.ListVolumeBackup(self.app, None) - - def test_backup_list_without_options(self): - arglist = [] - verifylist = [ - ("long", False), - ("name", None), - ("status", None), - ("volume", None), - ('all_projects', False), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = self.cmd.take_action(parsed_args) - - search_opts = { - "name": None, - "status": None, - "volume_id": None, - "all_tenants": False, - } - self.volumes_mock.get.assert_not_called() - self.backups_mock.list.assert_called_with( - search_opts=search_opts, - ) - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, list(data)) - - def test_backup_list_with_options(self): - arglist = [ - "--long", - "--name", self.backups[0].name, - "--status", "error", - "--volume", self.volume.id, - "--all-projects" - ] - verifylist = [ - ("long", True), - ("name", self.backups[0].name), - ("status", "error"), - ("volume", self.volume.id), - ('all_projects', True), - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = self.cmd.take_action(parsed_args) - - search_opts = { - "name": 
self.backups[0].name, - "status": "error", - "volume_id": self.volume.id, - "all_tenants": True, - } - self.volumes_mock.get.assert_called_once_with(self.volume.id) - self.backups_mock.list.assert_called_with( - search_opts=search_opts, - ) - self.assertEqual(self.columns_long, columns) - self.assertCountEqual(self.data_long, list(data)) - - -class TestBackupRestore(TestBackup): - - volume = volume_fakes.create_one_volume() - backup = volume_fakes.create_one_backup( - attrs={'volume_id': volume.id}, - ) - - def setUp(self): - super().setUp() - - self.backups_mock.get.return_value = self.backup - self.volumes_mock.get.return_value = self.volume - self.restores_mock.restore.return_value = ( - volume_fakes.create_one_volume( - {'id': self.volume['id']}, - ) - ) - # Get the command object to mock - self.cmd = volume_backup.RestoreVolumeBackup(self.app, None) - - def test_backup_restore(self): - arglist = [ - self.backup.id, - ] - verifylist = [ - ("backup", self.backup.id), - ("volume", None), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.restores_mock.restore.assert_called_with(self.backup.id, - None) - self.assertIsNotNone(result) - - def test_backup_restore_with_existing_volume(self): - arglist = [ - self.backup.id, - self.backup.volume_id, - ] - verifylist = [ - ("backup", self.backup.id), - ("volume", self.backup.volume_id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.restores_mock.restore.assert_called_with( - self.backup.id, self.backup.volume_id, - ) - self.assertIsNotNone(result) - - def test_backup_restore_with_invalid_volume(self): - arglist = [ - self.backup.id, - "unexist_volume", - ] - verifylist = [ - ("backup", self.backup.id), - ("volume", "unexist_volume"), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - with mock.patch.object( - utils, 'find_resource', - side_effect=exceptions.CommandError(), - ): - self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args, - ) - - -class TestBackupShow(TestBackup): - - columns = ( - 'availability_zone', - 'container', - 'description', - 'id', - 'name', - 'object_count', - 'size', - 'snapshot_id', - 'status', - 'volume_id', - ) - - def setUp(self): - super().setUp() - self.backup = volume_fakes.create_one_backup() - self.data = ( - self.backup.availability_zone, - self.backup.container, - self.backup.description, - self.backup.id, - self.backup.name, - self.backup.object_count, - self.backup.size, - self.backup.snapshot_id, - self.backup.status, - self.backup.volume_id, - ) - self.backups_mock.get.return_value = self.backup - # Get the command object to test - self.cmd = volume_backup.ShowVolumeBackup(self.app, None) - - def test_backup_show(self): - arglist = [ - self.backup.id - ] - verifylist = [ - ("backup", self.backup.id) - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - self.backups_mock.get.assert_called_with(self.backup.id) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.data, data) diff --git a/openstackclient/tests/unit/volume/v2/fakes.py b/openstackclient/tests/unit/volume/v2/fakes.py index a3ef142f27..c2303d3880 100644 --- a/openstackclient/tests/unit/volume/v2/fakes.py +++ b/openstackclient/tests/unit/volume/v2/fakes.py @@ -17,29 +17,21 @@ from unittest import mock import uuid +# FIXME(stephenfin): We are using v3 resource 
versions despite being v2 fakes from cinderclient import api_versions -from openstack.block_storage.v3 import volume -from osc_lib.cli import format_columns +from keystoneauth1 import discover +from openstack.block_storage.v2 import _proxy as block_storage_v2_proxy +from openstack.block_storage.v2 import backup as _backup +from openstack.block_storage.v3 import capabilities as _capabilities +from openstack.block_storage.v3 import stats as _stats +from openstack.block_storage.v3 import volume as _volume +from openstack.image.v2 import _proxy as image_v2_proxy from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes -from openstackclient.tests.unit.image.v2 import fakes as image_fakes from openstackclient.tests.unit import utils -QUOTA = { - "gigabytes": 1000, - "volumes": 11, - "snapshots": 10, - "backups": 10, - "backup_gigabytes": 1000, - "per_volume_gigabytes": -1, - "gigabytes_volume_type_backend": -1, - "volumes_volume_type_backend": -1, - "snapshots_volume_type_backend": -1, -} - - class FakeVolumeClient: def __init__(self, **kwargs): self.auth_token = kwargs['token'] @@ -56,18 +48,12 @@ def __init__(self, **kwargs): self.cgsnapshots.resource_class = fakes.FakeResource(None, {}) self.consistencygroups = mock.Mock() self.consistencygroups.resource_class = fakes.FakeResource(None, {}) - self.extensions = mock.Mock() - self.extensions.resource_class = fakes.FakeResource(None, {}) self.limits = mock.Mock() self.limits.resource_class = fakes.FakeResource(None, {}) self.pools = mock.Mock() self.pools.resource_class = fakes.FakeResource(None, {}) self.qos_specs = mock.Mock() self.qos_specs.resource_class = fakes.FakeResource(None, {}) - self.quota_classes = mock.Mock() - self.quota_classes.resource_class = fakes.FakeResource(None, {}) - self.quotas = mock.Mock() - self.quotas.resource_class = fakes.FakeResource(None, {}) self.restores = mock.Mock() self.restores.resource_class = fakes.FakeResource(None, {}) self.services = mock.Mock() @@ -88,21 +74,57 @@ def __init__(self, **kwargs): self.volumes.resource_class = fakes.FakeResource(None, {}) -class TestVolume(utils.TestCommand): +class FakeClientMixin: def setUp(self): super().setUp() self.app.client_manager.volume = FakeVolumeClient( endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN ) - self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client( - endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN + self.volume_client = self.app.client_manager.volume + + # TODO(stephenfin): Rename to 'volume_client' once all commands are + # migrated to SDK + self.app.client_manager.sdk_connection.volume = mock.Mock( + spec=block_storage_v2_proxy.Proxy, ) - self.app.client_manager.image = image_fakes.FakeImagev2Client( - endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN + self.volume_sdk_client = self.app.client_manager.sdk_connection.volume + self.set_volume_api_version() # default to the lowest + + def set_volume_api_version(self, version: str | None = None): + """Set a fake block storage API version. + + :param version: The fake microversion to "support". This must be None + since cinder v2 didn't support microversions. 
+ :returns: None + """ + assert version is None + + self.volume_client.api_version = None + + self.volume_sdk_client.default_microversion = None + self.volume_sdk_client.get_endpoint_data.return_value = ( + discover.EndpointData( + min_microversion=None, + max_microversion=None, + ) ) +class TestVolume( + identity_fakes.FakeClientMixin, + FakeClientMixin, + utils.TestCommand, +): + def setUp(self): + super().setUp() + + # avoid circular imports by defining this manually rather than using + # openstackclient.tests.unit.image.v2.fakes.FakeClientMixin + self.app.client_manager.image = mock.Mock(spec=image_v2_proxy.Proxy) + self.image_client = self.app.client_manager.image + + def create_one_transfer(attrs=None): """Create a fake transfer. @@ -284,7 +306,7 @@ def create_one_capability(attrs=None): # Overwrite default attributes if there are some attributes set capability_info.update(attrs or {}) - capability = fakes.FakeResource(None, capability_info, loaded=True) + capability = _capabilities.Capabilities(**capability_info) return capability @@ -300,19 +322,21 @@ def create_one_pool(attrs=None): # Set default attribute pool_info = { 'name': 'host@lvmdriver-1#lvmdriver-1', - 'storage_protocol': 'iSCSI', - 'thick_provisioning_support': False, - 'thin_provisioning_support': True, - 'total_volumes': 99, - 'total_capacity_gb': 1000.00, - 'allocated_capacity_gb': 100, - 'max_over_subscription_ratio': 200.0, + 'capabilities': { + 'storage_protocol': 'iSCSI', + 'thick_provisioning_support': False, + 'thin_provisioning_support': True, + 'total_volumes': 99, + 'total_capacity_gb': 1000.00, + 'allocated_capacity_gb': 100, + 'max_over_subscription_ratio': 200.0, + }, } # Overwrite default attributes if there are some attributes set pool_info.update(attrs or {}) - pool = fakes.FakeResource(None, pool_info, loaded=True) + pool = _stats.Pools(**pool_info) return pool @@ -411,7 +435,7 @@ def create_one_sdk_volume(attrs=None): # Overwrite default attributes if there are some attributes set volume_info.update(attrs) - return volume.Volume(**volume_info) + return _volume.Volume(**volume_info) def create_sdk_volumes(attrs=None, count=2): @@ -451,114 +475,40 @@ def get_volumes(volumes=None, count=2): return mock.Mock(side_effect=volumes) -def get_volume_columns(volume=None): - """Get the volume columns from a faked volume object. - - :param volume: - A FakeResource objects faking volume - :return - A tuple which may include the following keys: - ('id', 'name', 'description', 'status', 'size', 'volume_type', - 'metadata', 'snapshot', 'availability_zone', 'attachments') - """ - if volume is not None: - return tuple(k for k in sorted(volume.keys())) - return tuple([]) - - -def get_volume_data(volume=None): - """Get the volume data from a faked volume object. - - :param volume: - A FakeResource objects faking volume - :return - A tuple which may include the following values: - ('ce26708d', 'fake_volume', 'fake description', 'available', - 20, 'fake_lvmdriver-1', "Alpha='a', Beta='b', Gamma='g'", - 1, 'nova', [{'device': '/dev/ice', 'server_id': '1233'}]) - """ - data_list = [] - if volume is not None: - for x in sorted(volume.keys()): - if x == 'tags': - # The 'tags' should be format_list - data_list.append(format_columns.ListColumn(volume.info.get(x))) - else: - data_list.append(volume.info.get(x)) - return tuple(data_list) - - -def create_one_availability_zone(attrs=None): - """Create a fake AZ. 
- - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object with zoneName, zoneState, etc. - """ - attrs = attrs or {} - - # Set default attributes. - availability_zone = { - 'zoneName': uuid.uuid4().hex, - 'zoneState': {'available': True}, - } - - # Overwrite default attributes. - availability_zone.update(attrs) - - availability_zone = fakes.FakeResource( - info=copy.deepcopy(availability_zone), loaded=True - ) - return availability_zone - - -def create_availability_zones(attrs=None, count=2): - """Create multiple fake AZs. - - :param dict attrs: - A dictionary with all attributes - :param int count: - The number of AZs to fake - :return: - A list of FakeResource objects faking the AZs - """ - availability_zones = [] - for i in range(0, count): - availability_zone = create_one_availability_zone(attrs) - availability_zones.append(availability_zone) - - return availability_zones - - def create_one_backup(attrs=None): """Create a fake backup. :param dict attrs: A dictionary with all attributes - :return: - A FakeResource object with id, name, volume_id, etc. + :return: A fake + openstack.block_storage.v2.backup.Backup object """ attrs = attrs or {} # Set default attributes. backup_info = { + "availability_zone": 'zone' + uuid.uuid4().hex, + "container": 'container-' + uuid.uuid4().hex, + "created_at": 'time-' + uuid.uuid4().hex, + "data_timestamp": 'time-' + uuid.uuid4().hex, + "description": 'description-' + uuid.uuid4().hex, + "fail_reason": "Service not found for creating backup.", + "has_dependent_backups": False, "id": 'backup-id-' + uuid.uuid4().hex, + "is_incremental": False, "name": 'backup-name-' + uuid.uuid4().hex, - "volume_id": 'volume-id-' + uuid.uuid4().hex, - "snapshot_id": 'snapshot-id' + uuid.uuid4().hex, - "description": 'description-' + uuid.uuid4().hex, "object_count": None, - "container": 'container-' + uuid.uuid4().hex, "size": random.randint(1, 20), + "snapshot_id": 'snapshot-id' + uuid.uuid4().hex, "status": "error", - "availability_zone": 'zone' + uuid.uuid4().hex, + "updated_at": 'time-' + uuid.uuid4().hex, + "volume_id": 'volume-id-' + uuid.uuid4().hex, } # Overwrite default attributes. backup_info.update(attrs) - backup = fakes.FakeResource(info=copy.deepcopy(backup_info), loaded=True) + backup = _backup.Backup(**backup_info) return backup @@ -569,8 +519,8 @@ def create_backups(attrs=None, count=2): A dictionary with all attributes :param int count: The number of backups to fake - :return: - A list of FakeResource objects faking the backups + :return: A list of fake + openstack.block_storage.v2.backup.Backup objects """ backups = [] for i in range(0, count): @@ -770,42 +720,6 @@ def get_consistency_group_snapshots(snapshots=None, count=2): return mock.Mock(side_effect=snapshots) -def create_one_extension(attrs=None): - """Create a fake extension. - - :param dict attrs: - A dictionary with all attributes - :return: - A FakeResource object with name, namespace, etc. - """ - attrs = attrs or {} - - # Set default attributes. - extension_info = { - 'name': 'name-' + uuid.uuid4().hex, - 'namespace': ( - 'http://docs.openstack.org/' - 'block-service/ext/scheduler-hints/api/v2' - ), - 'description': 'description-' + uuid.uuid4().hex, - 'updated': '2013-04-18T00:00:00+00:00', - 'alias': 'OS-SCH-HNT', - 'links': ( - '[{"href":' - '"https://github.com/openstack/block-api", "type":' - ' "text/html", "rel": "describedby"}]' - ), - } - - # Overwrite default attributes. 
- extension_info.update(attrs) - - extension = fakes.FakeResource( - info=copy.deepcopy(extension_info), loaded=True - ) - return extension - - def create_one_qos(attrs=None): """Create a fake Qos specification. @@ -1059,185 +973,3 @@ def create_one_encryption_volume_type(attrs=None): info=copy.deepcopy(encryption_info), loaded=True ) return encryption_type - - -def create_one_vol_quota(attrs=None): - """Create one quota""" - attrs = attrs or {} - - quota_attrs = { - 'id': 'project-id-' + uuid.uuid4().hex, - 'backups': 100, - 'backup_gigabytes': 100, - 'gigabytes': 10, - 'per_volume_gigabytes': 10, - 'snapshots': 0, - 'volumes': 10, - } - - quota_attrs.update(attrs) - - quota = fakes.FakeResource(info=copy.deepcopy(quota_attrs), loaded=True) - quota.project_id = quota_attrs['id'] - - return quota - - -def create_one_default_vol_quota(attrs=None): - """Create one quota""" - attrs = attrs or {} - - quota_attrs = { - 'id': 'project-id-' + uuid.uuid4().hex, - 'backups': 100, - 'backup_gigabytes': 100, - 'gigabytes': 100, - 'per_volume_gigabytes': 100, - 'snapshots': 100, - 'volumes': 100, - } - - quota_attrs.update(attrs) - - quota = fakes.FakeResource(info=copy.deepcopy(quota_attrs), loaded=True) - quota.project_id = quota_attrs['id'] - - return quota - - -def create_one_detailed_quota(attrs=None): - """Create one quota""" - attrs = attrs or {} - - quota_attrs = { - 'volumes': {'limit': 3, 'in_use': 1, 'reserved': 0}, - 'per_volume_gigabytes': {'limit': -1, 'in_use': 0, 'reserved': 0}, - 'snapshots': {'limit': 10, 'in_use': 0, 'reserved': 0}, - 'gigabytes': {'limit': 1000, 'in_use': 5, 'reserved': 0}, - 'backups': {'limit': 10, 'in_use': 0, 'reserved': 0}, - 'backup_gigabytes': {'limit': 1000, 'in_use': 0, 'reserved': 0}, - 'volumes_lvmdriver-1': {'limit': -1, 'in_use': 1, 'reserved': 0}, - 'gigabytes_lvmdriver-1': {'limit': -1, 'in_use': 5, 'reserved': 0}, - 'snapshots_lvmdriver-1': {'limit': -1, 'in_use': 0, 'reserved': 0}, - 'volumes___DEFAULT__': {'limit': -1, 'in_use': 0, 'reserved': 0}, - 'gigabytes___DEFAULT__': {'limit': -1, 'in_use': 0, 'reserved': 0}, - 'snapshots___DEFAULT__': {'limit': -1, 'in_use': 0, 'reserved': 0}, - 'groups': {'limit': 10, 'in_use': 0, 'reserved': 0}, - 'id': uuid.uuid4().hex, - } - quota_attrs.update(attrs) - - quota = fakes.FakeResource(info=copy.deepcopy(quota_attrs), loaded=True) - - return quota - - -class FakeLimits(object): - """Fake limits""" - - def __init__(self, absolute_attrs=None): - self.absolute_limits_attrs = { - 'totalSnapshotsUsed': 1, - 'maxTotalBackups': 10, - 'maxTotalVolumeGigabytes': 1000, - 'maxTotalSnapshots': 10, - 'maxTotalBackupGigabytes': 1000, - 'totalBackupGigabytesUsed': 0, - 'maxTotalVolumes': 10, - 'totalVolumesUsed': 4, - 'totalBackupsUsed': 0, - 'totalGigabytesUsed': 35, - } - absolute_attrs = absolute_attrs or {} - self.absolute_limits_attrs.update(absolute_attrs) - - self.rate_limits_attrs = [ - { - "uri": "*", - "limit": [ - { - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z", - }, - { - "value": 10, - "verb": "PUT", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z", - }, - { - "value": 100, - "verb": "DELETE", - "remaining": 100, - "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z", - }, - ], - } - ] - - @property - def absolute(self): - for (name, value) in self.absolute_limits_attrs.items(): - yield FakeAbsoluteLimit(name, value) - - def absolute_limits(self): - reference_data = [] - for (name, value) in 
self.absolute_limits_attrs.items(): - reference_data.append((name, value)) - return reference_data - - @property - def rate(self): - for group in self.rate_limits_attrs: - uri = group['uri'] - for rate in group['limit']: - yield FakeRateLimit( - rate['verb'], - uri, - rate['value'], - rate['remaining'], - rate['unit'], - rate['next-available'], - ) - - def rate_limits(self): - reference_data = [] - for group in self.rate_limits_attrs: - uri = group['uri'] - for rate in group['limit']: - reference_data.append( - ( - rate['verb'], - uri, - rate['value'], - rate['remaining'], - rate['unit'], - rate['next-available'], - ) - ) - return reference_data - - -class FakeAbsoluteLimit(object): - """Data model that represents an absolute limit.""" - - def __init__(self, name, value): - self.name = name - self.value = value - - -class FakeRateLimit(object): - """Data model that represents a flattened view of a single rate limit.""" - - def __init__(self, verb, uri, value, remain, unit, next_available): - self.verb = verb - self.uri = uri - self.value = value - self.remain = remain - self.unit = unit - self.next_available = next_available diff --git a/openstackclient/tests/unit/volume/v2/test_backup_record.py b/openstackclient/tests/unit/volume/v2/test_backup_record.py index aa9a25a223..d677b9a284 100644 --- a/openstackclient/tests/unit/volume/v2/test_backup_record.py +++ b/openstackclient/tests/unit/volume/v2/test_backup_record.py @@ -17,16 +17,14 @@ class TestBackupRecord(volume_fakes.TestVolume): - def setUp(self): super().setUp() - self.backups_mock = self.app.client_manager.volume.backups + self.backups_mock = self.volume_client.backups self.backups_mock.reset_mock() class TestBackupRecordExport(TestBackupRecord): - new_backup = volume_fakes.create_one_backup( attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'}, ) @@ -81,7 +79,6 @@ def test_backup_export_json(self): class TestBackupRecordImport(TestBackupRecord): - new_backup = volume_fakes.create_one_backup( attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'}, ) @@ -101,8 +98,10 @@ def test_backup_import(self): "fake_backup_record_data", ] verifylist = [ - ("backup_service", - "cinder.backup.drivers.swift.SwiftBackupDriver"), + ( + "backup_service", + "cinder.backup.drivers.swift.SwiftBackupDriver", + ), ("backup_metadata", "fake_backup_record_data"), ] diff --git a/openstackclient/tests/unit/volume/v2/test_consistency_group.py b/openstackclient/tests/unit/volume/v2/test_consistency_group.py index c5537ed8d6..dc62e5e42f 100644 --- a/openstackclient/tests/unit/volume/v2/test_consistency_group.py +++ b/openstackclient/tests/unit/volume/v2/test_consistency_group.py @@ -24,39 +24,34 @@ class TestConsistencyGroup(volume_fakes.TestVolume): - def setUp(self): super().setUp() # Get a shortcut to the TransferManager Mock - self.consistencygroups_mock = ( - self.app.client_manager.volume.consistencygroups) + self.consistencygroups_mock = self.volume_client.consistencygroups self.consistencygroups_mock.reset_mock() - self.cgsnapshots_mock = ( - self.app.client_manager.volume.cgsnapshots) + self.cgsnapshots_mock = self.volume_client.cgsnapshots self.cgsnapshots_mock.reset_mock() - self.volumes_mock = ( - self.app.client_manager.volume.volumes) + self.volumes_mock = self.volume_client.volumes self.volumes_mock.reset_mock() - self.types_mock = self.app.client_manager.volume.volume_types + self.types_mock = self.volume_client.volume_types self.types_mock.reset_mock() class TestConsistencyGroupAddVolume(TestConsistencyGroup): - _consistency_group 
= volume_fakes.create_one_consistency_group()
 
     def setUp(self):
         super().setUp()
 
-        self.consistencygroups_mock.get.return_value = (
-            self._consistency_group)
+        self.consistencygroups_mock.get.return_value = self._consistency_group
 
         # Get the command object to test
-        self.cmd = \
-            consistency_group.AddVolumeToConsistencyGroup(self.app, None)
+        self.cmd = consistency_group.AddVolumeToConsistencyGroup(
+            self.app, None
+        )
 
     def test_add_one_volume_to_consistency_group(self):
         volume = volume_fakes.create_one_volume()
@@ -78,8 +73,7 @@ def test_add_one_volume_to_consistency_group(self):
             'add_volumes': volume.id,
         }
         self.consistencygroups_mock.update.assert_called_once_with(
-            self._consistency_group.id,
-            **kwargs
+            self._consistency_group.id, **kwargs
         )
         self.assertIsNone(result)
 
@@ -104,14 +98,14 @@ def test_add_multiple_volumes_to_consistency_group(self):
             'add_volumes': volumes[0].id + ',' + volumes[1].id,
         }
         self.consistencygroups_mock.update.assert_called_once_with(
-            self._consistency_group.id,
-            **kwargs
+            self._consistency_group.id, **kwargs
         )
         self.assertIsNone(result)
 
     @mock.patch.object(consistency_group.LOG, 'error')
     def test_add_multiple_volumes_to_consistency_group_with_exception(
-        self, mock_error,
+        self,
+        mock_error,
     ):
         volume = volume_fakes.create_one_volume()
         arglist = [
@@ -126,20 +120,22 @@ def test_add_multiple_volumes_to_consistency_group_with_exception(
 
         parsed_args = self.check_parser(self.cmd, arglist, verifylist)
 
-        find_mock_result = [volume,
-                            exceptions.CommandError,
-                            self._consistency_group]
-        with mock.patch.object(utils, 'find_resource',
-                               side_effect=find_mock_result) as find_mock:
+        find_mock_result = [
+            volume,
+            exceptions.CommandError,
+            self._consistency_group,
+        ]
+        with mock.patch.object(
+            utils, 'find_resource', side_effect=find_mock_result
+        ) as find_mock:
             result = self.cmd.take_action(parsed_args)
             mock_error.assert_called_with("1 of 2 volumes failed to add.")
 
         self.assertIsNone(result)
-        find_mock.assert_any_call(self.consistencygroups_mock,
-                                  self._consistency_group.id)
-        find_mock.assert_any_call(self.volumes_mock,
-                                  volume.id)
-        find_mock.assert_any_call(self.volumes_mock,
-                                  'unexist_volume')
+        find_mock.assert_any_call(
+            self.consistencygroups_mock, self._consistency_group.id
+        )
+        find_mock.assert_any_call(self.volumes_mock, volume.id)
+        find_mock.assert_any_call(self.volumes_mock, 'unexist_volume')
         self.assertEqual(3, find_mock.call_count)
         self.consistencygroups_mock.update.assert_called_once_with(
             self._consistency_group.id, add_volumes=volume.id
@@ -147,7 +143,6 @@ def test_add_multiple_volumes_to_consistency_group_with_exception(
 
 
 class TestConsistencyGroupCreate(TestConsistencyGroup):
-
     volume_type = volume_fakes.create_one_volume_type()
     new_consistency_group = volume_fakes.create_one_consistency_group()
     consistency_group_snapshot = (
@@ -176,22 +171,28 @@ class TestConsistencyGroupCreate(TestConsistencyGroup):
     def setUp(self):
         super().setUp()
         self.consistencygroups_mock.create.return_value = (
-            self.new_consistency_group)
+            self.new_consistency_group
+        )
         self.consistencygroups_mock.create_from_src.return_value = (
-            self.new_consistency_group)
+            self.new_consistency_group
+        )
         self.consistencygroups_mock.get.return_value = (
-            self.new_consistency_group)
+            self.new_consistency_group
+        )
         self.types_mock.get.return_value = self.volume_type
         self.cgsnapshots_mock.get.return_value = (
-            self.consistency_group_snapshot)
+            self.consistency_group_snapshot
+        )
 
         # Get the command object to test
         self.cmd = consistency_group.CreateConsistencyGroup(self.app,
None) def test_consistency_group_create(self): arglist = [ - '--volume-type', self.volume_type.id, - '--description', self.new_consistency_group.description, + '--volume-type', + self.volume_type.id, + '--description', + self.new_consistency_group.description, '--availability-zone', self.new_consistency_group.availability_zone, self.new_consistency_group.name, @@ -199,16 +200,17 @@ def test_consistency_group_create(self): verifylist = [ ('volume_type', self.volume_type.id), ('description', self.new_consistency_group.description), - ('availability_zone', - self.new_consistency_group.availability_zone), + ( + 'availability_zone', + self.new_consistency_group.availability_zone, + ), ('name', self.new_consistency_group.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.get.assert_called_once_with( - self.volume_type.id) + self.types_mock.get.assert_called_once_with(self.volume_type.id) self.consistencygroups_mock.get.assert_not_called() self.consistencygroups_mock.create.assert_called_once_with( self.volume_type.id, @@ -222,23 +224,26 @@ def test_consistency_group_create(self): def test_consistency_group_create_without_name(self): arglist = [ - '--volume-type', self.volume_type.id, - '--description', self.new_consistency_group.description, + '--volume-type', + self.volume_type.id, + '--description', + self.new_consistency_group.description, '--availability-zone', self.new_consistency_group.availability_zone, ] verifylist = [ ('volume_type', self.volume_type.id), ('description', self.new_consistency_group.description), - ('availability_zone', - self.new_consistency_group.availability_zone), + ( + 'availability_zone', + self.new_consistency_group.availability_zone, + ), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.get.assert_called_once_with( - self.volume_type.id) + self.types_mock.get.assert_called_once_with(self.volume_type.id) self.consistencygroups_mock.get.assert_not_called() self.consistencygroups_mock.create.assert_called_once_with( self.volume_type.id, @@ -252,8 +257,10 @@ def test_consistency_group_create_without_name(self): def test_consistency_group_create_from_source(self): arglist = [ - '--consistency-group-source', self.new_consistency_group.id, - '--description', self.new_consistency_group.description, + '--consistency-group-source', + self.new_consistency_group.id, + '--description', + self.new_consistency_group.description, self.new_consistency_group.name, ] verifylist = [ @@ -267,7 +274,8 @@ def test_consistency_group_create_from_source(self): self.types_mock.get.assert_not_called() self.consistencygroups_mock.get.assert_called_once_with( - self.new_consistency_group.id) + self.new_consistency_group.id + ) self.consistencygroups_mock.create_from_src.assert_called_with( None, self.new_consistency_group.id, @@ -280,8 +288,10 @@ def test_consistency_group_create_from_source(self): def test_consistency_group_create_from_snapshot(self): arglist = [ - '--consistency-group-snapshot', self.consistency_group_snapshot.id, - '--description', self.new_consistency_group.description, + '--consistency-group-snapshot', + self.consistency_group_snapshot.id, + '--description', + self.new_consistency_group.description, self.new_consistency_group.name, ] verifylist = [ @@ -295,7 +305,8 @@ def test_consistency_group_create_from_snapshot(self): self.types_mock.get.assert_not_called() 
self.cgsnapshots_mock.get.assert_called_once_with( - self.consistency_group_snapshot.id) + self.consistency_group_snapshot.id + ) self.consistencygroups_mock.create_from_src.assert_called_with( self.consistency_group_snapshot.id, None, @@ -308,9 +319,7 @@ def test_consistency_group_create_from_snapshot(self): class TestConsistencyGroupDelete(TestConsistencyGroup): - - consistency_groups =\ - volume_fakes.create_consistency_groups(count=2) + consistency_groups = volume_fakes.create_consistency_groups(count=2) def setUp(self): super().setUp() @@ -324,18 +333,15 @@ def setUp(self): self.cmd = consistency_group.DeleteConsistencyGroup(self.app, None) def test_consistency_group_delete(self): - arglist = [ - self.consistency_groups[0].id - ] - verifylist = [ - ("consistency_groups", [self.consistency_groups[0].id]) - ] + arglist = [self.consistency_groups[0].id] + verifylist = [("consistency_groups", [self.consistency_groups[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.consistencygroups_mock.delete.assert_called_with( - self.consistency_groups[0].id, False) + self.consistency_groups[0].id, False + ) self.assertIsNone(result) def test_consistency_group_delete_with_force(self): @@ -345,14 +351,15 @@ def test_consistency_group_delete_with_force(self): ] verifylist = [ ('force', True), - ("consistency_groups", [self.consistency_groups[0].id]) + ("consistency_groups", [self.consistency_groups[0].id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.consistencygroups_mock.delete.assert_called_with( - self.consistency_groups[0].id, True) + self.consistency_groups[0].id, True + ) self.assertIsNone(result) def test_delete_multiple_consistency_groups(self): @@ -383,21 +390,27 @@ def test_delete_multiple_consistency_groups_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) - find_mock_result = [self.consistency_groups[0], - exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: + find_mock_result = [ + self.consistency_groups[0], + exceptions.CommandError, + ] + with mock.patch.object( + utils, 'find_resource', side_effect=find_mock_result + ) as find_mock: try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 consistency groups failed to delete.', - str(e)) + self.assertEqual( + '1 of 2 consistency groups failed to delete.', str(e) + ) - find_mock.assert_any_call(self.consistencygroups_mock, - self.consistency_groups[0].id) - find_mock.assert_any_call(self.consistencygroups_mock, - 'unexist_consistency_group') + find_mock.assert_any_call( + self.consistencygroups_mock, self.consistency_groups[0].id + ) + find_mock.assert_any_call( + self.consistencygroups_mock, 'unexist_consistency_group' + ) self.assertEqual(2, find_mock.call_count) self.consistencygroups_mock.delete.assert_called_once_with( @@ -406,7 +419,6 @@ def test_delete_multiple_consistency_groups_with_exception(self): class TestConsistencyGroupList(TestConsistencyGroup): - consistency_groups = volume_fakes.create_consistency_groups(count=2) columns = [ @@ -424,21 +436,25 @@ class TestConsistencyGroupList(TestConsistencyGroup): ] data = [] for c in consistency_groups: - data.append(( - c.id, - c.status, - c.name, - )) + data.append( + ( + c.id, + c.status, + c.name, + ) + ) data_long = [] for c in consistency_groups: - 
data_long.append(( - c.id, - c.status, - c.availability_zone, - c.name, - c.description, - format_columns.ListColumn(c.volume_types) - )) + data_long.append( + ( + c.id, + c.status, + c.availability_zone, + c.name, + c.description, + format_columns.ListColumn(c.volume_types), + ) + ) def setUp(self): super().setUp() @@ -458,14 +474,13 @@ def test_consistency_group_list_without_options(self): columns, data = self.cmd.take_action(parsed_args) self.consistencygroups_mock.list.assert_called_once_with( - detailed=True, search_opts={'all_tenants': False}) + detailed=True, search_opts={'all_tenants': False} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) def test_consistency_group_list_with_all_project(self): - arglist = [ - "--all-projects" - ] + arglist = ["--all-projects"] verifylist = [ ("all_projects", True), ("long", False), @@ -475,7 +490,8 @@ def test_consistency_group_list_with_all_project(self): columns, data = self.cmd.take_action(parsed_args) self.consistencygroups_mock.list.assert_called_once_with( - detailed=True, search_opts={'all_tenants': True}) + detailed=True, search_opts={'all_tenants': True} + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -492,23 +508,23 @@ def test_consistency_group_list_with_long(self): columns, data = self.cmd.take_action(parsed_args) self.consistencygroups_mock.list.assert_called_once_with( - detailed=True, search_opts={'all_tenants': False}) + detailed=True, search_opts={'all_tenants': False} + ) self.assertEqual(self.columns_long, columns) self.assertCountEqual(self.data_long, list(data)) class TestConsistencyGroupRemoveVolume(TestConsistencyGroup): - _consistency_group = volume_fakes.create_one_consistency_group() def setUp(self): super().setUp() - self.consistencygroups_mock.get.return_value = ( - self._consistency_group) + self.consistencygroups_mock.get.return_value = self._consistency_group # Get the command object to test - self.cmd = \ - consistency_group.RemoveVolumeFromConsistencyGroup(self.app, None) + self.cmd = consistency_group.RemoveVolumeFromConsistencyGroup( + self.app, None + ) def test_remove_one_volume_from_consistency_group(self): volume = volume_fakes.create_one_volume() @@ -530,8 +546,7 @@ def test_remove_one_volume_from_consistency_group(self): 'remove_volumes': volume.id, } self.consistencygroups_mock.update.assert_called_once_with( - self._consistency_group.id, - **kwargs + self._consistency_group.id, **kwargs ) self.assertIsNone(result) @@ -556,8 +571,7 @@ def test_remove_multi_volumes_from_consistency_group(self): 'remove_volumes': volumes[0].id + ',' + volumes[1].id, } self.consistencygroups_mock.update.assert_called_once_with( - self._consistency_group.id, - **kwargs + self._consistency_group.id, **kwargs ) self.assertIsNone(result) @@ -579,20 +593,22 @@ def test_remove_multiple_volumes_from_consistency_group_with_exception( parsed_args = self.check_parser(self.cmd, arglist, verifylist) - find_mock_result = [volume, - exceptions.CommandError, - self._consistency_group] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: + find_mock_result = [ + volume, + exceptions.CommandError, + self._consistency_group, + ] + with mock.patch.object( + utils, 'find_resource', side_effect=find_mock_result + ) as find_mock: result = self.cmd.take_action(parsed_args) mock_error.assert_called_with("1 of 2 volumes failed to remove.") self.assertIsNone(result) - find_mock.assert_any_call(self.consistencygroups_mock, - 
self._consistency_group.id) - find_mock.assert_any_call(self.volumes_mock, - volume.id) - find_mock.assert_any_call(self.volumes_mock, - 'unexist_volume') + find_mock.assert_any_call( + self.consistencygroups_mock, self._consistency_group.id + ) + find_mock.assert_any_call(self.volumes_mock, volume.id) + find_mock.assert_any_call(self.volumes_mock, 'unexist_volume') self.assertEqual(3, find_mock.call_count) self.consistencygroups_mock.update.assert_called_once_with( self._consistency_group.id, remove_volumes=volume.id @@ -600,21 +616,20 @@ def test_remove_multiple_volumes_from_consistency_group_with_exception( class TestConsistencyGroupSet(TestConsistencyGroup): - consistency_group = volume_fakes.create_one_consistency_group() def setUp(self): super().setUp() - self.consistencygroups_mock.get.return_value = ( - self.consistency_group) + self.consistencygroups_mock.get.return_value = self.consistency_group # Get the command object to test self.cmd = consistency_group.SetConsistencyGroup(self.app, None) def test_consistency_group_set_name(self): new_name = 'new_name' arglist = [ - '--name', new_name, + '--name', + new_name, self.consistency_group.id, ] verifylist = [ @@ -631,15 +646,15 @@ def test_consistency_group_set_name(self): 'name': new_name, } self.consistencygroups_mock.update.assert_called_once_with( - self.consistency_group.id, - **kwargs + self.consistency_group.id, **kwargs ) self.assertIsNone(result) def test_consistency_group_set_description(self): new_description = 'new_description' arglist = [ - '--description', new_description, + '--description', + new_description, self.consistency_group.id, ] verifylist = [ @@ -656,8 +671,7 @@ def test_consistency_group_set_description(self): 'description': new_description, } self.consistencygroups_mock.update.assert_called_once_with( - self.consistency_group.id, - **kwargs + self.consistency_group.id, **kwargs ) self.assertIsNone(result) @@ -690,15 +704,12 @@ def setUp(self): self.cmd = consistency_group.ShowConsistencyGroup(self.app, None) def test_consistency_group_show(self): - arglist = [ - self.consistency_group.id - ] - verifylist = [ - ("consistency_group", self.consistency_group.id) - ] + arglist = [self.consistency_group.id] + verifylist = [("consistency_group", self.consistency_group.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.consistencygroups_mock.get.assert_called_once_with( - self.consistency_group.id) + self.consistency_group.id + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) diff --git a/openstackclient/tests/unit/volume/v2/test_consistency_group_snapshot.py b/openstackclient/tests/unit/volume/v2/test_consistency_group_snapshot.py index e3c738c855..5a5b9c0053 100644 --- a/openstackclient/tests/unit/volume/v2/test_consistency_group_snapshot.py +++ b/openstackclient/tests/unit/volume/v2/test_consistency_group_snapshot.py @@ -19,21 +19,17 @@ class TestConsistencyGroupSnapshot(volume_fakes.TestVolume): - def setUp(self): - super(TestConsistencyGroupSnapshot, self).setUp() + super().setUp() # Get a shortcut to the TransferManager Mock - self.cgsnapshots_mock = ( - self.app.client_manager.volume.cgsnapshots) + self.cgsnapshots_mock = self.volume_client.cgsnapshots self.cgsnapshots_mock.reset_mock() - self.consistencygroups_mock = ( - self.app.client_manager.volume.consistencygroups) + self.consistencygroups_mock = self.volume_client.consistencygroups self.consistencygroups_mock.reset_mock() class 
TestConsistencyGroupSnapshotCreate(TestConsistencyGroupSnapshot): - _consistency_group_snapshot = ( volume_fakes.create_one_consistency_group_snapshot() ) @@ -57,20 +53,23 @@ class TestConsistencyGroupSnapshotCreate(TestConsistencyGroupSnapshot): ) def setUp(self): - super(TestConsistencyGroupSnapshotCreate, self).setUp() + super().setUp() self.cgsnapshots_mock.create.return_value = ( - self._consistency_group_snapshot) - self.consistencygroups_mock.get.return_value = ( - self.consistency_group) + self._consistency_group_snapshot + ) + self.consistencygroups_mock.get.return_value = self.consistency_group # Get the command object to test - self.cmd = (consistency_group_snapshot. - CreateConsistencyGroupSnapshot(self.app, None)) + self.cmd = consistency_group_snapshot.CreateConsistencyGroupSnapshot( + self.app, None + ) def test_consistency_group_snapshot_create(self): arglist = [ - '--consistency-group', self.consistency_group.id, - '--description', self._consistency_group_snapshot.description, + '--consistency-group', + self.consistency_group.id, + '--description', + self._consistency_group_snapshot.description, self._consistency_group_snapshot.name, ] verifylist = [ @@ -83,7 +82,8 @@ def test_consistency_group_snapshot_create(self): columns, data = self.cmd.take_action(parsed_args) self.consistencygroups_mock.get.assert_called_once_with( - self.consistency_group.id) + self.consistency_group.id + ) self.cgsnapshots_mock.create.assert_called_once_with( self.consistency_group.id, name=self._consistency_group_snapshot.name, @@ -95,7 +95,8 @@ def test_consistency_group_snapshot_create(self): def test_consistency_group_snapshot_create_no_consistency_group(self): arglist = [ - '--description', self._consistency_group_snapshot.description, + '--description', + self._consistency_group_snapshot.description, self._consistency_group_snapshot.name, ] verifylist = [ @@ -107,7 +108,8 @@ def test_consistency_group_snapshot_create_no_consistency_group(self): columns, data = self.cmd.take_action(parsed_args) self.consistencygroups_mock.get.assert_called_once_with( - self._consistency_group_snapshot.name) + self._consistency_group_snapshot.name + ) self.cgsnapshots_mock.create.assert_called_once_with( self.consistency_group.id, name=self._consistency_group_snapshot.name, @@ -119,13 +121,12 @@ def test_consistency_group_snapshot_create_no_consistency_group(self): class TestConsistencyGroupSnapshotDelete(TestConsistencyGroupSnapshot): - consistency_group_snapshots = ( volume_fakes.create_consistency_group_snapshots(count=2) ) def setUp(self): - super(TestConsistencyGroupSnapshotDelete, self).setUp() + super().setUp() self.cgsnapshots_mock.get = ( volume_fakes.get_consistency_group_snapshots( @@ -135,23 +136,25 @@ def setUp(self): self.cgsnapshots_mock.delete.return_value = None # Get the command object to mock - self.cmd = (consistency_group_snapshot. 
- DeleteConsistencyGroupSnapshot(self.app, None)) + self.cmd = consistency_group_snapshot.DeleteConsistencyGroupSnapshot( + self.app, None + ) def test_consistency_group_snapshot_delete(self): - arglist = [ - self.consistency_group_snapshots[0].id - ] + arglist = [self.consistency_group_snapshots[0].id] verifylist = [ - ("consistency_group_snapshot", - [self.consistency_group_snapshots[0].id]) + ( + "consistency_group_snapshot", + [self.consistency_group_snapshots[0].id], + ) ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.cgsnapshots_mock.delete.assert_called_once_with( - self.consistency_group_snapshots[0].id) + self.consistency_group_snapshots[0].id + ) self.assertIsNone(result) def test_multiple_consistency_group_snapshots_delete(self): @@ -173,7 +176,6 @@ def test_multiple_consistency_group_snapshots_delete(self): class TestConsistencyGroupSnapshotList(TestConsistencyGroupSnapshot): - consistency_group_snapshots = ( volume_fakes.create_consistency_group_snapshots(count=2) ) @@ -194,32 +196,36 @@ class TestConsistencyGroupSnapshotList(TestConsistencyGroupSnapshot): ] data = [] for c in consistency_group_snapshots: - data.append(( - c.id, - c.status, - c.name, - )) + data.append( + ( + c.id, + c.status, + c.name, + ) + ) data_long = [] for c in consistency_group_snapshots: - data_long.append(( - c.id, - c.status, - c.consistencygroup_id, - c.name, - c.description, - c.created_at, - )) + data_long.append( + ( + c.id, + c.status, + c.consistencygroup_id, + c.name, + c.description, + c.created_at, + ) + ) def setUp(self): - super(TestConsistencyGroupSnapshotList, self).setUp() + super().setUp() self.cgsnapshots_mock.list.return_value = ( - self.consistency_group_snapshots) + self.consistency_group_snapshots + ) self.consistencygroups_mock.get.return_value = self.consistency_group # Get the command to test - self.cmd = ( - consistency_group_snapshot. 
- ListConsistencyGroupSnapshot(self.app, None) + self.cmd = consistency_group_snapshot.ListConsistencyGroupSnapshot( + self.app, None ) def test_consistency_group_snapshot_list_without_options(self): @@ -240,7 +246,8 @@ def test_consistency_group_snapshot_list_without_options(self): 'consistencygroup_id': None, } self.cgsnapshots_mock.list.assert_called_once_with( - detailed=True, search_opts=search_opts) + detailed=True, search_opts=search_opts + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -264,15 +271,18 @@ def test_consistency_group_snapshot_list_with_long(self): 'consistencygroup_id': None, } self.cgsnapshots_mock.list.assert_called_once_with( - detailed=True, search_opts=search_opts) + detailed=True, search_opts=search_opts + ) self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, list(data)) def test_consistency_group_snapshot_list_with_options(self): arglist = [ "--all-project", - "--status", self.consistency_group_snapshots[0].status, - "--consistency-group", self.consistency_group.id, + "--status", + self.consistency_group_snapshots[0].status, + "--consistency-group", + self.consistency_group.id, ] verifylist = [ ("all_projects", True), @@ -290,15 +300,16 @@ def test_consistency_group_snapshot_list_with_options(self): 'consistencygroup_id': self.consistency_group.id, } self.consistencygroups_mock.get.assert_called_once_with( - self.consistency_group.id) + self.consistency_group.id + ) self.cgsnapshots_mock.list.assert_called_once_with( - detailed=True, search_opts=search_opts) + detailed=True, search_opts=search_opts + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestConsistencyGroupSnapshotShow(TestConsistencyGroupSnapshot): - _consistency_group_snapshot = ( volume_fakes.create_one_consistency_group_snapshot() ) @@ -321,23 +332,24 @@ class TestConsistencyGroupSnapshotShow(TestConsistencyGroupSnapshot): ) def setUp(self): - super(TestConsistencyGroupSnapshotShow, self).setUp() + super().setUp() self.cgsnapshots_mock.get.return_value = ( - self._consistency_group_snapshot) - self.cmd = (consistency_group_snapshot. 
- ShowConsistencyGroupSnapshot(self.app, None)) + self._consistency_group_snapshot + ) + self.cmd = consistency_group_snapshot.ShowConsistencyGroupSnapshot( + self.app, None + ) def test_consistency_group_snapshot_show(self): - arglist = [ - self._consistency_group_snapshot.id - ] + arglist = [self._consistency_group_snapshot.id] verifylist = [ ("consistency_group_snapshot", self._consistency_group_snapshot.id) ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.cgsnapshots_mock.get.assert_called_once_with( - self._consistency_group_snapshot.id) + self._consistency_group_snapshot.id + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/volume/v2/test_qos_specs.py b/openstackclient/tests/unit/volume/v2/test_qos_specs.py index 6f258dd54c..50c7419f1e 100644 --- a/openstackclient/tests/unit/volume/v2/test_qos_specs.py +++ b/openstackclient/tests/unit/volume/v2/test_qos_specs.py @@ -26,24 +26,22 @@ class TestQos(volume_fakes.TestVolume): - def setUp(self): - super(TestQos, self).setUp() + super().setUp() - self.qos_mock = self.app.client_manager.volume.qos_specs + self.qos_mock = self.volume_client.qos_specs self.qos_mock.reset_mock() - self.types_mock = self.app.client_manager.volume.volume_types + self.types_mock = self.volume_client.volume_types self.types_mock.reset_mock() class TestQosAssociate(TestQos): - volume_type = volume_fakes.create_one_volume_type() qos_spec = volume_fakes.create_one_qos() def setUp(self): - super(TestQosAssociate, self).setUp() + super().setUp() self.qos_mock.get.return_value = self.qos_spec self.types_mock.get.return_value = self.volume_type @@ -51,36 +49,26 @@ def setUp(self): self.cmd = qos_specs.AssociateQos(self.app, None) def test_qos_associate(self): - arglist = [ - self.qos_spec.id, - self.volume_type.id - ] + arglist = [self.qos_spec.id, self.volume_type.id] verifylist = [ ('qos_spec', self.qos_spec.id), - ('volume_type', self.volume_type.id) + ('volume_type', self.volume_type.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.qos_mock.associate.assert_called_with( - self.qos_spec.id, - self.volume_type.id + self.qos_spec.id, self.volume_type.id ) self.assertIsNone(result) class TestQosCreate(TestQos): - - columns = ( - 'consumer', - 'id', - 'name', - 'properties' - ) + columns = ('consumer', 'id', 'name', 'properties') def setUp(self): - super(TestQosCreate, self).setUp() + super().setUp() self.new_qos_spec = volume_fakes.create_one_qos() self.qos_mock.create.return_value = self.new_qos_spec @@ -89,7 +77,7 @@ def setUp(self): self.new_qos_spec.consumer, self.new_qos_spec.id, self.new_qos_spec.name, - format_columns.DictColumn(self.new_qos_spec.specs) + format_columns.DictColumn(self.new_qos_spec.specs), ) # Get the command object to test @@ -107,8 +95,7 @@ def test_qos_create_without_properties(self): columns, data = self.cmd.take_action(parsed_args) self.qos_mock.create.assert_called_with( - self.new_qos_spec.name, - {'consumer': 'both'} + self.new_qos_spec.name, {'consumer': 'both'} ) self.assertEqual(self.columns, columns) @@ -116,7 +103,8 @@ def test_qos_create_without_properties(self): def test_qos_create_with_consumer(self): arglist = [ - '--consumer', self.new_qos_spec.consumer, + '--consumer', + self.new_qos_spec.consumer, self.new_qos_spec.name, ] verifylist = [ @@ -128,8 +116,7 @@ def test_qos_create_with_consumer(self): columns, data = 
self.cmd.take_action(parsed_args) self.qos_mock.create.assert_called_with( - self.new_qos_spec.name, - {'consumer': self.new_qos_spec.consumer} + self.new_qos_spec.name, {'consumer': self.new_qos_spec.consumer} ) self.assertEqual(self.columns, columns) @@ -137,9 +124,12 @@ def test_qos_create_with_consumer(self): def test_qos_create_with_properties(self): arglist = [ - '--consumer', self.new_qos_spec.consumer, - '--property', 'foo=bar', - '--property', 'iops=9001', + '--consumer', + self.new_qos_spec.consumer, + '--property', + 'foo=bar', + '--property', + 'iops=9001', self.new_qos_spec.name, ] verifylist = [ @@ -153,9 +143,11 @@ def test_qos_create_with_properties(self): self.qos_mock.create.assert_called_with( self.new_qos_spec.name, - {'consumer': self.new_qos_spec.consumer, - 'foo': 'bar', - 'iops': '9001'} + { + 'consumer': self.new_qos_spec.consumer, + 'foo': 'bar', + 'iops': '9001', + }, ) self.assertEqual(self.columns, columns) @@ -163,47 +155,33 @@ def test_qos_create_with_properties(self): class TestQosDelete(TestQos): - qos_specs = volume_fakes.create_qoses(count=2) def setUp(self): - super(TestQosDelete, self).setUp() + super().setUp() - self.qos_mock.get = ( - volume_fakes.get_qoses(self.qos_specs)) + self.qos_mock.get = volume_fakes.get_qoses(self.qos_specs) # Get the command object to test self.cmd = qos_specs.DeleteQos(self.app, None) def test_qos_delete(self): - arglist = [ - self.qos_specs[0].id - ] - verifylist = [ - ('qos_specs', [self.qos_specs[0].id]) - ] + arglist = [self.qos_specs[0].id] + verifylist = [('qos_specs', [self.qos_specs[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.qos_mock.delete.assert_called_with( - self.qos_specs[0].id, False) + self.qos_mock.delete.assert_called_with(self.qos_specs[0].id, False) self.assertIsNone(result) def test_qos_delete_with_force(self): - arglist = [ - '--force', - self.qos_specs[0].id - ] - verifylist = [ - ('force', True), - ('qos_specs', [self.qos_specs[0].id]) - ] + arglist = ['--force', self.qos_specs[0].id] + verifylist = [('force', True), ('qos_specs', [self.qos_specs[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.qos_mock.delete.assert_called_with( - self.qos_specs[0].id, True) + self.qos_mock.delete.assert_called_with(self.qos_specs[0].id, True) self.assertIsNone(result) def test_delete_multiple_qoses(self): @@ -235,14 +213,16 @@ def test_delete_multiple_qoses_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self.qos_specs[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: + with mock.patch.object( + utils, 'find_resource', side_effect=find_mock_result + ) as find_mock: try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual( - '1 of 2 QoS specifications failed to delete.', str(e)) + '1 of 2 QoS specifications failed to delete.', str(e) + ) find_mock.assert_any_call(self.qos_mock, self.qos_specs[0].id) find_mock.assert_any_call(self.qos_mock, 'unexist_qos') @@ -254,12 +234,11 @@ def test_delete_multiple_qoses_with_exception(self): class TestQosDisassociate(TestQos): - volume_type = volume_fakes.create_one_volume_type() qos_spec = volume_fakes.create_one_qos() def setUp(self): - super(TestQosDisassociate, self).setUp() + super().setUp() 
self.qos_mock.get.return_value = self.qos_spec self.types_mock.get.return_value = self.volume_type @@ -268,7 +247,8 @@ def setUp(self): def test_qos_disassociate_with_volume_type(self): arglist = [ - '--volume-type', self.volume_type.id, + '--volume-type', + self.volume_type.id, self.qos_spec.id, ] verifylist = [ @@ -280,8 +260,7 @@ def test_qos_disassociate_with_volume_type(self): result = self.cmd.take_action(parsed_args) self.qos_mock.disassociate.assert_called_with( - self.qos_spec.id, - self.volume_type.id + self.qos_spec.id, self.volume_type.id ) self.assertIsNone(result) @@ -290,9 +269,7 @@ def test_qos_disassociate_with_all_volume_types(self): '--all', self.qos_spec.id, ] - verifylist = [ - ('qos_spec', self.qos_spec.id) - ] + verifylist = [('qos_spec', self.qos_spec.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) @@ -302,7 +279,6 @@ def test_qos_disassociate_with_all_volume_types(self): class TestQosList(TestQos): - qos_specs = volume_fakes.create_qoses(count=2) qos_association = volume_fakes.create_one_qos_association() @@ -315,16 +291,18 @@ class TestQosList(TestQos): ) data = [] for q in qos_specs: - data.append(( - q.id, - q.name, - q.consumer, - format_columns.ListColumn([qos_association.name]), - format_columns.DictColumn(q.specs), - )) + data.append( + ( + q.id, + q.name, + q.consumer, + format_columns.ListColumn([qos_association.name]), + format_columns.DictColumn(q.specs), + ) + ) def setUp(self): - super(TestQosList, self).setUp() + super().setUp() self.qos_mock.list.return_value = self.qos_specs self.qos_mock.get_associations.return_value = [self.qos_association] @@ -373,11 +351,10 @@ def test_qos_list_no_association(self): class TestQosSet(TestQos): - qos_spec = volume_fakes.create_one_qos() def setUp(self): - super(TestQosSet, self).setUp() + super().setUp() self.qos_mock.get.return_value = self.qos_spec # Get the command object to test @@ -385,37 +362,38 @@ def setUp(self): def test_qos_set_with_properties_with_id(self): arglist = [ - '--property', 'foo=bar', - '--property', 'iops=9001', + '--no-property', + '--property', + 'a=b', + '--property', + 'c=d', self.qos_spec.id, ] + new_property = {"a": "b", "c": "d"} verifylist = [ - ('property', self.qos_spec.specs), + ('no_property', True), + ('property', new_property), ('qos_spec', self.qos_spec.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.qos_mock.set_keys.assert_called_with( + self.qos_mock.unset_keys.assert_called_with( self.qos_spec.id, - self.qos_spec.specs + list(self.qos_spec.specs.keys()), + ) + self.qos_mock.set_keys.assert_called_with( + self.qos_spec.id, {"a": "b", "c": "d"} ) self.assertIsNone(result) class TestQosShow(TestQos): - qos_spec = volume_fakes.create_one_qos() qos_association = volume_fakes.create_one_qos_association() - columns = ( - 'associations', - 'consumer', - 'id', - 'name', - 'properties' - ) + columns = ('associations', 'consumer', 'id', 'name', 'properties') data = ( format_columns.ListColumn([qos_association.name]), qos_spec.consumer, @@ -425,7 +403,7 @@ class TestQosShow(TestQos): ) def setUp(self): - super(TestQosShow, self).setUp() + super().setUp() self.qos_mock.get.return_value = self.qos_spec self.qos_mock.get_associations.return_value = [self.qos_association] @@ -434,30 +412,23 @@ def setUp(self): self.cmd = qos_specs.ShowQos(self.app, None) def test_qos_show(self): - arglist = [ - self.qos_spec.id - ] - verifylist = [ - ('qos_spec', 
self.qos_spec.id) - ] + arglist = [self.qos_spec.id] + verifylist = [('qos_spec', self.qos_spec.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.qos_mock.get.assert_called_with( - self.qos_spec.id - ) + self.qos_mock.get.assert_called_with(self.qos_spec.id) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, tuple(data)) class TestQosUnset(TestQos): - qos_spec = volume_fakes.create_one_qos() def setUp(self): - super(TestQosUnset, self).setUp() + super().setUp() self.qos_mock.get.return_value = self.qos_spec # Get the command object to test @@ -465,8 +436,10 @@ def setUp(self): def test_qos_unset_with_properties(self): arglist = [ - '--property', 'iops', - '--property', 'foo', + '--property', + 'iops', + '--property', + 'foo', self.qos_spec.id, ] verifylist = [ @@ -478,7 +451,6 @@ def test_qos_unset_with_properties(self): result = self.cmd.take_action(parsed_args) self.qos_mock.unset_keys.assert_called_with( - self.qos_spec.id, - ['iops', 'foo'] + self.qos_spec.id, ['iops', 'foo'] ) self.assertIsNone(result) diff --git a/openstackclient/tests/unit/volume/v2/test_service.py b/openstackclient/tests/unit/volume/v2/test_service.py index e9e39f4181..e230a39a9a 100644 --- a/openstackclient/tests/unit/volume/v2/test_service.py +++ b/openstackclient/tests/unit/volume/v2/test_service.py @@ -12,146 +12,118 @@ # under the License. # +from unittest import mock + +from openstack.block_storage.v2 import service as _service +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes from openstackclient.volume.v2 import service -class TestService(volume_fakes.TestVolume): - - def setUp(self): - super().setUp() - - # Get a shortcut to the ServiceManager Mock - self.service_mock = self.app.client_manager.volume.services - self.service_mock.reset_mock() - - -class TestServiceList(TestService): - - # The service to be listed - services = volume_fakes.create_one_service() - +class TestServiceList(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.service_mock.list.return_value = [self.services] + self.service = sdk_fakes.generate_fake_resource(_service.Service) + self.volume_sdk_client.services.return_value = [self.service] - # Get the command object to test self.cmd = service.ListService(self.app, None) def test_service_list(self): arglist = [ - '--host', self.services.host, - '--service', self.services.binary, + '--host', + self.service.host, + '--service', + self.service.binary, ] verifylist = [ - ('host', self.services.host), - ('service', self.services.binary), + ('host', self.service.host), + ('service', self.service.binary), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class Lister in cliff, abstract method take_action() - # returns a tuple containing the column names and an iterable - # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - expected_columns = [ + expected_columns = ( 'Binary', 'Host', 'Zone', 'Status', 'State', 'Updated At', - ] - - # confirming if all expected columns are present in the result. 
+ ) + datalist = ( + ( + self.service.binary, + self.service.host, + self.service.availability_zone, + self.service.status, + self.service.state, + self.service.updated_at, + ), + ) self.assertEqual(expected_columns, columns) - - datalist = (( - self.services.binary, - self.services.host, - self.services.zone, - self.services.status, - self.services.state, - self.services.updated_at, - ), ) - - # confirming if all expected values are present in the result. self.assertEqual(datalist, tuple(data)) - - # checking if proper call was made to list services - self.service_mock.list.assert_called_with( - self.services.host, - self.services.binary, + self.volume_sdk_client.services.assert_called_with( + host=self.service.host, + binary=self.service.binary, ) - # checking if prohibited columns are present in output - self.assertNotIn("Disabled Reason", columns) - self.assertNotIn(self.services.disabled_reason, - tuple(data)) - def test_service_list_with_long_option(self): arglist = [ - '--host', self.services.host, - '--service', self.services.binary, - '--long' + '--host', + self.service.host, + '--service', + self.service.binary, + '--long', ] verifylist = [ - ('host', self.services.host), - ('service', self.services.binary), - ('long', True) + ('host', self.service.host), + ('service', self.service.binary), + ('long', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class Lister in cliff, abstract method take_action() - # returns a tuple containing the column names and an iterable - # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) - expected_columns = [ + expected_columns = ( 'Binary', 'Host', 'Zone', 'Status', 'State', 'Updated At', - 'Disabled Reason' - ] - - # confirming if all expected columns are present in the result. + 'Disabled Reason', + ) + datalist = ( + ( + self.service.binary, + self.service.host, + self.service.availability_zone, + self.service.status, + self.service.state, + self.service.updated_at, + self.service.disabled_reason, + ), + ) self.assertEqual(expected_columns, columns) - - datalist = (( - self.services.binary, - self.services.host, - self.services.zone, - self.services.status, - self.services.state, - self.services.updated_at, - self.services.disabled_reason, - ), ) - - # confirming if all expected values are present in the result. 
self.assertEqual(datalist, tuple(data)) - - self.service_mock.list.assert_called_with( - self.services.host, - self.services.binary, + self.volume_sdk_client.services.assert_called_with( + host=self.service.host, + binary=self.service.binary, ) -class TestServiceSet(TestService): - - service = volume_fakes.create_one_service() - +class TestServiceSet(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.service_mock.enable.return_value = self.service - self.service_mock.disable.return_value = self.service - self.service_mock.disable_log_reason.return_value = self.service + self.service = sdk_fakes.generate_fake_resource(_service.Service) + self.service.enable = mock.Mock(autospec=True) + self.service.disable = mock.Mock(autospec=True) + self.volume_sdk_client.find_service.return_value = self.service self.cmd = service.SetService(self.app, None) @@ -167,9 +139,8 @@ def test_service_set_nothing(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.service_mock.enable.assert_not_called() - self.service_mock.disable.assert_not_called() - self.service_mock.disable_log_reason.assert_not_called() + self.service.enable.assert_not_called() + self.service.disable.assert_not_called() self.assertIsNone(result) def test_service_set_enable(self): @@ -187,12 +158,8 @@ def test_service_set_enable(self): result = self.cmd.take_action(parsed_args) - self.service_mock.enable.assert_called_with( - self.service.host, - self.service.binary - ) - self.service_mock.disable.assert_not_called() - self.service_mock.disable_log_reason.assert_not_called() + self.service.enable.assert_called_with(self.volume_sdk_client) + self.service.disable.assert_not_called() self.assertIsNone(result) def test_service_set_disable(self): @@ -210,19 +177,18 @@ def test_service_set_disable(self): result = self.cmd.take_action(parsed_args) - self.service_mock.disable.assert_called_with( - self.service.host, - self.service.binary + self.service.enable.assert_not_called() + self.service.disable.assert_called_with( + self.volume_sdk_client, reason=None ) - self.service_mock.enable.assert_not_called() - self.service_mock.disable_log_reason.assert_not_called() self.assertIsNone(result) def test_service_set_disable_with_reason(self): reason = 'earthquake' arglist = [ '--disable', - '--disable-reason', reason, + '--disable-reason', + reason, self.service.host, self.service.binary, ] @@ -236,17 +202,17 @@ def test_service_set_disable_with_reason(self): result = self.cmd.take_action(parsed_args) - self.service_mock.disable_log_reason.assert_called_with( - self.service.host, - self.service.binary, - reason + self.service.enable.assert_not_called() + self.service.disable.assert_called_with( + self.volume_sdk_client, reason=reason ) self.assertIsNone(result) def test_service_set_only_with_disable_reason(self): reason = 'earthquake' arglist = [ - '--disable-reason', reason, + '--disable-reason', + reason, self.service.host, self.service.binary, ] @@ -256,18 +222,23 @@ def test_service_set_only_with_disable_reason(self): ('service', self.service.binary), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + try: self.cmd.take_action(parsed_args) self.fail("CommandError should be raised.") except exceptions.CommandError as e: - self.assertEqual("Cannot specify option --disable-reason without " - "--disable specified.", str(e)) + self.assertEqual( + "Cannot specify option --disable-reason without " + "--disable specified.", + str(e), + ) def 
test_service_set_enable_with_disable_reason(self): reason = 'earthquake' arglist = [ '--enable', - '--disable-reason', reason, + '--disable-reason', + reason, self.service.host, self.service.binary, ] @@ -278,9 +249,13 @@ def test_service_set_enable_with_disable_reason(self): ('service', self.service.binary), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) + try: self.cmd.take_action(parsed_args) self.fail("CommandError should be raised.") except exceptions.CommandError as e: - self.assertEqual("Cannot specify option --disable-reason without " - "--disable specified.", str(e)) + self.assertEqual( + "Cannot specify option --disable-reason without " + "--disable specified.", + str(e), + ) diff --git a/openstackclient/tests/unit/volume/v2/test_volume.py b/openstackclient/tests/unit/volume/v2/test_volume.py index 0419acef18..b68020fa95 100644 --- a/openstackclient/tests/unit/volume/v2/test_volume.py +++ b/openstackclient/tests/unit/volume/v2/test_volume.py @@ -10,682 +10,668 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -import argparse from unittest import mock -from unittest.mock import call +import uuid -from cinderclient import api_versions +from openstack.block_storage.v2 import snapshot as _snapshot +from openstack.block_storage.v2 import volume as _volume +from openstack import exceptions as sdk_exceptions +from openstack.test import fakes as sdk_fakes from osc_lib.cli import format_columns from osc_lib import exceptions from osc_lib import utils +from openstackclient.api import volume_v2 from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit.image.v2 import fakes as image_fakes -from openstackclient.tests.unit import utils as tests_utils +from openstackclient.tests.unit import utils as test_utils from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes from openstackclient.volume.v2 import volume class TestVolume(volume_fakes.TestVolume): - def setUp(self): super().setUp() - self.volumes_mock = self.app.client_manager.volume.volumes + self.volumes_mock = self.volume_client.volumes self.volumes_mock.reset_mock() - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() - self.users_mock = self.app.client_manager.identity.users + self.users_mock = self.identity_client.users self.users_mock.reset_mock() - self.find_image_mock = self.app.client_manager.image.find_image - self.find_image_mock.reset_mock() - - self.snapshots_mock = self.app.client_manager.volume.volume_snapshots + self.snapshots_mock = self.volume_client.volume_snapshots self.snapshots_mock.reset_mock() - self.backups_mock = self.app.client_manager.volume.backups - self.backups_mock.reset_mock() - - self.types_mock = self.app.client_manager.volume.volume_types + self.types_mock = self.volume_client.volume_types self.types_mock.reset_mock() - self.consistencygroups_mock = ( - self.app.client_manager.volume.consistencygroups) + self.consistencygroups_mock = self.volume_client.consistencygroups self.consistencygroups_mock.reset_mock() - def setup_volumes_mock(self, count): - volumes = volume_fakes.create_volumes(count=count) - - self.volumes_mock.get = volume_fakes.get_volumes(volumes, 0) - return volumes - - -class TestVolumeCreate(TestVolume): - - project = identity_fakes.FakeProject.create_one_project() - user = 
identity_fakes.FakeUser.create_one_user() +class TestVolumeCreate(volume_fakes.TestVolume): columns = ( 'attachments', 'availability_zone', 'bootable', + 'consistencygroup_id', + 'created_at', 'description', + 'encrypted', 'id', + 'multiattach', 'name', + 'os-vol-host-attr:host', + 'os-vol-mig-status-attr:migstat', + 'os-vol-mig-status-attr:name_id', + 'os-vol-tenant-attr:tenant_id', + 'os-volume-replication:driver_data', + 'os-volume-replication:extended_status', 'properties', + 'replication_status', 'size', 'snapshot_id', + 'source_volid', 'status', 'type', + 'updated_at', + 'user_id', + 'volume_image_metadata', ) def setUp(self): super().setUp() - self.new_volume = volume_fakes.create_one_volume() - self.volumes_mock.create.return_value = self.new_volume + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.create_volume.return_value = self.volume self.datalist = ( - self.new_volume.attachments, - self.new_volume.availability_zone, - self.new_volume.bootable, - self.new_volume.description, - self.new_volume.id, - self.new_volume.name, - format_columns.DictColumn(self.new_volume.metadata), - self.new_volume.size, - self.new_volume.snapshot_id, - self.new_volume.status, - self.new_volume.volume_type, + self.volume.attachments, + self.volume.availability_zone, + self.volume.is_bootable, + self.volume.consistency_group_id, + self.volume.created_at, + self.volume.description, + self.volume.is_encrypted, + self.volume.id, + self.volume.is_multiattach, + self.volume.name, + self.volume.host, + self.volume.migration_status, + self.volume.migration_id, + self.volume.project_id, + self.volume.replication_driver_data, + self.volume.extended_replication_status, + format_columns.DictColumn(self.volume.metadata), + self.volume.replication_status, + self.volume.size, + self.volume.snapshot_id, + self.volume.source_volume_id, + self.volume.status, + self.volume.volume_type, + self.volume.updated_at, + self.volume.user_id, + self.volume.volume_image_metadata, ) - # Get the command object to test self.cmd = volume.CreateVolume(self.app, None) def test_volume_create_min_options(self): arglist = [ - '--size', str(self.new_volume.size), + '--size', + str(self.volume.size), ] verifylist = [ - ('size', self.new_volume.size), + ('size', self.volume.size), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_with( - size=self.new_volume.size, + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, snapshot_id=None, name=None, description=None, volume_type=None, availability_zone=None, metadata=None, - imageRef=None, - source_volid=None, - consistencygroup_id=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, scheduler_hints=None, - backup_id=None, ) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) + self.assertEqual(self.datalist, data) def test_volume_create_options(self): - consistency_group = volume_fakes.create_one_consistency_group() - self.consistencygroups_mock.get.return_value = consistency_group + consistency_group_id = 'cg123' arglist = [ - '--size', str(self.new_volume.size), - '--description', self.new_volume.description, - '--type', self.new_volume.volume_type, - '--availability-zone', self.new_volume.availability_zone, - '--consistency-group', consistency_group.id, - '--hint', 'k=v', - self.new_volume.name, + '--size', + str(self.volume.size), + '--description', + self.volume.description, + '--type', + self.volume.volume_type, + '--availability-zone', + self.volume.availability_zone, + '--consistency-group', + consistency_group_id, + '--hint', + 'k=v', + self.volume.name, ] verifylist = [ - ('size', self.new_volume.size), - ('description', self.new_volume.description), - ('type', self.new_volume.volume_type), - ('availability_zone', self.new_volume.availability_zone), - ('consistency_group', consistency_group.id), + ('size', self.volume.size), + ('description', self.volume.description), + ('type', self.volume.volume_type), + ('availability_zone', self.volume.availability_zone), + ('consistency_group', consistency_group_id), ('hint', {'k': 'v'}), - ('name', self.new_volume.name), + ('name', self.volume.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
- columns, data = self.cmd.take_action(parsed_args) + with mock.patch.object( + volume_v2, + 'find_consistency_group', + return_value={'id': consistency_group_id}, + ) as mock_find_cg: + columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_with( - size=self.new_volume.size, + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, snapshot_id=None, - name=self.new_volume.name, - description=self.new_volume.description, - volume_type=self.new_volume.volume_type, - availability_zone=self.new_volume.availability_zone, + name=self.volume.name, + description=self.volume.description, + volume_type=self.volume.volume_type, + availability_zone=self.volume.availability_zone, metadata=None, - imageRef=None, - source_volid=None, - consistencygroup_id=consistency_group.id, + image_id=None, + source_volume_id=None, + consistency_group_id=consistency_group_id, scheduler_hints={'k': 'v'}, - backup_id=None, + ) + mock_find_cg.assert_called_once_with( + self.volume_sdk_client, consistency_group_id ) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) + self.assertEqual(self.datalist, data) def test_volume_create_properties(self): arglist = [ - '--property', 'Alpha=a', - '--property', 'Beta=b', - '--size', str(self.new_volume.size), - self.new_volume.name, + '--property', + 'Alpha=a', + '--property', + 'Beta=b', + '--size', + str(self.volume.size), + self.volume.name, ] verifylist = [ - ('property', {'Alpha': 'a', 'Beta': 'b'}), - ('size', self.new_volume.size), - ('name', self.new_volume.name), + ('properties', {'Alpha': 'a', 'Beta': 'b'}), + ('size', self.volume.size), + ('name', self.volume.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_with( - size=self.new_volume.size, + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, snapshot_id=None, - name=self.new_volume.name, + name=self.volume.name, description=None, volume_type=None, availability_zone=None, metadata={'Alpha': 'a', 'Beta': 'b'}, - imageRef=None, - source_volid=None, - consistencygroup_id=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, scheduler_hints=None, - backup_id=None, ) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) + self.assertEqual(self.datalist, data) - def test_volume_create_image_id(self): + def test_volume_create_image(self): image = image_fakes.create_one_image() - self.find_image_mock.return_value = image + self.image_client.find_image.return_value = image arglist = [ - '--image', image.id, - '--size', str(self.new_volume.size), - self.new_volume.name, + '--image', + image.id, + '--size', + str(self.volume.size), + self.volume.name, ] verifylist = [ ('image', image.id), - ('size', self.new_volume.size), - ('name', self.new_volume.name), + ('size', self.volume.size), + ('name', self.volume.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_with( - size=self.new_volume.size, + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, snapshot_id=None, - name=self.new_volume.name, + name=self.volume.name, description=None, volume_type=None, availability_zone=None, metadata=None, - imageRef=image.id, - source_volid=None, - consistencygroup_id=None, + image_id=image.id, + source_volume_id=None, + consistency_group_id=None, scheduler_hints=None, - backup_id=None, ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_image_name(self): - image = image_fakes.create_one_image() - self.find_image_mock.return_value = image - - arglist = [ - '--image', image.name, - '--size', str(self.new_volume.size), - self.new_volume.name, - ] - verifylist = [ - ('image', image.name), - ('size', self.new_volume.size), - ('name', self.new_volume.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.create.assert_called_with( - size=self.new_volume.size, - snapshot_id=None, - name=self.new_volume.name, - description=None, - volume_type=None, - availability_zone=None, - metadata=None, - imageRef=image.id, - source_volid=None, - consistencygroup_id=None, - scheduler_hints=None, - backup_id=None, + self.image_client.find_image.assert_called_once_with( + image.id, ignore_missing=False ) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) + self.assertEqual(self.datalist, data) def test_volume_create_with_snapshot(self): - snapshot = volume_fakes.create_one_snapshot() - self.new_volume.snapshot_id = snapshot.id + snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot) + self.volume_sdk_client.find_snapshot.return_value = snapshot + arglist = [ - '--snapshot', self.new_volume.snapshot_id, - self.new_volume.name, + '--snapshot', + snapshot.id, + self.volume.name, ] verifylist = [ - ('snapshot', self.new_volume.snapshot_id), - ('name', self.new_volume.name), + ('snapshot', snapshot.id), + ('name', self.volume.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.snapshots_mock.get.return_value = snapshot - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_once_with( + self.volume_sdk_client.create_volume.assert_called_with( size=snapshot.size, snapshot_id=snapshot.id, - name=self.new_volume.name, + name=self.volume.name, description=None, volume_type=None, availability_zone=None, metadata=None, - imageRef=None, - source_volid=None, - consistencygroup_id=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, scheduler_hints=None, - backup_id=None, ) - - self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_with_backup(self): - backup = volume_fakes.create_one_backup() - self.new_volume.backup_id = backup.id - arglist = [ - '--backup', self.new_volume.backup_id, - self.new_volume.name, - ] - verifylist = [ - ('backup', self.new_volume.backup_id), - ('name', self.new_volume.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.backups_mock.get.return_value = backup - - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.47') - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. - columns, data = self.cmd.take_action(parsed_args) - - self.volumes_mock.create.assert_called_once_with( - size=backup.size, - snapshot_id=None, - name=self.new_volume.name, - description=None, - volume_type=None, - availability_zone=None, - metadata=None, - imageRef=None, - source_volid=None, - consistencygroup_id=None, - scheduler_hints=None, - backup_id=backup.id, + self.volume_sdk_client.find_snapshot.assert_called_once_with( + snapshot.id, ignore_missing=False ) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - - def test_volume_create_with_backup_pre_347(self): - backup = volume_fakes.create_one_backup() - self.new_volume.backup_id = backup.id - arglist = [ - '--backup', self.new_volume.backup_id, - self.new_volume.name, - ] - verifylist = [ - ('backup', self.new_volume.backup_id), - ('name', self.new_volume.name), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - self.backups_mock.get.return_value = backup - - exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) - self.assertIn("--os-volume-api-version 3.47 or greater", str(exc)) + self.assertEqual(self.datalist, data) def test_volume_create_with_source_volume(self): - source_vol = "source_vol" + source_volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = source_volume + arglist = [ - '--source', self.new_volume.id, - source_vol, + '--source', + source_volume.id, + self.volume.name, ] verifylist = [ - ('source', self.new_volume.id), - ('name', source_vol), + ('source', source_volume.id), + ('name', self.volume.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.volumes_mock.get.return_value = self.new_volume - - # In base command class ShowOne in cliff, abstract method take_action() - # returns a two-part tuple with a tuple of column names and a tuple of - # data to be shown. 
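Several of the reworked setUp() and test methods above, test_volume_create_with_source_volume included, now build their fixtures with openstacksdk's fake-resource helpers instead of the old volume_fakes factories. As a minimal standalone sketch of that pattern (the variable names below are invented for illustration and are not taken from this change): generate_fake_resource() populates the fields of the given resource class with generated data, and keyword arguments pin specific attributes.

    from openstack.block_storage.v2 import volume as _volume
    from openstack.test import fakes as sdk_fakes

    # Every attribute of the fake Volume gets a generated value.
    fake_volume = sdk_fakes.generate_fake_resource(_volume.Volume)

    # Keyword arguments override individual fields, the same way these
    # tests pin attachments, volume_id or snapshot_id on their fakes.
    pinned_volume = sdk_fakes.generate_fake_resource(_volume.Volume, size=1)

The tests then point the mocked SDK client at such objects (for example via volume_sdk_client.find_volume.return_value) and assert on the SDK calls rather than on cinderclient managers.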
columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_once_with( - size=self.new_volume.size, + self.volume_sdk_client.create_volume.assert_called_with( + size=source_volume.size, snapshot_id=None, - name=source_vol, + name=self.volume.name, description=None, volume_type=None, availability_zone=None, metadata=None, - imageRef=None, - source_volid=self.new_volume.id, - consistencygroup_id=None, + image_id=None, + source_volume_id=source_volume.id, + consistency_group_id=None, scheduler_hints=None, - backup_id=None, + ) + self.volume_sdk_client.find_volume.assert_called_once_with( + source_volume.id, ignore_missing=False ) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) + self.assertEqual(self.datalist, data) @mock.patch.object(utils, 'wait_for_status', return_value=True) def test_volume_create_with_bootable_and_readonly(self, mock_wait): arglist = [ '--bootable', '--read-only', - '--size', str(self.new_volume.size), - self.new_volume.name, + '--size', + str(self.volume.size), + self.volume.name, ] verifylist = [ ('bootable', True), - ('non_bootable', False), ('read_only', True), - ('read_write', False), - ('size', self.new_volume.size), - ('name', self.new_volume.name), + ('size', self.volume.size), + ('name', self.volume.name), ] - parsed_args = self.check_parser( - self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_with( - size=self.new_volume.size, + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, snapshot_id=None, - name=self.new_volume.name, + name=self.volume.name, description=None, volume_type=None, availability_zone=None, metadata=None, - imageRef=None, - source_volid=None, - consistencygroup_id=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, scheduler_hints=None, - backup_id=None, + ) + self.volume_sdk_client.set_volume_bootable_status.assert_called_once_with( + self.volume, True + ) + self.volume_sdk_client.set_volume_readonly.assert_called_once_with( + self.volume, True ) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - self.volumes_mock.set_bootable.assert_called_with( - self.new_volume.id, True) - self.volumes_mock.update_readonly_flag.assert_called_with( - self.new_volume.id, True) + self.assertEqual(self.datalist, data) @mock.patch.object(utils, 'wait_for_status', return_value=True) def test_volume_create_with_nonbootable_and_readwrite(self, mock_wait): arglist = [ '--non-bootable', '--read-write', - '--size', str(self.new_volume.size), - self.new_volume.name, + '--size', + str(self.volume.size), + self.volume.name, ] verifylist = [ ('bootable', False), - ('non_bootable', True), ('read_only', False), - ('read_write', True), - ('size', self.new_volume.size), - ('name', self.new_volume.name), + ('size', self.volume.size), + ('name', self.volume.name), ] - parsed_args = self.check_parser( - self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_with( - size=self.new_volume.size, + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, snapshot_id=None, - name=self.new_volume.name, + name=self.volume.name, description=None, volume_type=None, availability_zone=None, metadata=None, - imageRef=None, - source_volid=None, - 
consistencygroup_id=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, scheduler_hints=None, - backup_id=None, + ) + self.volume_sdk_client.set_volume_bootable_status.assert_called_once_with( + self.volume, False + ) + self.volume_sdk_client.set_volume_readonly.assert_called_once_with( + self.volume, False ) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - self.volumes_mock.set_bootable.assert_called_with( - self.new_volume.id, False) - self.volumes_mock.update_readonly_flag.assert_called_with( - self.new_volume.id, False) + self.assertEqual(self.datalist, data) @mock.patch.object(volume.LOG, 'error') @mock.patch.object(utils, 'wait_for_status', return_value=True) def test_volume_create_with_bootable_and_readonly_fail( - self, mock_wait, mock_error): - - self.volumes_mock.set_bootable.side_effect = ( - exceptions.CommandError()) - - self.volumes_mock.update_readonly_flag.side_effect = ( - exceptions.CommandError()) + self, mock_wait, mock_error + ): + self.volume_sdk_client.set_volume_bootable_status.side_effect = ( + sdk_exceptions.NotFoundException('foo') + ) + self.volume_sdk_client.set_volume_readonly.side_effect = ( + sdk_exceptions.NotFoundException('foo') + ) arglist = [ '--bootable', '--read-only', - '--size', str(self.new_volume.size), - self.new_volume.name, + '--size', + str(self.volume.size), + self.volume.name, ] verifylist = [ ('bootable', True), - ('non_bootable', False), ('read_only', True), - ('read_write', False), - ('size', self.new_volume.size), - ('name', self.new_volume.name), + ('size', self.volume.size), + ('name', self.volume.name), ] - parsed_args = self.check_parser( - self.cmd, arglist, verifylist) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_with( - size=self.new_volume.size, + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, snapshot_id=None, - name=self.new_volume.name, + name=self.volume.name, description=None, volume_type=None, availability_zone=None, metadata=None, - imageRef=None, - source_volid=None, - consistencygroup_id=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, scheduler_hints=None, - backup_id=None, + ) + self.volume_sdk_client.set_volume_bootable_status.assert_called_once_with( + self.volume, True + ) + self.volume_sdk_client.set_volume_readonly.assert_called_once_with( + self.volume, True ) self.assertEqual(2, mock_error.call_count) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) - self.volumes_mock.set_bootable.assert_called_with( - self.new_volume.id, True) - self.volumes_mock.update_readonly_flag.assert_called_with( - self.new_volume.id, True) + self.assertEqual(self.datalist, data) @mock.patch.object(volume.LOG, 'error') @mock.patch.object(utils, 'wait_for_status', return_value=False) def test_volume_create_non_available_with_readonly( - self, mock_wait, mock_error, + self, mock_wait, mock_error ): arglist = [ '--non-bootable', '--read-only', - '--size', str(self.new_volume.size), - self.new_volume.name, + '--size', + str(self.volume.size), + self.volume.name, ] verifylist = [ ('bootable', False), - ('non_bootable', True), ('read_only', True), - ('read_write', False), - ('size', self.new_volume.size), - ('name', self.new_volume.name), + ('size', self.volume.size), + ('name', self.volume.name), ] - parsed_args = self.check_parser( - self.cmd, arglist, verifylist) + 
parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.create.assert_called_with( - size=self.new_volume.size, + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, snapshot_id=None, - name=self.new_volume.name, + name=self.volume.name, description=None, volume_type=None, availability_zone=None, metadata=None, - imageRef=None, - source_volid=None, - consistencygroup_id=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, scheduler_hints=None, - backup_id=None, ) self.assertEqual(2, mock_error.call_count) self.assertEqual(self.columns, columns) - self.assertCountEqual(self.datalist, data) + self.assertEqual(self.datalist, data) def test_volume_create_without_size(self): arglist = [ - self.new_volume.name, + self.volume.name, ] verifylist = [ - ('name', self.new_volume.name), + ('name', self.volume.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) def test_volume_create_with_multi_source(self): arglist = [ - '--image', 'source_image', - '--source', 'source_volume', - '--snapshot', 'source_snapshot', - '--size', str(self.new_volume.size), - self.new_volume.name, + '--image', + 'source_image', + '--source', + 'source_volume', + '--snapshot', + 'source_snapshot', + '--size', + str(self.volume.size), + self.volume.name, ] verifylist = [ ('image', 'source_image'), ('source', 'source_volume'), ('snapshot', 'source_snapshot'), - ('size', self.new_volume.size), - ('name', self.new_volume.name), + ('size', self.volume.size), + ('name', self.volume.name), + ] + + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + + def test_volume_create_hints(self): + """--hint needs to behave differently based on the given hint + + different_host and same_host need to append to a list if given multiple + times. All other parameters are strings.
+ """ + arglist = [ + '--size', + str(self.volume.size), + '--hint', + 'k=v', + '--hint', + 'k=v2', + '--hint', + 'same_host=v3', + '--hint', + 'same_host=v4', + '--hint', + 'different_host=v5', + '--hint', + 'local_to_instance=v6', + '--hint', + 'different_host=v7', + self.volume.name, ] + verifylist = [ + ('size', self.volume.size), + ( + 'hint', + { + 'k': 'v2', + 'same_host': ['v3', 'v4'], + 'local_to_instance': 'v6', + 'different_host': ['v5', 'v7'], + }, + ), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints={ + 'k': 'v2', + 'same_host': ['v3', 'v4'], + 'local_to_instance': 'v6', + 'different_host': ['v5', 'v7'], + }, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) -class TestVolumeDelete(TestVolume): +class TestVolumeDelete(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.volumes_mock.delete.return_value = None + self.volumes = list(sdk_fakes.generate_fake_resources(_volume.Volume)) + self.volume_sdk_client.find_volume.side_effect = self.volumes + self.volume_sdk_client.delete_volume.return_value = None - # Get the command object to mock self.cmd = volume.DeleteVolume(self.app, None) def test_volume_delete_one_volume(self): - volumes = self.setup_volumes_mock(count=1) - - arglist = [ - volumes[0].id - ] + arglist = [self.volumes[0].id] verifylist = [ ("force", False), ("purge", False), - ("volumes", [volumes[0].id]), + ("volumes", [self.volumes[0].id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - - self.volumes_mock.delete.assert_called_once_with( - volumes[0].id, cascade=False) self.assertIsNone(result) - def test_volume_delete_multi_volumes(self): - volumes = self.setup_volumes_mock(count=3) + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volumes[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_volume.assert_called_once_with( + self.volumes[0].id, cascade=False, force=False + ) - arglist = [v.id for v in volumes] + def test_volume_delete_multi_volumes(self): + arglist = [v.id for v in self.volumes] verifylist = [ ('force', False), ('purge', False), @@ -694,84 +680,97 @@ def test_volume_delete_multi_volumes(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - - calls = [call(v.id, cascade=False) for v in volumes] - self.volumes_mock.delete.assert_has_calls(calls) self.assertIsNone(result) + self.volume_sdk_client.find_volume.assert_has_calls( + [mock.call(v.id, ignore_missing=False) for v in self.volumes] + ) + self.volume_sdk_client.delete_volume.assert_has_calls( + [mock.call(v.id, cascade=False, force=False) for v in self.volumes] + ) + def test_volume_delete_multi_volumes_with_exception(self): - volumes = self.setup_volumes_mock(count=2) + self.volume_sdk_client.find_volume.side_effect = [ + self.volumes[0], + sdk_exceptions.NotFoundException(), + ] arglist = [ - volumes[0].id, + self.volumes[0].id, 'unexist_volume', ] verifylist = [ ('force', False), ('purge', 
False), - ('volumes', arglist), + ('volumes', [self.volumes[0].id, 'unexist_volume']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - find_mock_result = [volumes[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('1 of 2 volumes failed to delete.', - str(e)) - - find_mock.assert_any_call(self.volumes_mock, volumes[0].id) - find_mock.assert_any_call(self.volumes_mock, 'unexist_volume') + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertEqual('1 of 2 volumes failed to delete.', str(exc)) - self.assertEqual(2, find_mock.call_count) - self.volumes_mock.delete.assert_called_once_with( - volumes[0].id, cascade=False) + self.volume_sdk_client.find_volume.assert_has_calls( + [ + mock.call(self.volumes[0].id, ignore_missing=False), + mock.call('unexist_volume', ignore_missing=False), + ] + ) + self.volume_sdk_client.delete_volume.assert_has_calls( + [ + mock.call(self.volumes[0].id, cascade=False, force=False), + ] + ) def test_volume_delete_with_purge(self): - volumes = self.setup_volumes_mock(count=1) - arglist = [ '--purge', - volumes[0].id, + self.volumes[0].id, ] verifylist = [ ('force', False), ('purge', True), - ('volumes', [volumes[0].id]), + ('volumes', [self.volumes[0].id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - - self.volumes_mock.delete.assert_called_once_with( - volumes[0].id, cascade=True) self.assertIsNone(result) - def test_volume_delete_with_force(self): - volumes = self.setup_volumes_mock(count=1) + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volumes[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_volume.assert_called_once_with( + self.volumes[0].id, cascade=True, force=False + ) + def test_volume_delete_with_force(self): arglist = [ '--force', - volumes[0].id, + self.volumes[0].id, ] verifylist = [ ('force', True), ('purge', False), - ('volumes', [volumes[0].id]), + ('volumes', [self.volumes[0].id]), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - - self.volumes_mock.force_delete.assert_called_once_with(volumes[0].id) self.assertIsNone(result) + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volumes[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_volume.assert_called_once_with( + self.volumes[0].id, cascade=False, force=True + ) -class TestVolumeList(TestVolume): +class TestVolumeList(TestVolume): project = identity_fakes.FakeProject.create_one_project() user = identity_fakes.FakeUser.create_one_user() @@ -825,18 +824,21 @@ def test_volume_list_no_options(self): self.assertEqual(self.columns, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - volume.AttachmentsColumn(self.mock_volume.attachments), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_project(self): arglist = [ - '--project', self.project.name, + '--project', + self.project.name, ] verifylist = [ ('project', self.project.name), @@ -865,19 
+867,23 @@ def test_volume_list_project(self): self.assertEqual(self.columns, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - volume.AttachmentsColumn(self.mock_volume.attachments), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_project_domain(self): arglist = [ - '--project', self.project.name, - '--project-domain', self.project.domain_id, + '--project', + self.project.name, + '--project-domain', + self.project.domain_id, ] verifylist = [ ('project', self.project.name), @@ -907,18 +913,21 @@ def test_volume_list_project_domain(self): self.assertEqual(self.columns, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - volume.AttachmentsColumn(self.mock_volume.attachments), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_user(self): arglist = [ - '--user', self.user.name, + '--user', + self.user.name, ] verifylist = [ ('user', self.user.name), @@ -946,19 +955,23 @@ def test_volume_list_user(self): ) self.assertEqual(self.columns, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - volume.AttachmentsColumn(self.mock_volume.attachments), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_user_domain(self): arglist = [ - '--user', self.user.name, - '--user-domain', self.user.domain_id, + '--user', + self.user.name, + '--user-domain', + self.user.domain_id, ] verifylist = [ ('user', self.user.name), @@ -988,18 +1001,21 @@ def test_volume_list_user_domain(self): self.assertEqual(self.columns, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - volume.AttachmentsColumn(self.mock_volume.attachments), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_name(self): arglist = [ - '--name', self.mock_volume.name, + '--name', + self.mock_volume.name, ] verifylist = [ ('long', False), @@ -1028,18 +1044,21 @@ def test_volume_list_name(self): self.assertEqual(self.columns, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - volume.AttachmentsColumn(self.mock_volume.attachments), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_status(self): arglist = [ - '--status', self.mock_volume.status, + '--status', + self.mock_volume.status, ] verifylist = [ ('long', False), @@ -1068,13 +1087,15 @@ def 
test_volume_list_status(self): self.assertEqual(self.columns, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - volume.AttachmentsColumn(self.mock_volume.attachments), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_all_projects(self): @@ -1108,13 +1129,15 @@ def test_volume_list_all_projects(self): self.assertEqual(self.columns, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - volume.AttachmentsColumn(self.mock_volume.attachments), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_long(self): @@ -1159,22 +1182,26 @@ def test_volume_list_long(self): ] self.assertEqual(collist, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - self.mock_volume.volume_type, - self.mock_volume.bootable, - volume.AttachmentsColumn(self.mock_volume.attachments), - format_columns.DictColumn(self.mock_volume.metadata), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + self.mock_volume.volume_type, + self.mock_volume.bootable, + volume.AttachmentsColumn(self.mock_volume.attachments), + format_columns.DictColumn(self.mock_volume.metadata), + ), + ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_with_marker_and_limit(self): arglist = [ - "--marker", self.mock_volume.id, - "--limit", "2", + "--marker", + self.mock_volume.id, + "--limit", + "2", ] verifylist = [ ('long', False), @@ -1190,13 +1217,15 @@ def test_volume_list_with_marker_and_limit(self): self.assertEqual(self.columns, columns) - datalist = (( - self.mock_volume.id, - self.mock_volume.name, - self.mock_volume.status, - self.mock_volume.size, - volume.AttachmentsColumn(self.mock_volume.attachments), - ), ) + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) self.volumes_mock.list.assert_called_once_with( marker=self.mock_volume.id, @@ -1206,23 +1235,31 @@ def test_volume_list_with_marker_and_limit(self): 'project_id': None, 'user_id': None, 'name': None, - 'all_tenants': False, } + 'all_tenants': False, + }, ) self.assertCountEqual(datalist, tuple(data)) def test_volume_list_negative_limit(self): arglist = [ - "--limit", "-2", + "--limit", + "-2", ] verifylist = [ ("limit", -2), ] - self.assertRaises(argparse.ArgumentTypeError, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_volume_list_backward_compatibility(self): arglist = [ - '-c', 'Display Name', + '-c', + 'Display Name', ] verifylist = [ ('columns', ['Display Name']), @@ -1257,74 +1294,95 @@ def test_volume_list_backward_compatibility(self): self.assertIn(self.mock_volume.name, each_volume) -class TestVolumeMigrate(TestVolume): - - _volume = volume_fakes.create_one_volume() - +class 
TestVolumeMigrate(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.volumes_mock.get.return_value = self._volume - self.volumes_mock.migrate_volume.return_value = None - # Get the command object to test + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.volume_sdk_client.migrate_volume.return_value = None + self.cmd = volume.MigrateVolume(self.app, None) def test_volume_migrate(self): arglist = [ - "--host", "host@backend-name#pool", - self._volume.id, + "--host", + "host@backend-name#pool", + self.volume.id, ] verifylist = [ ("force_host_copy", False), ("lock_volume", False), ("host", "host@backend-name#pool"), - ("volume", self._volume.id), + ("volume", self.volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.volumes_mock.get.assert_called_once_with(self._volume.id) - self.volumes_mock.migrate_volume.assert_called_once_with( - self._volume.id, "host@backend-name#pool", False, False) self.assertIsNone(result) + self.volume_sdk_client.find_volume.assert_called_with( + self.volume.id, ignore_missing=False + ) + self.volume_sdk_client.migrate_volume.assert_called_once_with( + self.volume.id, + host="host@backend-name#pool", + force_host_copy=False, + lock_volume=False, + ) + def test_volume_migrate_with_option(self): arglist = [ "--force-host-copy", "--lock-volume", - "--host", "host@backend-name#pool", - self._volume.id, + "--host", + "host@backend-name#pool", + self.volume.id, ] verifylist = [ ("force_host_copy", True), ("lock_volume", True), ("host", "host@backend-name#pool"), - ("volume", self._volume.id), + ("volume", self.volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.volumes_mock.get.assert_called_once_with(self._volume.id) - self.volumes_mock.migrate_volume.assert_called_once_with( - self._volume.id, "host@backend-name#pool", True, True) self.assertIsNone(result) + self.volume_sdk_client.find_volume.assert_called_with( + self.volume.id, ignore_missing=False + ) + self.volume_sdk_client.migrate_volume.assert_called_once_with( + self.volume.id, + host="host@backend-name#pool", + force_host_copy=True, + lock_volume=True, + ) + def test_volume_migrate_without_host(self): arglist = [ - self._volume.id, + self.volume.id, ] verifylist = [ ("force_host_copy", False), ("lock_volume", False), - ("volume", self._volume.id), + ("volume", self.volume.id), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + self.volume_sdk_client.find_volume.assert_not_called() + self.volume_sdk_client.migrate_volume.assert_not_called() -class TestVolumeSet(TestVolume): +class TestVolumeSet(TestVolume): volume_type = volume_fakes.create_one_volume_type() def setUp(self): @@ -1339,33 +1397,38 @@ def setUp(self): def test_volume_set_property(self): arglist = [ - '--property', 'a=b', - '--property', 'c=d', + '--property', + 'a=b', + '--property', + 'c=d', self.new_volume.id, ] verifylist = [ - ('property', {'a': 'b', 'c': 'd'}), + ('properties', {'a': 'b', 'c': 'd'}), ('volume', self.new_volume.id), - ('bootable', False), - ('non_bootable', False) + ('bootable', None), + ('read_only', None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) 
self.volumes_mock.set_metadata.assert_called_with( - self.new_volume.id, parsed_args.property) + self.new_volume.id, parsed_args.properties + ) def test_volume_set_image_property(self): arglist = [ - '--image-property', 'Alpha=a', - '--image-property', 'Beta=b', + '--image-property', + 'Alpha=a', + '--image-property', + 'Beta=b', self.new_volume.id, ] verifylist = [ - ('image_property', {'Alpha': 'a', 'Beta': 'b'}), + ('image_properties', {'Alpha': 'a', 'Beta': 'b'}), ('volume', self.new_volume.id), - ('bootable', False), - ('non_bootable', False) + ('bootable', None), + ('read_only', None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -1373,243 +1436,274 @@ def test_volume_set_image_property(self): # returns nothing self.cmd.take_action(parsed_args) self.volumes_mock.set_image_metadata.assert_called_with( - self.new_volume.id, parsed_args.image_property) + self.new_volume.id, parsed_args.image_properties + ) def test_volume_set_state(self): - arglist = [ - '--state', 'error', - self.new_volume.id - ] + arglist = ['--state', 'error', self.new_volume.id] verifylist = [ - ('read_only', False), - ('read_write', False), + ('read_only', None), ('state', 'error'), - ('volume', self.new_volume.id) + ('volume', self.new_volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.volumes_mock.reset_state.assert_called_with( - self.new_volume.id, 'error') + self.new_volume.id, 'error' + ) self.volumes_mock.update_readonly_flag.assert_not_called() self.assertIsNone(result) def test_volume_set_state_failed(self): self.volumes_mock.reset_state.side_effect = exceptions.CommandError() - arglist = [ - '--state', 'error', - self.new_volume.id - ] - verifylist = [ - ('state', 'error'), - ('volume', self.new_volume.id) - ] + arglist = ['--state', 'error', self.new_volume.id] + verifylist = [('state', 'error'), ('volume', self.new_volume.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('One or more of the set operations failed', - str(e)) + self.assertEqual( + 'One or more of the set operations failed', str(e) + ) self.volumes_mock.reset_state.assert_called_with( - self.new_volume.id, 'error') + self.new_volume.id, 'error' + ) def test_volume_set_attached(self): - arglist = [ - '--attached', - self.new_volume.id - ] + arglist = ['--attached', self.new_volume.id] verifylist = [ ('attached', True), ('detached', False), - ('volume', self.new_volume.id) + ('volume', self.new_volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.volumes_mock.reset_state.assert_called_with( - self.new_volume.id, attach_status='attached', state=None) + self.new_volume.id, attach_status='attached', state=None + ) self.assertIsNone(result) def test_volume_set_detached(self): - arglist = [ - '--detached', - self.new_volume.id - ] + arglist = ['--detached', self.new_volume.id] verifylist = [ ('attached', False), ('detached', True), - ('volume', self.new_volume.id) + ('volume', self.new_volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.volumes_mock.reset_state.assert_called_with( - self.new_volume.id, attach_status='detached', state=None) + self.new_volume.id, attach_status='detached', state=None + ) self.assertIsNone(result) def 
test_volume_set_bootable(self): arglist = [ - ['--bootable', self.new_volume.id], - ['--non-bootable', self.new_volume.id] + '--bootable', + self.new_volume.id, ] verifylist = [ - [ - ('bootable', True), - ('non_bootable', False), - ('volume', self.new_volume.id) - ], - [ - ('bootable', False), - ('non_bootable', True), - ('volume', self.new_volume.id) - ] + ('bootable', True), + ('volume', self.new_volume.id), ] - for index in range(len(arglist)): - parsed_args = self.check_parser( - self.cmd, arglist[index], verifylist[index]) + parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) - self.volumes_mock.set_bootable.assert_called_with( - self.new_volume.id, verifylist[index][0][1]) + self.cmd.take_action(parsed_args) + self.volumes_mock.set_bootable.assert_called_with( + self.new_volume.id, verifylist[0][1] + ) - def test_volume_set_readonly(self): + def test_volume_set_non_bootable(self): arglist = [ - '--read-only', - self.new_volume.id + '--non-bootable', + self.new_volume.id, ] + verifylist = [ + ('bootable', False), + ('volume', self.new_volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.cmd.take_action(parsed_args) + self.volumes_mock.set_bootable.assert_called_with( + self.new_volume.id, verifylist[0][1] + ) + + def test_volume_set_read_only(self): + arglist = ['--read-only', self.new_volume.id] verifylist = [ ('read_only', True), - ('read_write', False), - ('volume', self.new_volume.id) + ('volume', self.new_volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.volumes_mock.update_readonly_flag.assert_called_once_with( - self.new_volume.id, - True) + self.new_volume.id, True + ) self.assertIsNone(result) def test_volume_set_read_write(self): - arglist = [ - '--read-write', - self.new_volume.id - ] + arglist = ['--read-write', self.new_volume.id] verifylist = [ ('read_only', False), - ('read_write', True), - ('volume', self.new_volume.id) + ('volume', self.new_volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.volumes_mock.update_readonly_flag.assert_called_once_with( - self.new_volume.id, - False) + self.new_volume.id, False + ) self.assertIsNone(result) def test_volume_set_type(self): - arglist = [ - '--type', self.volume_type.id, - self.new_volume.id - ] + arglist = ['--type', self.volume_type.id, self.new_volume.id] verifylist = [ ('retype_policy', None), ('type', self.volume_type.id), - ('volume', self.new_volume.id) + ('volume', self.new_volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.volumes_mock.retype.assert_called_once_with( - self.new_volume.id, - self.volume_type.id, - 'never') + self.new_volume.id, self.volume_type.id, 'never' + ) self.assertIsNone(result) def test_volume_set_type_with_policy(self): arglist = [ - '--retype-policy', 'on-demand', - '--type', self.volume_type.id, - self.new_volume.id + '--retype-policy', + 'on-demand', + '--type', + self.volume_type.id, + self.new_volume.id, ] verifylist = [ ('retype_policy', 'on-demand'), ('type', self.volume_type.id), - ('volume', self.new_volume.id) + ('volume', self.new_volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.volumes_mock.retype.assert_called_once_with( - self.new_volume.id, - self.volume_type.id, - 'on-demand') + self.new_volume.id, 
self.volume_type.id, 'on-demand' + ) self.assertIsNone(result) @mock.patch.object(volume.LOG, 'warning') def test_volume_set_with_only_retype_policy(self, mock_warning): - arglist = [ - '--retype-policy', 'on-demand', - self.new_volume.id - ] + arglist = ['--retype-policy', 'on-demand', self.new_volume.id] verifylist = [ ('retype_policy', 'on-demand'), - ('volume', self.new_volume.id) + ('volume', self.new_volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.volumes_mock.retype.assert_not_called() - mock_warning.assert_called_with("'--retype-policy' option will " - "not work without '--type' option") + mock_warning.assert_called_with( + "'--retype-policy' option will not work without '--type' option" + ) self.assertIsNone(result) -class TestVolumeShow(TestVolume): - +class TestVolumeShow(volume_fakes.TestVolume): def setUp(self): super().setUp() - self._volume = volume_fakes.create_one_volume() - self.volumes_mock.get.return_value = self._volume - # Get the command object to test + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + + self.columns = ( + 'attachments', + 'availability_zone', + 'bootable', + 'consistencygroup_id', + 'created_at', + 'description', + 'encrypted', + 'id', + 'multiattach', + 'name', + 'os-vol-host-attr:host', + 'os-vol-mig-status-attr:migstat', + 'os-vol-mig-status-attr:name_id', + 'os-vol-tenant-attr:tenant_id', + 'os-volume-replication:driver_data', + 'os-volume-replication:extended_status', + 'properties', + 'replication_status', + 'size', + 'snapshot_id', + 'source_volid', + 'status', + 'type', + 'updated_at', + 'user_id', + 'volume_image_metadata', + ) + self.data = ( + self.volume.attachments, + self.volume.availability_zone, + self.volume.is_bootable, + self.volume.consistency_group_id, + self.volume.created_at, + self.volume.description, + self.volume.is_encrypted, + self.volume.id, + self.volume.is_multiattach, + self.volume.name, + self.volume.host, + self.volume.migration_status, + self.volume.migration_id, + self.volume.project_id, + self.volume.replication_driver_data, + self.volume.extended_replication_status, + format_columns.DictColumn(self.volume.metadata), + self.volume.replication_status, + self.volume.size, + self.volume.snapshot_id, + self.volume.source_volume_id, + self.volume.status, + self.volume.volume_type, + self.volume.updated_at, + self.volume.user_id, + self.volume.volume_image_metadata, + ) + self.cmd = volume.ShowVolume(self.app, None) def test_volume_show(self): - arglist = [ - self._volume.id - ] - verifylist = [ - ("volume", self._volume.id) - ] + arglist = [self.volume.id] + verifylist = [("volume", self.volume.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.get.assert_called_with(self._volume.id) - self.assertEqual( - volume_fakes.get_volume_columns(self._volume), - columns, - ) - self.assertCountEqual( - volume_fakes.get_volume_data(self._volume), - data, + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + self.volume_sdk_client.find_volume.assert_called_with( + self.volume.id, ignore_missing=False ) class TestVolumeUnset(TestVolume): - def setUp(self): super().setUp() @@ -1623,15 +1717,16 @@ def setUp(self): self.cmd_unset = volume.UnsetVolume(self.app, None) def test_volume_unset_image_property(self): - # Arguments for setting image properties arglist = [ - 
'--image-property', 'Alpha=a', - '--image-property', 'Beta=b', + '--image-property', + 'Alpha=a', + '--image-property', + 'Beta=b', self.new_volume.id, ] verifylist = [ - ('image_property', {'Alpha': 'a', 'Beta': 'b'}), + ('image_properties', {'Alpha': 'a', 'Beta': 'b'}), ('volume', self.new_volume.id), ] parsed_args = self.check_parser(self.cmd_set, arglist, verifylist) @@ -1642,75 +1737,100 @@ def test_volume_unset_image_property(self): # Arguments for unsetting image properties arglist_unset = [ - '--image-property', 'Alpha', + '--image-property', + 'Alpha', self.new_volume.id, ] verifylist_unset = [ - ('image_property', ['Alpha']), + ('image_properties', ['Alpha']), ('volume', self.new_volume.id), ] - parsed_args_unset = self.check_parser(self.cmd_unset, - arglist_unset, - verifylist_unset) + parsed_args_unset = self.check_parser( + self.cmd_unset, arglist_unset, verifylist_unset + ) # In base command class ShowOne in cliff, abstract method take_action() # returns nothing self.cmd_unset.take_action(parsed_args_unset) self.volumes_mock.delete_image_metadata.assert_called_with( - self.new_volume.id, parsed_args_unset.image_property) + self.new_volume.id, parsed_args_unset.image_properties + ) def test_volume_unset_image_property_fail(self): self.volumes_mock.delete_image_metadata.side_effect = ( - exceptions.CommandError()) + exceptions.CommandError() + ) arglist = [ - '--image-property', 'Alpha', - '--property', 'Beta', + '--image-property', + 'Alpha', + '--property', + 'Beta', self.new_volume.id, ] verifylist = [ - ('image_property', ['Alpha']), - ('property', ['Beta']), + ('image_properties', ['Alpha']), + ('properties', ['Beta']), ('volume', self.new_volume.id), ] - parsed_args = self.check_parser( - self.cmd_unset, arglist, verifylist) + parsed_args = self.check_parser(self.cmd_unset, arglist, verifylist) try: self.cmd_unset.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('One or more of the unset operations failed', - str(e)) + self.assertEqual( + 'One or more of the unset operations failed', str(e) + ) self.volumes_mock.delete_image_metadata.assert_called_with( - self.new_volume.id, parsed_args.image_property) + self.new_volume.id, parsed_args.image_properties + ) self.volumes_mock.delete_metadata.assert_called_with( - self.new_volume.id, parsed_args.property) - + self.new_volume.id, parsed_args.properties + ) -class TestColumns(TestVolume): +class TestColumns(volume_fakes.TestVolume): def test_attachments_column_without_server_cache(self): - _volume = volume_fakes.create_one_volume() - server_id = _volume.attachments[0]['server_id'] - device = _volume.attachments[0]['device'] + vol = sdk_fakes.generate_fake_resource( + _volume.Volume, + attachments=[ + { + 'device': '/dev/' + uuid.uuid4().hex, + 'server_id': uuid.uuid4().hex, + }, + ], + ) + server_id = vol.attachments[0]['server_id'] + device = vol.attachments[0]['device'] - col = volume.AttachmentsColumn(_volume.attachments, {}) - self.assertEqual('Attached to %s on %s ' % (server_id, device), - col.human_readable()) - self.assertEqual(_volume.attachments, col.machine_readable()) + col = volume.AttachmentsColumn(vol.attachments, {}) + self.assertEqual( + f'Attached to {server_id} on {device} ', + col.human_readable(), + ) + self.assertEqual(vol.attachments, col.machine_readable()) def test_attachments_column_with_server_cache(self): - _volume = volume_fakes.create_one_volume() + vol = sdk_fakes.generate_fake_resource( + _volume.Volume, + attachments=[ 
+ { + 'device': '/dev/' + uuid.uuid4().hex, + 'server_id': uuid.uuid4().hex, + }, + ], + ) - server_id = _volume.attachments[0]['server_id'] - device = _volume.attachments[0]['device'] + server_id = vol.attachments[0]['server_id'] + device = vol.attachments[0]['device'] fake_server = mock.Mock() fake_server.name = 'fake-server-name' server_cache = {server_id: fake_server} - col = volume.AttachmentsColumn(_volume.attachments, server_cache) + col = volume.AttachmentsColumn(vol.attachments, server_cache) self.assertEqual( - 'Attached to %s on %s ' % ('fake-server-name', device), - col.human_readable()) - self.assertEqual(_volume.attachments, col.machine_readable()) + 'Attached to {} on {} '.format('fake-server-name', device), + col.human_readable(), + ) + self.assertEqual(vol.attachments, col.machine_readable()) diff --git a/openstackclient/tests/unit/volume/v2/test_volume_backend.py b/openstackclient/tests/unit/volume/v2/test_volume_backend.py index 6c64f64558..8a11f01c53 100644 --- a/openstackclient/tests/unit/volume/v2/test_volume_backend.py +++ b/openstackclient/tests/unit/volume/v2/test_volume_backend.py @@ -12,6 +12,8 @@ # under the License. # +from osc_lib.cli import format_columns + from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes from openstackclient.volume.v2 import volume_backend @@ -25,9 +27,8 @@ class TestShowVolumeCapability(volume_fakes.TestVolume): def setUp(self): super().setUp() - # Get a shortcut to the capability Mock - self.capability_mock = self.app.client_manager.volume.capabilities - self.capability_mock.get.return_value = self.capability + # Assign return value to capabilities mock + self.volume_sdk_client.get_capabilities.return_value = self.capability # Get the command object to test self.cmd = volume_backend.ShowCapability(self.app, None) @@ -68,7 +69,7 @@ def test_capability_show(self): self.assertIn(cap[0], capabilities) # checking if proper call was made to get capabilities - self.capability_mock.get.assert_called_with( + self.volume_sdk_client.get_capabilities.assert_called_with( 'fake', ) @@ -82,8 +83,7 @@ class TestListVolumePool(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.pool_mock = self.app.client_manager.volume.pools - self.pool_mock.list.return_value = [self.pools] + self.volume_sdk_client.backend_pools.return_value = [self.pools] # Get the command object to test self.cmd = volume_backend.ListPool(self.app, None) @@ -105,15 +105,13 @@ def test_pool_list(self): # confirming if all expected columns are present in the result. self.assertEqual(expected_columns, columns) - datalist = (( - self.pools.name, - ), ) + datalist = ((self.pools.name,),) # confirming if all expected values are present in the result. 
self.assertEqual(datalist, tuple(data)) # checking if proper call was made to list pools - self.pool_mock.list.assert_called_with( + self.volume_sdk_client.backend_pools.assert_called_with( detailed=False, ) @@ -122,12 +120,8 @@ def test_pool_list(self): self.assertNotIn("storage_protocol", columns) def test_service_list_with_long_option(self): - arglist = [ - '--long' - ] - verifylist = [ - ('long', True) - ] + arglist = ['--long'] + verifylist = [('long', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() @@ -137,32 +131,22 @@ def test_service_list_with_long_option(self): expected_columns = [ 'Name', - 'Protocol', - 'Thick', - 'Thin', - 'Volumes', - 'Capacity', - 'Allocated', - 'Max Over Ratio', + 'Capabilities', ] # confirming if all expected columns are present in the result. self.assertEqual(expected_columns, columns) - datalist = (( - self.pools.name, - self.pools.storage_protocol, - self.pools.thick_provisioning_support, - self.pools.thin_provisioning_support, - self.pools.total_volumes, - self.pools.total_capacity_gb, - self.pools.allocated_capacity_gb, - self.pools.max_over_subscription_ratio, - ), ) + datalist = ( + ( + self.pools.name, + format_columns.DictColumn(self.pools.capabilities), + ), + ) # confirming if all expected values are present in the result. self.assertEqual(datalist, tuple(data)) - self.pool_mock.list.assert_called_with( + self.volume_sdk_client.backend_pools.assert_called_with( detailed=True, ) diff --git a/openstackclient/tests/unit/volume/v2/test_volume_backup.py b/openstackclient/tests/unit/volume/v2/test_volume_backup.py index 7d00b8bf81..e7bbb69999 100644 --- a/openstackclient/tests/unit/volume/v2/test_volume_backup.py +++ b/openstackclient/tests/unit/volume/v2/test_volume_backup.py @@ -10,263 +10,137 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-# -from unittest import mock from unittest.mock import call -from cinderclient import api_versions +from openstack.block_storage.v2 import backup as _backup +from openstack.block_storage.v2 import snapshot as _snapshot +from openstack.block_storage.v2 import volume as _volume +from openstack import exceptions as sdk_exceptions +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions -from osc_lib import utils from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes from openstackclient.volume.v2 import volume_backup -class TestBackup(volume_fakes.TestVolume): - - def setUp(self): - super().setUp() - - self.backups_mock = self.app.client_manager.volume.backups - self.backups_mock.reset_mock() - self.volumes_mock = self.app.client_manager.volume.volumes - self.volumes_mock.reset_mock() - self.snapshots_mock = self.app.client_manager.volume.volume_snapshots - self.snapshots_mock.reset_mock() - self.restores_mock = self.app.client_manager.volume.restores - self.restores_mock.reset_mock() - - -class TestBackupCreate(TestBackup): - - volume = volume_fakes.create_one_volume() - snapshot = volume_fakes.create_one_snapshot() - new_backup = volume_fakes.create_one_backup( - attrs={'volume_id': volume.id, 'snapshot_id': snapshot.id}) - +class TestBackupCreate(volume_fakes.TestVolume): columns = ( - 'availability_zone', - 'container', - 'description', 'id', 'name', - 'object_count', - 'size', - 'snapshot_id', - 'status', 'volume_id', ) - data = ( - new_backup.availability_zone, - new_backup.container, - new_backup.description, - new_backup.id, - new_backup.name, - new_backup.object_count, - new_backup.size, - new_backup.snapshot_id, - new_backup.status, - new_backup.volume_id, - ) def setUp(self): super().setUp() - self.volumes_mock.get.return_value = self.volume - self.snapshots_mock.get.return_value = self.snapshot - self.backups_mock.create.return_value = self.new_backup + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot) + self.volume_sdk_client.find_snapshot.return_value = self.snapshot + self.backup = sdk_fakes.generate_fake_resource( + _backup.Backup, + volume_id=self.volume.id, + snapshot_id=self.snapshot.id, + ) + self.volume_sdk_client.create_backup.return_value = self.backup + + self.data = ( + self.backup.id, + self.backup.name, + self.backup.volume_id, + ) - # Get the command object to test self.cmd = volume_backup.CreateVolumeBackup(self.app, None) def test_backup_create(self): arglist = [ - "--name", self.new_backup.name, - "--description", self.new_backup.description, - "--container", self.new_backup.container, + "--name", + self.backup.name, + "--description", + self.backup.description, + "--container", + self.backup.container, "--force", "--incremental", - "--snapshot", self.new_backup.snapshot_id, - self.new_backup.volume_id, + "--snapshot", + self.backup.snapshot_id, + self.backup.volume_id, ] verifylist = [ - ("name", self.new_backup.name), - ("description", self.new_backup.description), - ("container", self.new_backup.container), + ("name", self.backup.name), + ("description", self.backup.description), + ("container", self.backup.container), ("force", True), ("incremental", True), - ("snapshot", self.new_backup.snapshot_id), - ("volume", self.new_backup.volume_id), + ("snapshot", self.backup.snapshot_id), + ("volume", self.backup.volume_id), ] parsed_args = self.check_parser(self.cmd, arglist, 
verifylist) columns, data = self.cmd.take_action(parsed_args) - self.backups_mock.create.assert_called_with( - self.new_backup.volume_id, - container=self.new_backup.container, - name=self.new_backup.name, - description=self.new_backup.description, + self.volume_sdk_client.create_backup.assert_called_with( + volume_id=self.backup.volume_id, + container=self.backup.container, + name=self.backup.name, + description=self.backup.description, force=True, - incremental=True, - snapshot_id=self.new_backup.snapshot_id, + is_incremental=True, + snapshot_id=self.backup.snapshot_id, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) - def test_backup_create_with_properties(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.43') - - arglist = [ - "--property", "foo=bar", - "--property", "wow=much-cool", - self.new_backup.volume_id, - ] - verifylist = [ - ("properties", {"foo": "bar", "wow": "much-cool"}), - ("volume", self.new_backup.volume_id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.backups_mock.create.assert_called_with( - self.new_backup.volume_id, - container=None, - name=None, - description=None, - force=False, - incremental=False, - metadata={"foo": "bar", "wow": "much-cool"}, - ) - self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) - - def test_backup_create_with_properties_pre_v343(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.42') - - arglist = [ - "--property", "foo=bar", - "--property", "wow=much-cool", - self.new_backup.volume_id, - ] - verifylist = [ - ("properties", {"foo": "bar", "wow": "much-cool"}), - ("volume", self.new_backup.volume_id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn("--os-volume-api-version 3.43 or greater", str(exc)) - - def test_backup_create_with_availability_zone(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.51') - - arglist = [ - "--availability-zone", "my-az", - self.new_backup.volume_id, - ] - verifylist = [ - ("availability_zone", "my-az"), - ("volume", self.new_backup.volume_id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.backups_mock.create.assert_called_with( - self.new_backup.volume_id, - container=None, - name=None, - description=None, - force=False, - incremental=False, - availability_zone="my-az", - ) - self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) - - def test_backup_create_with_availability_zone_pre_v351(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.50') - - arglist = [ - "--availability-zone", "my-az", - self.new_backup.volume_id, - ] - verifylist = [ - ("availability_zone", "my-az"), - ("volume", self.new_backup.volume_id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn("--os-volume-api-version 3.51 or greater", str(exc)) - def test_backup_create_without_name(self): arglist = [ - "--description", self.new_backup.description, - "--container", self.new_backup.container, - self.new_backup.volume_id, + "--description", + self.backup.description, + "--container", + 
self.backup.container, + self.backup.volume_id, ] verifylist = [ - ("description", self.new_backup.description), - ("container", self.new_backup.container), - ("volume", self.new_backup.volume_id), + ("description", self.backup.description), + ("container", self.backup.container), + ("volume", self.backup.volume_id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.backups_mock.create.assert_called_with( - self.new_backup.volume_id, - container=self.new_backup.container, + self.volume_sdk_client.create_backup.assert_called_with( + volume_id=self.backup.volume_id, + container=self.backup.container, name=None, - description=self.new_backup.description, + description=self.backup.description, force=False, - incremental=False, + is_incremental=False, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) -class TestBackupDelete(TestBackup): - - backups = volume_fakes.create_backups(count=2) - +class TestBackupDelete(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.backups_mock.get = ( - volume_fakes.get_backups(self.backups)) - self.backups_mock.delete.return_value = None + self.backups = list(sdk_fakes.generate_fake_resources(_backup.Backup)) + self.volume_sdk_client.find_backup.side_effect = self.backups + self.volume_sdk_client.delete_backup.return_value = None - # Get the command object to mock self.cmd = volume_backup.DeleteVolumeBackup(self.app, None) def test_backup_delete(self): - arglist = [ - self.backups[0].id - ] - verifylist = [ - ("backups", [self.backups[0].id]) - ] + arglist = [self.backups[0].id] + verifylist = [("backups", [self.backups[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.backups_mock.delete.assert_called_with( - self.backups[0].id, False) + self.volume_sdk_client.delete_backup.assert_called_with( + self.backups[0].id, ignore_missing=False, force=False + ) self.assertIsNone(result) def test_backup_delete_with_force(self): @@ -274,15 +148,14 @@ def test_backup_delete_with_force(self): '--force', self.backups[0].id, ] - verifylist = [ - ('force', True), - ("backups", [self.backups[0].id]) - ] + verifylist = [('force', True), ("backups", [self.backups[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.backups_mock.delete.assert_called_with(self.backups[0].id, True) + self.volume_sdk_client.delete_backup.assert_called_with( + self.backups[0].id, ignore_missing=False, force=True + ) self.assertIsNone(result) def test_delete_multiple_backups(self): @@ -298,8 +171,8 @@ def test_delete_multiple_backups(self): calls = [] for b in self.backups: - calls.append(call(b.id, False)) - self.backups_mock.delete.assert_has_calls(calls) + calls.append(call(b.id, ignore_missing=False, force=False)) + self.volume_sdk_client.delete_backup.assert_has_calls(calls) self.assertIsNone(result) def test_delete_multiple_backups_with_exception(self): @@ -314,36 +187,38 @@ def test_delete_multiple_backups_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self.backups[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('1 of 2 backups failed to delete.', - str(e)) - - 
find_mock.assert_any_call(self.backups_mock, self.backups[0].id) - find_mock.assert_any_call(self.backups_mock, 'unexist_backup') - - self.assertEqual(2, find_mock.call_count) - self.backups_mock.delete.assert_called_once_with( - self.backups[0].id, False - ) + self.volume_sdk_client.find_backup.side_effect = find_mock_result + try: + self.cmd.take_action(parsed_args) + self.fail('CommandError should be raised.') + except exceptions.CommandError as e: + self.assertEqual('1 of 2 backups failed to delete.', str(e)) -class TestBackupList(TestBackup): + self.volume_sdk_client.find_backup.assert_any_call( + self.backups[0].id, ignore_missing=False + ) + self.volume_sdk_client.find_backup.assert_any_call( + 'unexist_backup', ignore_missing=False + ) + + self.assertEqual(2, self.volume_sdk_client.find_backup.call_count) + self.volume_sdk_client.delete_backup.assert_called_once_with( + self.backups[0].id, + ignore_missing=False, + force=False, + ) - volume = volume_fakes.create_one_volume() - backups = volume_fakes.create_backups( - attrs={'volume_id': volume.name}, count=3) +class TestBackupList(volume_fakes.TestVolume): columns = ( 'ID', 'Name', 'Description', 'Status', 'Size', + 'Incremental', + 'Created At', ) columns_long = columns + ( 'Availability Zone', @@ -351,36 +226,51 @@ class TestBackupList(TestBackup): 'Container', ) - data = [] - for b in backups: - data.append(( - b.id, - b.name, - b.description, - b.status, - b.size, - )) - data_long = [] - for b in backups: - data_long.append(( - b.id, - b.name, - b.description, - b.status, - b.size, - b.availability_zone, - volume_backup.VolumeIdColumn(b.volume_id), - b.container, - )) - def setUp(self): super().setUp() - self.volumes_mock.list.return_value = [self.volume] - self.backups_mock.list.return_value = self.backups - self.volumes_mock.get.return_value = self.volume - self.backups_mock.get.return_value = self.backups[0] - # Get the command to test + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.volume_sdk_client.volumes.return_value = [self.volume] + self.backups = list( + sdk_fakes.generate_fake_resources( + _backup.Backup, + attrs={'volume_id': self.volume.id}, + ) + ) + self.volume_sdk_client.backups.return_value = self.backups + self.volume_sdk_client.find_backup.return_value = self.backups[0] + + self.data = [] + for b in self.backups: + self.data.append( + ( + b.id, + b.name, + b.description, + b.status, + b.size, + b.is_incremental, + b.created_at, + ) + ) + self.data_long = [] + for b in self.backups: + self.data_long.append( + ( + b.id, + b.name, + b.description, + b.status, + b.size, + b.is_incremental, + b.created_at, + b.availability_zone, + volume_backup.VolumeIdColumn(b.volume_id), + b.container, + ) + ) + self.cmd = volume_backup.ListVolumeBackup(self.app, None) def test_backup_list_without_options(self): @@ -398,16 +288,13 @@ def test_backup_list_without_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - search_opts = { - "name": None, - "status": None, - "volume_id": None, - 'all_tenants': False, - } - self.volumes_mock.get.assert_not_called() - self.backups_mock.get.assert_not_called() - self.backups_mock.list.assert_called_with( - search_opts=search_opts, + self.volume_sdk_client.find_volume.assert_not_called() + self.volume_sdk_client.find_backup.assert_not_called() + self.volume_sdk_client.backups.assert_called_with( + name=None, + status=None, + 
volume_id=None, + all_tenants=False, marker=None, limit=None, ) @@ -417,12 +304,17 @@ def test_backup_list_without_options(self): def test_backup_list_with_options(self): arglist = [ "--long", - "--name", self.backups[0].name, - "--status", "error", - "--volume", self.volume.id, - "--marker", self.backups[0].id, + "--name", + self.backups[0].name, + "--status", + "error", + "--volume", + self.volume.id, + "--marker", + self.backups[0].id, "--all-projects", - "--limit", "3", + "--limit", + "3", ] verifylist = [ ("long", True), @@ -437,16 +329,17 @@ def test_backup_list_with_options(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - search_opts = { - "name": self.backups[0].name, - "status": "error", - "volume_id": self.volume.id, - 'all_tenants': True, - } - self.volumes_mock.get.assert_called_once_with(self.volume.id) - self.backups_mock.get.assert_called_once_with(self.backups[0].id) - self.backups_mock.list.assert_called_with( - search_opts=search_opts, + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volume.id, ignore_missing=False + ) + self.volume_sdk_client.find_backup.assert_called_once_with( + self.backups[0].id, ignore_missing=False + ) + self.volume_sdk_client.backups.assert_called_with( + name=self.backups[0].name, + status="error", + volume_id=self.volume.id, + all_tenants=True, marker=self.backups[0].id, limit=3, ) @@ -454,47 +347,61 @@ def test_backup_list_with_options(self): self.assertCountEqual(self.data_long, list(data)) -class TestBackupRestore(TestBackup): - - volume = volume_fakes.create_one_volume() - backup = volume_fakes.create_one_backup( - attrs={'volume_id': volume.id}, +class TestBackupRestore(volume_fakes.TestVolume): + columns = ( + "id", + "volume_id", + "volume_name", ) def setUp(self): super().setUp() - self.backups_mock.get.return_value = self.backup - self.volumes_mock.get.return_value = self.volume - self.restores_mock.restore.return_value = ( - volume_fakes.create_one_volume( - {'id': self.volume['id']}, - ) + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.backup = sdk_fakes.generate_fake_resource( + _backup.Backup, volume_id=self.volume.id + ) + self.volume_sdk_client.find_backup.return_value = self.backup + self.volume_sdk_client.restore_backup.return_value = { + 'id': self.backup['id'], + 'volume_id': self.volume['id'], + 'volume_name': self.volume['name'], + } + + self.data = ( + self.backup.id, + self.volume.id, + self.volume.name, ) - # Get the command object to mock + self.cmd = volume_backup.RestoreVolumeBackup(self.app, None) def test_backup_restore(self): - self.volumes_mock.get.side_effect = exceptions.CommandError() - self.volumes_mock.find.side_effect = exceptions.CommandError() - arglist = [ - self.backup.id - ] + self.volume_sdk_client.find_volume.side_effect = ( + exceptions.CommandError() + ) + arglist = [self.backup.id] verifylist = [ ("backup", self.backup.id), ("volume", None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.restores_mock.restore.assert_called_with( - self.backup.id, None, None, + columns, data = self.cmd.take_action(parsed_args) + self.volume_sdk_client.restore_backup.assert_called_with( + self.backup.id, + volume_id=None, + name=None, ) - self.assertIsNotNone(result) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) def 
test_backup_restore_with_volume(self): - self.volumes_mock.get.side_effect = exceptions.CommandError() - self.volumes_mock.find.side_effect = exceptions.CommandError() + self.volume_sdk_client.find_volume.side_effect = ( + exceptions.CommandError() + ) arglist = [ self.backup.id, self.backup.volume_id, @@ -505,11 +412,15 @@ def test_backup_restore_with_volume(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.restores_mock.restore.assert_called_with( - self.backup.id, None, self.backup.volume_id, + columns, data = self.cmd.take_action(parsed_args) + self.volume_sdk_client.restore_backup.assert_called_with( + self.backup.id, + volume_id=None, + name=self.backup.volume_id, ) - self.assertIsNotNone(result) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) def test_backup_restore_with_volume_force(self): arglist = [ @@ -524,11 +435,15 @@ def test_backup_restore_with_volume_force(self): ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - self.restores_mock.restore.assert_called_with( - self.backup.id, self.volume.id, None, + columns, data = self.cmd.take_action(parsed_args) + self.volume_sdk_client.restore_backup.assert_called_with( + self.backup.id, + volume_id=self.volume.id, + name=None, ) - self.assertIsNotNone(result) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) def test_backup_restore_with_volume_existing(self): arglist = [ @@ -548,348 +463,111 @@ def test_backup_restore_with_volume_existing(self): ) -class TestBackupSet(TestBackup): - - backup = volume_fakes.create_one_backup( - attrs={'metadata': {'wow': 'cool'}}, - ) - +class TestBackupSet(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.backups_mock.get.return_value = self.backup + self.backup = sdk_fakes.generate_fake_resource( + _backup.Backup, metadata={'wow': 'cool'} + ) + self.volume_sdk_client.find_backup.return_value = self.backup - # Get the command object to test self.cmd = volume_backup.SetVolumeBackup(self.app, None) - def test_backup_set_name(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.9') + def test_backup_set_state(self): + arglist = ['--state', 'error', self.backup.id] + verifylist = [('state', 'error'), ('backup', self.backup.id)] - arglist = [ - '--name', 'new_name', - self.backup.id, - ] - verifylist = [ - ('name', 'new_name'), - ('backup', self.backup.id), - ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - # In base command class ShowOne in cliff, abstract method take_action() - # returns nothing result = self.cmd.take_action(parsed_args) - self.backups_mock.update.assert_called_once_with( - self.backup.id, **{'name': 'new_name'}) self.assertIsNone(result) - def test_backup_set_name_pre_v39(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.8') - - arglist = [ - '--name', 'new_name', - self.backup.id, - ] - verifylist = [ - ('name', 'new_name'), - ('backup', self.backup.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn("--os-volume-api-version 3.9 or greater", str(exc)) - - def test_backup_set_description(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.9') - - arglist = [ - '--description', 'new_description', - self.backup.id, - ] - verifylist = 
[ - ('name', None), - ('description', 'new_description'), - ('backup', self.backup.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = { - 'description': 'new_description' - } - self.backups_mock.update.assert_called_once_with( - self.backup.id, - **kwargs + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + self.volume_sdk_client.reset_backup_status.assert_called_with( + self.backup, status='error' ) - self.assertIsNone(result) - - def test_backup_set_description_pre_v39(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.8') - - arglist = [ - '--description', 'new_description', - self.backup.id, - ] - verifylist = [ - ('name', None), - ('description', 'new_description'), - ('backup', self.backup.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn("--os-volume-api-version 3.9 or greater", str(exc)) - - def test_backup_set_state(self): - arglist = [ - '--state', 'error', - self.backup.id - ] - verifylist = [ - ('state', 'error'), - ('backup', self.backup.id) - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - self.backups_mock.reset_state.assert_called_once_with( - self.backup.id, 'error') - self.assertIsNone(result) def test_backup_set_state_failed(self): - self.backups_mock.reset_state.side_effect = exceptions.CommandError() - arglist = [ - '--state', 'error', - self.backup.id - ] - verifylist = [ - ('state', 'error'), - ('backup', self.backup.id) - ] - - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('One or more of the set operations failed', - str(e)) - self.backups_mock.reset_state.assert_called_with( - self.backup.id, 'error') - - def test_backup_set_no_property(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.43') - - arglist = [ - '--no-property', - self.backup.id, - ] - verifylist = [ - ('no_property', True), - ('backup', self.backup.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = { - 'metadata': {}, - } - self.backups_mock.update.assert_called_once_with( - self.backup.id, - **kwargs + self.volume_sdk_client.reset_backup_status.side_effect = ( + sdk_exceptions.NotFoundException('foo') ) - self.assertIsNone(result) - def test_backup_set_no_property_pre_v343(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.42') + arglist = ['--state', 'error', self.backup.id] + verifylist = [('state', 'error'), ('backup', self.backup.id)] - arglist = [ - '--no-property', - self.backup.id, - ] - verifylist = [ - ('no_property', True), - ('backup', self.backup.id), - ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn("--os-volume-api-version 3.43 or greater", str(exc)) - - def test_backup_set_property(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.43') - - arglist = [ - '--property', 'foo=bar', - self.backup.id, - ] - 
verifylist = [ - ('properties', {'foo': 'bar'}), - ('backup', self.backup.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = { - 'metadata': {'wow': 'cool', 'foo': 'bar'}, - } - self.backups_mock.update.assert_called_once_with( - self.backup.id, - **kwargs + exceptions.CommandError, self.cmd.take_action, parsed_args ) - self.assertIsNone(result) - - def test_backup_set_property_pre_v343(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.42') - - arglist = [ - '--property', 'foo=bar', - self.backup.id, - ] - verifylist = [ - ('properties', {'foo': 'bar'}), - ('backup', self.backup.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn("--os-volume-api-version 3.43 or greater", str(exc)) + self.assertEqual('One or more of the set operations failed', str(exc)) + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + self.volume_sdk_client.reset_backup_status.assert_called_with( + self.backup, status='error' + ) -class TestBackupUnset(TestBackup): - backup = volume_fakes.create_one_backup( - attrs={'metadata': {'foo': 'bar'}}, +class TestBackupShow(volume_fakes.TestVolume): + columns = ( + "availability_zone", + "container", + "created_at", + "data_timestamp", + "description", + "fail_reason", + "has_dependent_backups", + "id", + "is_incremental", + "name", + "object_count", + "size", + "snapshot_id", + "status", + "updated_at", + "volume_id", ) def setUp(self): super().setUp() - self.backups_mock.get.return_value = self.backup - - # Get the command object to test - self.cmd = volume_backup.UnsetVolumeBackup(self.app, None) - - def test_backup_unset_property(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.43') - - arglist = [ - '--property', 'foo', - self.backup.id, - ] - verifylist = [ - ('properties', ['foo']), - ('backup', self.backup.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = { - 'metadata': {}, - } - self.backups_mock.update.assert_called_once_with( + self.backup = sdk_fakes.generate_fake_resource(_backup.Backup) + self.volume_sdk_client.find_backup.return_value = self.backup + + self.data = ( + self.backup.availability_zone, + self.backup.container, + self.backup.created_at, + self.backup.data_timestamp, + self.backup.description, + self.backup.fail_reason, + self.backup.has_dependent_backups, self.backup.id, - **kwargs + self.backup.is_incremental, + self.backup.name, + self.backup.object_count, + self.backup.size, + self.backup.snapshot_id, + self.backup.status, + self.backup.updated_at, + self.backup.volume_id, ) - self.assertIsNone(result) - - def test_backup_unset_property_pre_v343(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.42') - - arglist = [ - '--property', 'foo', - self.backup.id, - ] - verifylist = [ - ('properties', ['foo']), - ('backup', self.backup.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn("--os-volume-api-version 3.43 or greater", str(exc)) - - -class TestBackupShow(TestBackup): - backup = volume_fakes.create_one_backup() - - columns 
= ( - 'availability_zone', - 'container', - 'description', - 'id', - 'name', - 'object_count', - 'size', - 'snapshot_id', - 'status', - 'volume_id', - ) - data = ( - backup.availability_zone, - backup.container, - backup.description, - backup.id, - backup.name, - backup.object_count, - backup.size, - backup.snapshot_id, - backup.status, - backup.volume_id, - ) - - def setUp(self): - super().setUp() - - self.backups_mock.get.return_value = self.backup - # Get the command object to test self.cmd = volume_backup.ShowVolumeBackup(self.app, None) def test_backup_show(self): - arglist = [ - self.backup.id - ] - verifylist = [ - ("backup", self.backup.id) - ] + arglist = [self.backup.id] + verifylist = [("backup", self.backup.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.backups_mock.get.assert_called_with(self.backup.id) + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/volume/v2/test_volume_host.py b/openstackclient/tests/unit/volume/v2/test_volume_host.py index 730085a379..498ab0d383 100644 --- a/openstackclient/tests/unit/volume/v2/test_volume_host.py +++ b/openstackclient/tests/unit/volume/v2/test_volume_host.py @@ -17,16 +17,14 @@ class TestVolumeHost(volume_fakes.TestVolume): - def setUp(self): super().setUp() - self.host_mock = self.app.client_manager.volume.services + self.host_mock = self.volume_client.services self.host_mock.reset_mock() class TestVolumeHostSet(TestVolumeHost): - service = volume_fakes.create_one_service() def setUp(self): @@ -88,7 +86,6 @@ def test_volume_host_set_disable(self): class TestVolumeHostFailover(TestVolumeHost): - service = volume_fakes.create_one_service() def setUp(self): @@ -101,7 +98,8 @@ def setUp(self): def test_volume_host_failover(self): arglist = [ - '--volume-backend', 'backend_test', + '--volume-backend', + 'backend_test', self.service.host, ] verifylist = [ @@ -113,5 +111,6 @@ def test_volume_host_failover(self): result = self.cmd.take_action(parsed_args) self.host_mock.failover_host.assert_called_with( - self.service.host, 'backend_test') + self.service.host, 'backend_test' + ) self.assertIsNone(result) diff --git a/openstackclient/tests/unit/volume/v2/test_volume_snapshot.py b/openstackclient/tests/unit/volume/v2/test_volume_snapshot.py index 6cffcaaca9..0df379bb24 100644 --- a/openstackclient/tests/unit/volume/v2/test_volume_snapshot.py +++ b/openstackclient/tests/unit/volume/v2/test_volume_snapshot.py @@ -10,36 +10,23 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
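# A minimal, self-contained sketch (illustration only, assuming openstacksdk
# is installed; not part of the patch hunks around it) of the fake-resource
# pattern the rewritten tests in this change rely on: openstack.test.fakes
# generates SDK resource objects with synthesized attribute values, and a
# plain mock stands in for the block storage proxy so assertions can target
# proxy methods such as find_snapshot()/delete_snapshot() directly.
from unittest import mock

from openstack.block_storage.v2 import snapshot as _snapshot
from openstack.test import fakes as sdk_fakes

# Generate a fake Snapshot resource; its attributes (id, name, status, ...)
# are filled in automatically from the resource's field definitions, so the
# test can reuse them in its expected values.
snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot)

# Mock the SDK proxy and wire the fake resource into the lookup call.
volume_sdk_client = mock.Mock()
volume_sdk_client.find_snapshot.return_value = snapshot

# Exercise the proxy the way the command code would, then assert on it.
found = volume_sdk_client.find_snapshot(snapshot.id, ignore_missing=False)
volume_sdk_client.delete_snapshot(found.id, force=False)
volume_sdk_client.delete_snapshot.assert_called_once_with(
    snapshot.id, force=False
)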
-# -import argparse from unittest import mock +from openstack.block_storage.v2 import snapshot as _snapshot +from openstack.block_storage.v3 import volume as _volume +from openstack import exceptions as sdk_exceptions +from openstack.test import fakes as sdk_fakes from osc_lib.cli import format_columns from osc_lib import exceptions -from osc_lib import utils from openstackclient.tests.unit.identity.v3 import fakes as project_fakes -from openstackclient.tests.unit import utils as tests_utils +from openstackclient.tests.unit import utils as test_utils from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes from openstackclient.volume.v2 import volume_snapshot -class TestVolumeSnapshot(volume_fakes.TestVolume): - - def setUp(self): - super().setUp() - - self.snapshots_mock = self.app.client_manager.volume.volume_snapshots - self.snapshots_mock.reset_mock() - self.volumes_mock = self.app.client_manager.volume.volumes - self.volumes_mock.reset_mock() - self.project_mock = self.app.client_manager.identity.projects - self.project_mock.reset_mock() - - -class TestVolumeSnapshotCreate(TestVolumeSnapshot): - +class TestVolumeSnapshotCreate(volume_fakes.TestVolume): columns = ( 'created_at', 'description', @@ -54,66 +41,74 @@ class TestVolumeSnapshotCreate(TestVolumeSnapshot): def setUp(self): super().setUp() - self.volume = volume_fakes.create_one_volume() - self.new_snapshot = volume_fakes.create_one_snapshot( - attrs={'volume_id': self.volume.id}) + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.snapshot = sdk_fakes.generate_fake_resource( + _snapshot.Snapshot, volume_id=self.volume.id + ) + self.volume_sdk_client.create_snapshot.return_value = self.snapshot + self.volume_sdk_client.manage_snapshot.return_value = self.snapshot self.data = ( - self.new_snapshot.created_at, - self.new_snapshot.description, - self.new_snapshot.id, - self.new_snapshot.name, - format_columns.DictColumn(self.new_snapshot.metadata), - self.new_snapshot.size, - self.new_snapshot.status, - self.new_snapshot.volume_id, - ) - - self.volumes_mock.get.return_value = self.volume - self.snapshots_mock.create.return_value = self.new_snapshot - self.snapshots_mock.manage.return_value = self.new_snapshot - # Get the command object to test + self.snapshot.created_at, + self.snapshot.description, + self.snapshot.id, + self.snapshot.name, + format_columns.DictColumn(self.snapshot.metadata), + self.snapshot.size, + self.snapshot.status, + self.snapshot.volume_id, + ) + self.cmd = volume_snapshot.CreateVolumeSnapshot(self.app, None) def test_snapshot_create(self): arglist = [ - "--volume", self.new_snapshot.volume_id, - "--description", self.new_snapshot.description, + "--volume", + self.snapshot.volume_id, + "--description", + self.snapshot.description, "--force", - '--property', 'Alpha=a', - '--property', 'Beta=b', - self.new_snapshot.name, + '--property', + 'Alpha=a', + '--property', + 'Beta=b', + self.snapshot.name, ] verifylist = [ - ("volume", self.new_snapshot.volume_id), - ("description", self.new_snapshot.description), + ("volume", self.snapshot.volume_id), + ("description", self.snapshot.description), ("force", True), - ('property', {'Alpha': 'a', 'Beta': 'b'}), - ("snapshot_name", self.new_snapshot.name), + ('properties', {'Alpha': 'a', 'Beta': 'b'}), + ("snapshot_name", self.snapshot.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - 
self.snapshots_mock.create.assert_called_with( - self.new_snapshot.volume_id, + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + self.volume_sdk_client.find_volume.assert_called_once_with( + self.snapshot.volume_id, ignore_missing=False + ) + self.volume_sdk_client.create_snapshot.assert_called_with( + volume_id=self.snapshot.volume_id, force=True, - name=self.new_snapshot.name, - description=self.new_snapshot.description, + name=self.snapshot.name, + description=self.snapshot.description, metadata={'Alpha': 'a', 'Beta': 'b'}, ) - self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) def test_snapshot_create_without_name(self): arglist = [ - "--volume", self.new_snapshot.volume_id, + "--volume", + self.snapshot.volume_id, ] verifylist = [ - ("volume", self.new_snapshot.volume_id), + ("volume", self.snapshot.volume_id), ] self.assertRaises( - tests_utils.ParserException, + test_utils.ParserException, self.check_parser, self.cmd, arglist, @@ -122,107 +117,113 @@ def test_snapshot_create_without_name(self): def test_snapshot_create_without_volume(self): arglist = [ - "--description", self.new_snapshot.description, + "--description", + self.snapshot.description, "--force", - self.new_snapshot.name + self.snapshot.name, ] verifylist = [ - ("description", self.new_snapshot.description), + ("description", self.snapshot.description), ("force", True), - ("snapshot_name", self.new_snapshot.name) + ("snapshot_name", self.snapshot.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.get.assert_called_once_with( - self.new_snapshot.name) - self.snapshots_mock.create.assert_called_once_with( - self.new_snapshot.volume_id, + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + self.volume_sdk_client.find_volume.assert_called_once_with( + self.snapshot.name, ignore_missing=False + ) + self.volume_sdk_client.create_snapshot.assert_called_once_with( + volume_id=self.snapshot.volume_id, force=True, - name=self.new_snapshot.name, - description=self.new_snapshot.description, + name=self.snapshot.name, + description=self.snapshot.description, metadata=None, ) - self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) def test_snapshot_create_with_remote_source(self): arglist = [ - '--remote-source', 'source-name=test_source_name', - '--remote-source', 'source-id=test_source_id', - '--volume', self.new_snapshot.volume_id, - self.new_snapshot.name, + '--remote-source', + 'source-name=test_source_name', + '--remote-source', + 'source-id=test_source_id', + '--volume', + self.snapshot.volume_id, + self.snapshot.name, ] - ref_dict = {'source-name': 'test_source_name', - 'source-id': 'test_source_id'} + ref_dict = { + 'source-name': 'test_source_name', + 'source-id': 'test_source_id', + } verifylist = [ ('remote_source', ref_dict), - ('volume', self.new_snapshot.volume_id), - ("snapshot_name", self.new_snapshot.name), + ('volume', self.snapshot.volume_id), + ("snapshot_name", self.snapshot.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.snapshots_mock.manage.assert_called_with( - volume_id=self.new_snapshot.volume_id, + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + self.volume_sdk_client.find_volume.assert_called_once_with( + self.snapshot.volume_id, ignore_missing=False + ) + 
self.volume_sdk_client.manage_snapshot.assert_called_with( + volume_id=self.snapshot.volume_id, ref=ref_dict, - name=self.new_snapshot.name, + name=self.snapshot.name, description=None, metadata=None, ) - self.snapshots_mock.create.assert_not_called() - self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) - - -class TestVolumeSnapshotDelete(TestVolumeSnapshot): + self.volume_sdk_client.create_snapshot.assert_not_called() - snapshots = volume_fakes.create_snapshots(count=2) +class TestVolumeSnapshotDelete(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.snapshots_mock.get = ( - volume_fakes.get_snapshots(self.snapshots)) - self.snapshots_mock.delete.return_value = None + self.snapshots = list( + sdk_fakes.generate_fake_resources(_snapshot.Snapshot) + ) + self.volume_sdk_client.find_snapshot.side_effect = self.snapshots + self.volume_sdk_client.delete_snapshot.return_value = None - # Get the command object to mock self.cmd = volume_snapshot.DeleteVolumeSnapshot(self.app, None) def test_snapshot_delete(self): - arglist = [ - self.snapshots[0].id - ] - verifylist = [ - ("snapshots", [self.snapshots[0].id]) - ] + arglist = [self.snapshots[0].id] + verifylist = [("snapshots", [self.snapshots[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - - self.snapshots_mock.delete.assert_called_with( - self.snapshots[0].id, False) self.assertIsNone(result) + self.volume_sdk_client.find_snapshot.assert_called_once_with( + self.snapshots[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_snapshot.assert_called_once_with( + self.snapshots[0].id, force=False + ) + def test_snapshot_delete_with_force(self): - arglist = [ - '--force', - self.snapshots[0].id - ] - verifylist = [ - ('force', True), - ("snapshots", [self.snapshots[0].id]) - ] + arglist = ['--force', self.snapshots[0].id] + verifylist = [('force', True), ("snapshots", [self.snapshots[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - - self.snapshots_mock.delete.assert_called_with( - self.snapshots[0].id, True) self.assertIsNone(result) + self.volume_sdk_client.find_snapshot.assert_called_once_with( + self.snapshots[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_snapshot.assert_called_once_with( + self.snapshots[0].id, force=True + ) + def test_delete_multiple_snapshots(self): arglist = [] for s in self.snapshots: @@ -230,17 +231,24 @@ def test_delete_multiple_snapshots(self): verifylist = [ ('snapshots', arglist), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - result = self.cmd.take_action(parsed_args) - calls = [] - for s in self.snapshots: - calls.append(mock.call(s.id, False)) - self.snapshots_mock.delete.assert_has_calls(calls) + result = self.cmd.take_action(parsed_args) self.assertIsNone(result) + self.volume_sdk_client.find_snapshot.assert_has_calls( + [mock.call(x.id, ignore_missing=False) for x in self.snapshots] + ) + self.volume_sdk_client.delete_snapshot.assert_has_calls( + [mock.call(x.id, force=False) for x in self.snapshots] + ) + def test_delete_multiple_snapshots_with_exception(self): + self.volume_sdk_client.find_snapshot.side_effect = [ + self.snapshots[0], + sdk_exceptions.NotFoundException(), + ] + arglist = [ self.snapshots[0].id, 'unexist_snapshot', @@ -251,98 +259,94 @@ def test_delete_multiple_snapshots_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) - 
find_mock_result = [self.snapshots[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('1 of 2 snapshots failed to delete.', - str(e)) - - find_mock.assert_any_call( - self.snapshots_mock, self.snapshots[0].id) - find_mock.assert_any_call(self.snapshots_mock, 'unexist_snapshot') - - self.assertEqual(2, find_mock.call_count) - self.snapshots_mock.delete.assert_called_once_with( - self.snapshots[0].id, False - ) + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertEqual('1 of 2 snapshots failed to delete.', str(exc)) + self.volume_sdk_client.find_snapshot.assert_has_calls( + [ + mock.call(self.snapshots[0].id, ignore_missing=False), + mock.call('unexist_snapshot', ignore_missing=False), + ] + ) + self.volume_sdk_client.delete_snapshot.assert_has_calls( + [ + mock.call(self.snapshots[0].id, force=False), + ] + ) -class TestVolumeSnapshotList(TestVolumeSnapshot): - - volume = volume_fakes.create_one_volume() - project = project_fakes.FakeProject.create_one_project() - snapshots = volume_fakes.create_snapshots( - attrs={'volume_id': volume.name}, count=3) - - columns = [ - "ID", - "Name", - "Description", - "Status", - "Size" - ] - columns_long = columns + [ - "Created At", - "Volume", - "Properties" - ] - - data = [] - for s in snapshots: - data.append(( - s.id, - s.name, - s.description, - s.status, - s.size, - )) - data_long = [] - for s in snapshots: - data_long.append(( - s.id, - s.name, - s.description, - s.status, - s.size, - s.created_at, - volume_snapshot.VolumeIdColumn( - s.volume_id, volume_cache={volume.id: volume}), - format_columns.DictColumn(s.metadata), - )) +class TestVolumeSnapshotList(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.volumes_mock.list.return_value = [self.volume] - self.volumes_mock.get.return_value = self.volume + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.snapshots = list( + sdk_fakes.generate_fake_resources( + _snapshot.Snapshot, attrs={'volume_id': self.volume.name} + ) + ) + self.project = project_fakes.FakeProject.create_one_project() + self.volume_sdk_client.volumes.return_value = [self.volume] + self.volume_sdk_client.find_volume.return_value = self.volume + self.volume_sdk_client.snapshots.return_value = self.snapshots + self.project_mock = self.identity_client.projects self.project_mock.get.return_value = self.project - self.snapshots_mock.list.return_value = self.snapshots - # Get the command to test + + self.columns = ("ID", "Name", "Description", "Status", "Size") + self.columns_long = self.columns + ( + "Created At", + "Volume", + "Properties", + ) + + self.data = [] + self.data_long = [] + for s in self.snapshots: + self.data.append( + ( + s.id, + s.name, + s.description, + s.status, + s.size, + ) + ) + self.data_long.append( + ( + s.id, + s.name, + s.description, + s.status, + s.size, + s.created_at, + volume_snapshot.VolumeIdColumn( + s.volume_id, volume_cache={self.volume.id: self.volume} + ), + format_columns.DictColumn(s.metadata), + ) + ) + self.cmd = volume_snapshot.ListVolumeSnapshot(self.app, None) def test_snapshot_list_without_options(self): arglist = [] - verifylist = [ - ('all_projects', False), - ('long', False) - ] + verifylist = [('all_projects', False), ('long', False)] parsed_args = self.check_parser(self.cmd, 
arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.snapshots_mock.list.assert_called_once_with( - limit=None, marker=None, - search_opts={ - 'all_tenants': False, - 'name': None, - 'status': None, - 'project_id': None, - 'volume_id': None - } + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=False, + name=None, + status=None, + project_id=None, + volume_id=None, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) @@ -350,9 +354,12 @@ def test_snapshot_list_without_options(self): def test_snapshot_list_with_options(self): arglist = [ "--long", - "--limit", "2", - "--project", self.project.id, - "--marker", self.snapshots[0].id, + "--limit", + "2", + "--project", + self.project.id, + "--marker", + self.snapshots[0].id, ] verifylist = [ ("long", True), @@ -365,16 +372,14 @@ def test_snapshot_list_with_options(self): columns, data = self.cmd.take_action(parsed_args) - self.snapshots_mock.list.assert_called_once_with( + self.volume_sdk_client.snapshots.assert_called_once_with( limit=2, marker=self.snapshots[0].id, - search_opts={ - 'all_tenants': True, - 'project_id': self.project.id, - 'name': None, - 'status': None, - 'volume_id': None - } + all_projects=True, + project_id=self.project.id, + name=None, + status=None, + volume_id=None, ) self.assertEqual(self.columns_long, columns) self.assertEqual(self.data_long, list(data)) @@ -383,30 +388,27 @@ def test_snapshot_list_all_projects(self): arglist = [ '--all-projects', ] - verifylist = [ - ('long', False), - ('all_projects', True) - ] + verifylist = [('long', False), ('all_projects', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.snapshots_mock.list.assert_called_once_with( - limit=None, marker=None, - search_opts={ - 'all_tenants': True, - 'name': None, - 'status': None, - 'project_id': None, - 'volume_id': None - } + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=True, + name=None, + status=None, + project_id=None, + volume_id=None, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_snapshot_list_name_option(self): arglist = [ - '--name', self.snapshots[0].name, + '--name', + self.snapshots[0].name, ] verifylist = [ ('all_projects', False), @@ -417,48 +419,48 @@ def test_snapshot_list_name_option(self): columns, data = self.cmd.take_action(parsed_args) - self.snapshots_mock.list.assert_called_once_with( - limit=None, marker=None, - search_opts={ - 'all_tenants': False, - 'name': self.snapshots[0].name, - 'status': None, - 'project_id': None, - 'volume_id': None - } + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=False, + name=self.snapshots[0].name, + status=None, + project_id=None, + volume_id=None, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_snapshot_list_status_option(self): arglist = [ - '--status', self.snapshots[0].status, + '--status', + 'available', ] verifylist = [ ('all_projects', False), ('long', False), - ('status', self.snapshots[0].status), + ('status', 'available'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.snapshots_mock.list.assert_called_once_with( - limit=None, marker=None, - search_opts={ - 'all_tenants': False, - 'name': None, - 'status': 
self.snapshots[0].status, - 'project_id': None, - 'volume_id': None - } + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=False, + name=None, + status='available', + project_id=None, + volume_id=None, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_snapshot_list_volumeid_option(self): arglist = [ - '--volume', self.volume.id, + '--volume', + self.volume.id, ] verifylist = [ ('all_projects', False), @@ -469,41 +471,47 @@ def test_snapshot_list_volumeid_option(self): columns, data = self.cmd.take_action(parsed_args) - self.snapshots_mock.list.assert_called_once_with( - limit=None, marker=None, - search_opts={ - 'all_tenants': False, - 'name': None, - 'status': None, - 'project_id': None, - 'volume_id': self.volume.id - } + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=False, + name=None, + status=None, + project_id=None, + volume_id=self.volume.id, ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) def test_snapshot_list_negative_limit(self): arglist = [ - "--limit", "-2", + "--limit", + "-2", ] verifylist = [ ("limit", -2), ] - self.assertRaises(argparse.ArgumentTypeError, self.check_parser, - self.cmd, arglist, verifylist) - - -class TestVolumeSnapshotSet(TestVolumeSnapshot): + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) - snapshot = volume_fakes.create_one_snapshot() +class TestVolumeSnapshotSet(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.snapshots_mock.get.return_value = self.snapshot - self.snapshots_mock.set_metadata.return_value = None - self.snapshots_mock.update.return_value = None - # Get the command object to mock + self.snapshot = sdk_fakes.generate_fake_resource( + _snapshot.Snapshot, metadata={'foo': 'bar'} + ) + self.volume_sdk_client.find_snapshot.return_value = self.snapshot + self.volume_sdk_client.delete_snapshot_metadata.return_value = None + self.volume_sdk_client.set_snapshot_metadata.return_value = None + self.volume_sdk_client.update_snapshot.return_value = None + self.cmd = volume_snapshot.SetVolumeSnapshot(self.app, None) def test_snapshot_set_no_option(self): @@ -516,38 +524,41 @@ def test_snapshot_set_no_option(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.snapshots_mock.get.assert_called_once_with(parsed_args.snapshot) - self.assertNotCalled(self.snapshots_mock.reset_state) - self.assertNotCalled(self.snapshots_mock.update) - self.assertNotCalled(self.snapshots_mock.set_metadata) + self.assertIsNone(result) + self.volume_sdk_client.find_snapshot.assert_called_once_with( + parsed_args.snapshot, ignore_missing=False + ) + self.volume_sdk_client.reset_snapshot_status.assert_not_called() + self.volume_sdk_client.update_snapshot.assert_not_called() + self.volume_sdk_client.set_snapshot_metadata.assert_not_called() def test_snapshot_set_name_and_property(self): arglist = [ - "--name", "new_snapshot", - "--property", "x=y", - "--property", "foo=foo", + "--name", + "new_snapshot", + "--property", + "x=y", + "--property", + "foo=foo", self.snapshot.id, ] - new_property = {"x": "y", "foo": "foo"} verifylist = [ ("name", "new_snapshot"), - ("property", new_property), + ("properties", {"x": "y", "foo": "foo"}), ("snapshot", self.snapshot.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = 
self.cmd.take_action(parsed_args) - kwargs = { - "name": "new_snapshot", - } - self.snapshots_mock.update.assert_called_with( - self.snapshot.id, **kwargs) - self.snapshots_mock.set_metadata.assert_called_with( - self.snapshot.id, new_property - ) self.assertIsNone(result) + self.volume_sdk_client.update_snapshot.assert_called_with( + self.snapshot.id, name="new_snapshot" + ) + self.volume_sdk_client.set_snapshot_metadata.assert_called_with( + self.snapshot.id, x="y", foo="foo" + ) def test_snapshot_set_with_no_property(self): arglist = [ @@ -561,124 +572,126 @@ def test_snapshot_set_with_no_property(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.snapshots_mock.get.assert_called_once_with(parsed_args.snapshot) - self.assertNotCalled(self.snapshots_mock.reset_state) - self.assertNotCalled(self.snapshots_mock.update) - self.assertNotCalled(self.snapshots_mock.set_metadata) - self.snapshots_mock.delete_metadata.assert_called_with( - self.snapshot.id, ["foo"] - ) + self.assertIsNone(result) + self.volume_sdk_client.find_snapshot.assert_called_once_with( + parsed_args.snapshot, ignore_missing=False + ) + self.volume_sdk_client.reset_snapshot_status.assert_not_called() + self.volume_sdk_client.update_snapshot.assert_not_called() + self.volume_sdk_client.set_snapshot_metadata.assert_not_called() + self.volume_sdk_client.delete_snapshot_metadata.assert_called_with( + self.snapshot.id, keys=["foo"] + ) def test_snapshot_set_with_no_property_and_property(self): arglist = [ "--no-property", - "--property", "foo_1=bar_1", + "--property", + "foo_1=bar_1", self.snapshot.id, ] verifylist = [ ("no_property", True), - ("property", {"foo_1": "bar_1"}), + ("properties", {"foo_1": "bar_1"}), ("snapshot", self.snapshot.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.snapshots_mock.get.assert_called_once_with(parsed_args.snapshot) - self.assertNotCalled(self.snapshots_mock.reset_state) - self.assertNotCalled(self.snapshots_mock.update) - self.snapshots_mock.delete_metadata.assert_called_with( - self.snapshot.id, ["foo"] - ) - self.snapshots_mock.set_metadata.assert_called_once_with( - self.snapshot.id, {"foo_1": "bar_1"}) + self.assertIsNone(result) + self.volume_sdk_client.find_snapshot.assert_called_once_with( + parsed_args.snapshot, ignore_missing=False + ) + self.volume_sdk_client.reset_snapshot_status.assert_not_called() + self.volume_sdk_client.update_snapshot.assert_not_called() + self.volume_sdk_client.delete_snapshot_metadata.assert_called_with( + self.snapshot.id, keys=["foo"] + ) + self.volume_sdk_client.set_snapshot_metadata.assert_called_once_with( + self.snapshot.id, + foo_1="bar_1", + ) def test_snapshot_set_state_to_error(self): - arglist = [ - "--state", "error", - self.snapshot.id - ] - verifylist = [ - ("state", "error"), - ("snapshot", self.snapshot.id) - ] + arglist = ["--state", "error", self.snapshot.id] + verifylist = [("state", "error"), ("snapshot", self.snapshot.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.snapshots_mock.reset_state.assert_called_with( - self.snapshot.id, "error") self.assertIsNone(result) + self.volume_sdk_client.reset_snapshot_status.assert_called_with( + self.snapshot.id, "error" + ) def test_volume_set_state_failed(self): - self.snapshots_mock.reset_state.side_effect = exceptions.CommandError() - arglist = [ - '--state', 'error', - self.snapshot.id - 
] - verifylist = [ - ('state', 'error'), - ('snapshot', self.snapshot.id) - ] + self.volume_sdk_client.reset_snapshot_status.side_effect = ( + exceptions.CommandError() + ) + arglist = ['--state', 'error', self.snapshot.id] + verifylist = [('state', 'error'), ('snapshot', self.snapshot.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('One or more of the set operations failed', - str(e)) - self.snapshots_mock.reset_state.assert_called_once_with( - self.snapshot.id, 'error') + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertEqual('One or more of the set operations failed', str(exc)) + self.volume_sdk_client.reset_snapshot_status.assert_called_once_with( + self.snapshot.id, 'error' + ) def test_volume_set_name_and_state_failed(self): - self.snapshots_mock.reset_state.side_effect = exceptions.CommandError() + self.volume_sdk_client.reset_snapshot_status.side_effect = ( + exceptions.CommandError() + ) arglist = [ - '--state', 'error', - "--name", "new_snapshot", - self.snapshot.id + '--state', + 'error', + "--name", + "new_snapshot", + self.snapshot.id, ] verifylist = [ ('state', 'error'), ("name", "new_snapshot"), - ('snapshot', self.snapshot.id) + ('snapshot', self.snapshot.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual('One or more of the set operations failed', - str(e)) - kwargs = { - "name": "new_snapshot", - } - self.snapshots_mock.update.assert_called_once_with( - self.snapshot.id, **kwargs) - self.snapshots_mock.reset_state.assert_called_once_with( - self.snapshot.id, 'error') + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) -class TestVolumeSnapshotShow(TestVolumeSnapshot): + self.assertEqual('One or more of the set operations failed', str(exc)) + self.volume_sdk_client.update_snapshot.assert_called_once_with( + self.snapshot.id, name="new_snapshot" + ) + self.volume_sdk_client.reset_snapshot_status.assert_called_once_with( + self.snapshot.id, 'error' + ) - columns = ( - 'created_at', - 'description', - 'id', - 'name', - 'properties', - 'size', - 'status', - 'volume_id', - ) +class TestVolumeSnapshotShow(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.snapshot = volume_fakes.create_one_snapshot() - + self.snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot) + + self.columns = ( + 'created_at', + 'description', + 'id', + 'name', + 'properties', + 'size', + 'status', + 'volume_id', + ) self.data = ( self.snapshot.created_at, self.snapshot.description, @@ -690,45 +703,42 @@ def setUp(self): self.snapshot.volume_id, ) - self.snapshots_mock.get.return_value = self.snapshot - # Get the command object to test + self.volume_sdk_client.find_snapshot.return_value = self.snapshot + self.cmd = volume_snapshot.ShowVolumeSnapshot(self.app, None) def test_snapshot_show(self): - arglist = [ - self.snapshot.id - ] - verifylist = [ - ("snapshot", self.snapshot.id) - ] + arglist = [self.snapshot.id] + verifylist = [("snapshot", self.snapshot.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.snapshots_mock.get.assert_called_with(self.snapshot.id) + 
self.volume_sdk_client.find_snapshot.assert_called_with( + self.snapshot.id, ignore_missing=False + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) -class TestVolumeSnapshotUnset(TestVolumeSnapshot): - - snapshot = volume_fakes.create_one_snapshot() - +class TestVolumeSnapshotUnset(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.snapshots_mock.get.return_value = self.snapshot - self.snapshots_mock.delete_metadata.return_value = None - # Get the command object to mock + self.snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot) + self.volume_sdk_client.find_snapshot.return_value = self.snapshot + self.volume_sdk_client.delete_snapshot_metadata.return_value = None + self.cmd = volume_snapshot.UnsetVolumeSnapshot(self.app, None) def test_snapshot_unset(self): arglist = [ - "--property", "foo", + "--property", + "foo", self.snapshot.id, ] verifylist = [ - ("property", ["foo"]), + ("properties", ["foo"]), ("snapshot", self.snapshot.id), ] @@ -736,7 +746,7 @@ def test_snapshot_unset(self): result = self.cmd.take_action(parsed_args) - self.snapshots_mock.delete_metadata.assert_called_with( - self.snapshot.id, ["foo"] - ) self.assertIsNone(result) + self.volume_sdk_client.delete_snapshot_metadata.assert_called_with( + self.snapshot.id, keys=["foo"] + ) diff --git a/openstackclient/tests/unit/volume/v2/test_volume_transfer_request.py b/openstackclient/tests/unit/volume/v2/test_volume_transfer_request.py index c8c6fac92b..2677ddc10a 100644 --- a/openstackclient/tests/unit/volume/v2/test_volume_transfer_request.py +++ b/openstackclient/tests/unit/volume/v2/test_volume_transfer_request.py @@ -15,7 +15,6 @@ from unittest import mock from unittest.mock import call -from cinderclient import api_versions from osc_lib import exceptions from osc_lib import utils @@ -25,21 +24,19 @@ class TestTransfer(volume_fakes.TestVolume): - def setUp(self): super().setUp() # Get a shortcut to the TransferManager Mock - self.transfer_mock = self.app.client_manager.volume.transfers + self.transfer_mock = self.volume_client.transfers self.transfer_mock.reset_mock() # Get a shortcut to the VolumeManager Mock - self.volumes_mock = self.app.client_manager.volume.volumes + self.volumes_mock = self.volume_client.volumes self.volumes_mock.reset_mock() class TestTransferAccept(TestTransfer): - columns = ( 'id', 'name', @@ -61,11 +58,13 @@ def setUp(self): # Get the command object to test self.cmd = volume_transfer_request.AcceptTransferRequest( - self.app, None) + self.app, None + ) def test_transfer_accept(self): arglist = [ - '--auth-key', 'key_value', + '--auth-key', + 'key_value', self.volume_transfer.id, ] verifylist = [ @@ -104,7 +103,6 @@ def test_transfer_accept_no_option(self): class TestTransferCreate(TestTransfer): - volume = volume_fakes.create_one_volume() columns = ( @@ -138,7 +136,8 @@ def setUp(self): # Get the command object to test self.cmd = volume_transfer_request.CreateTransferRequest( - self.app, None) + self.app, None + ) def test_transfer_create_without_name(self): arglist = [ @@ -151,14 +150,14 @@ def test_transfer_create_without_name(self): columns, data = self.cmd.take_action(parsed_args) - self.transfer_mock.create.assert_called_once_with( - self.volume.id, None) + self.transfer_mock.create.assert_called_once_with(self.volume.id, None) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_transfer_create_with_name(self): arglist = [ - '--name', self.volume_transfer.name, + '--name', + self.volume_transfer.name, 
self.volume.id, ] verifylist = [ @@ -170,58 +169,14 @@ def test_transfer_create_with_name(self): columns, data = self.cmd.take_action(parsed_args) self.transfer_mock.create.assert_called_once_with( - self.volume.id, self.volume_transfer.name,) - self.assertEqual(self.columns, columns) - self.assertEqual(self.data, data) - - def test_transfer_create_with_no_snapshots(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.55') - - arglist = [ - '--no-snapshots', self.volume.id, - ] - verifylist = [ - ('name', None), - ('snapshots', False), - ('volume', self.volume.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - columns, data = self.cmd.take_action(parsed_args) - - self.transfer_mock.create.assert_called_once_with( - self.volume.id, None, no_snapshots=True) + self.volume_transfer.name, + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) - def test_transfer_create_pre_v355(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.54') - - arglist = [ - '--no-snapshots', - self.volume.id, - ] - verifylist = [ - ('name', None), - ('snapshots', False), - ('volume', self.volume.id), - ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) - self.assertIn( - '--os-volume-api-version 3.55 or greater is required', - str(exc)) - class TestTransferDelete(TestTransfer): - volume_transfers = volume_fakes.create_transfers(count=2) def setUp(self): @@ -234,21 +189,19 @@ def setUp(self): # Get the command object to mock self.cmd = volume_transfer_request.DeleteTransferRequest( - self.app, None) + self.app, None + ) def test_transfer_delete(self): - arglist = [ - self.volume_transfers[0].id - ] - verifylist = [ - ("transfer_request", [self.volume_transfers[0].id]) - ] + arglist = [self.volume_transfers[0].id] + verifylist = [("transfer_request", [self.volume_transfers[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.transfer_mock.delete.assert_called_with( - self.volume_transfers[0].id) + self.volume_transfers[0].id + ) self.assertIsNone(result) def test_delete_multiple_transfers(self): @@ -280,17 +233,21 @@ def test_delete_multiple_transfers_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self.volume_transfers[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: + with mock.patch.object( + utils, 'find_resource', side_effect=find_mock_result + ) as find_mock: try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 volume transfer requests failed ' - 'to delete', str(e)) + self.assertEqual( + '1 of 2 volume transfer requests failed to delete', + str(e), + ) find_mock.assert_any_call( - self.transfer_mock, self.volume_transfers[0].id) + self.transfer_mock, self.volume_transfers[0].id + ) find_mock.assert_any_call(self.transfer_mock, 'unexist_transfer') self.assertEqual(2, find_mock.call_count) @@ -300,7 +257,6 @@ def test_delete_multiple_transfers_with_exception(self): class TestTransferList(TestTransfer): - # The Transfers to be listed volume_transfers = volume_fakes.create_one_transfer() @@ -331,28 +287,25 @@ def test_transfer_list_without_argument(self): # confirming if all expected columns are present in 
the result. self.assertEqual(expected_columns, columns) - datalist = (( - self.volume_transfers.id, - self.volume_transfers.name, - self.volume_transfers.volume_id, - ), ) + datalist = ( + ( + self.volume_transfers.id, + self.volume_transfers.name, + self.volume_transfers.volume_id, + ), + ) # confirming if all expected values are present in the result. self.assertEqual(datalist, tuple(data)) # checking if proper call was made to list volume_transfers self.transfer_mock.list.assert_called_with( - detailed=True, - search_opts={'all_tenants': 0} + detailed=True, search_opts={'all_tenants': 0} ) def test_transfer_list_with_argument(self): - arglist = [ - "--all-projects" - ] - verifylist = [ - ("all_projects", True) - ] + arglist = ["--all-projects"] + verifylist = [("all_projects", True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -370,24 +323,24 @@ def test_transfer_list_with_argument(self): # confirming if all expected columns are present in the result. self.assertEqual(expected_columns, columns) - datalist = (( - self.volume_transfers.id, - self.volume_transfers.name, - self.volume_transfers.volume_id, - ), ) + datalist = ( + ( + self.volume_transfers.id, + self.volume_transfers.name, + self.volume_transfers.volume_id, + ), + ) # confirming if all expected values are present in the result. self.assertEqual(datalist, tuple(data)) # checking if proper call was made to list volume_transfers self.transfer_mock.list.assert_called_with( - detailed=True, - search_opts={'all_tenants': 1} + detailed=True, search_opts={'all_tenants': 1} ) class TestTransferShow(TestTransfer): - columns = ( 'created_at', 'id', @@ -411,8 +364,7 @@ def setUp(self): self.transfer_mock.get.return_value = self.volume_transfer # Get the command object to test - self.cmd = volume_transfer_request.ShowTransferRequest( - self.app, None) + self.cmd = volume_transfer_request.ShowTransferRequest(self.app, None) def test_transfer_show(self): arglist = [ @@ -425,7 +377,6 @@ def test_transfer_show(self): columns, data = self.cmd.take_action(parsed_args) - self.transfer_mock.get.assert_called_once_with( - self.volume_transfer.id) + self.transfer_mock.get.assert_called_once_with(self.volume_transfer.id) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/volume/v2/test_type.py b/openstackclient/tests/unit/volume/v2/test_volume_type.py similarity index 60% rename from openstackclient/tests/unit/volume/v2/test_type.py rename to openstackclient/tests/unit/volume/v2/test_volume_type.py index 1cb46c4595..6f50ff2ef6 100644 --- a/openstackclient/tests/unit/volume/v2/test_type.py +++ b/openstackclient/tests/unit/volume/v2/test_volume_type.py @@ -26,39 +26,38 @@ class TestType(volume_fakes.TestVolume): - def setUp(self): super().setUp() - self.types_mock = self.app.client_manager.volume.volume_types - self.types_mock.reset_mock() + self.volume_types_mock = self.volume_client.volume_types + self.volume_types_mock.reset_mock() - self.types_access_mock = ( - self.app.client_manager.volume.volume_type_access) - self.types_access_mock.reset_mock() + self.volume_type_access_mock = self.volume_client.volume_type_access + self.volume_type_access_mock.reset_mock() - self.encryption_types_mock = ( - self.app.client_manager.volume.volume_encryption_types) - self.encryption_types_mock.reset_mock() + self.volume_encryption_types_mock = ( + self.volume_client.volume_encryption_types + ) + self.volume_encryption_types_mock.reset_mock() - self.projects_mock = 
self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() class TestTypeCreate(TestType): - - project = identity_fakes.FakeProject.create_one_project() - columns = ( - 'description', - 'id', - 'is_public', - 'name', - ) - def setUp(self): super().setUp() - self.new_volume_type = volume_fakes.create_one_volume_type() + self.new_volume_type = volume_fakes.create_one_volume_type( + methods={'set_keys': None}, + ) + self.project = identity_fakes.FakeProject.create_one_project() + self.columns = ( + 'description', + 'id', + 'is_public', + 'name', + ) self.data = ( self.new_volume_type.description, self.new_volume_type.id, @@ -66,27 +65,27 @@ def setUp(self): self.new_volume_type.name, ) - self.types_mock.create.return_value = self.new_volume_type + self.volume_types_mock.create.return_value = self.new_volume_type self.projects_mock.get.return_value = self.project # Get the command object to test self.cmd = volume_type.CreateVolumeType(self.app, None) def test_type_create_public(self): arglist = [ - "--description", self.new_volume_type.description, + "--description", + self.new_volume_type.description, "--public", self.new_volume_type.name, ] verifylist = [ ("description", self.new_volume_type.description), - ("public", True), - ("private", False), + ("is_public", True), ("name", self.new_volume_type.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.create.assert_called_with( + self.volume_types_mock.create.assert_called_with( self.new_volume_type.name, description=self.new_volume_type.description, is_public=True, @@ -97,22 +96,23 @@ def test_type_create_public(self): def test_type_create_private(self): arglist = [ - "--description", self.new_volume_type.description, + "--description", + self.new_volume_type.description, "--private", - "--project", self.project.id, + "--project", + self.project.id, self.new_volume_type.name, ] verifylist = [ ("description", self.new_volume_type.description), - ("public", False), - ("private", True), + ("is_public", False), ("project", self.project.id), ("name", self.new_volume_type.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.create.assert_called_with( + self.volume_types_mock.create.assert_called_with( self.new_volume_type.name, description=self.new_volume_type.description, is_public=False, @@ -121,20 +121,66 @@ def test_type_create_private(self): self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) - def test_public_type_create_with_project(self): + def test_type_create_with_properties(self): arglist = [ - '--project', self.project.id, + '--property', + 'myprop=myvalue', + # this combination isn't viable server-side but is okay for testing + '--multiattach', + '--cacheable', + '--replicated', + '--availability-zone', + 'az1', self.new_volume_type.name, ] verifylist = [ + ('properties', {'myprop': 'myvalue'}), + ('multiattach', True), + ('cacheable', True), + ('replicated', True), + ('availability_zones', ['az1']), + ('name', self.new_volume_type.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.create.assert_called_with( + self.new_volume_type.name, description=None + ) + self.new_volume_type.set_keys.assert_called_once_with( + { + 'myprop': 'myvalue', + 'multiattach': ' True', + 
'cacheable': ' True', + 'replication_enabled': ' True', + 'RESKEY:availability_zones': 'az1', + } + ) + + self.columns += ('properties',) + self.data += (format_columns.DictColumn(None),) + + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_public_type_create_with_project_public(self): + arglist = [ + '--project', + self.project.id, + self.new_volume_type.name, + ] + verifylist = [ + ('is_public', None), ('project', self.project.id), ('name', self.new_volume_type.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.assertRaises(exceptions.CommandError, - self.cmd.take_action, - parsed_args) + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) def test_type_create_with_encryption(self): encryption_info = { @@ -149,8 +195,8 @@ def test_type_create_with_encryption(self): self.new_volume_type = volume_fakes.create_one_volume_type( attrs={'encryption': encryption_info}, ) - self.types_mock.create.return_value = self.new_volume_type - self.encryption_types_mock.create.return_value = encryption_type + self.volume_types_mock.create.return_value = self.new_volume_type + self.volume_encryption_types_mock.create.return_value = encryption_type encryption_columns = ( 'description', 'encryption', @@ -166,10 +212,14 @@ def test_type_create_with_encryption(self): self.new_volume_type.name, ) arglist = [ - '--encryption-provider', 'LuksEncryptor', - '--encryption-cipher', 'aes-xts-plain64', - '--encryption-key-size', '128', - '--encryption-control-location', 'front-end', + '--encryption-provider', + 'LuksEncryptor', + '--encryption-cipher', + 'aes-xts-plain64', + '--encryption-key-size', + '128', + '--encryption-control-location', + 'front-end', self.new_volume_type.name, ] verifylist = [ @@ -182,7 +232,7 @@ def test_type_create_with_encryption(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.create.assert_called_with( + self.volume_types_mock.create.assert_called_with( self.new_volume_type.name, description=None, ) @@ -192,7 +242,7 @@ def test_type_create_with_encryption(self): 'key_size': 128, 'control_location': 'front-end', } - self.encryption_types_mock.create.assert_called_with( + self.volume_encryption_types_mock.create.assert_called_with( self.new_volume_type, body, ) @@ -201,32 +251,27 @@ def test_type_create_with_encryption(self): class TestTypeDelete(TestType): - volume_types = volume_fakes.create_volume_types(count=2) def setUp(self): super().setUp() - self.types_mock.get = volume_fakes.get_volume_types( + self.volume_types_mock.get = volume_fakes.get_volume_types( self.volume_types, ) - self.types_mock.delete.return_value = None + self.volume_types_mock.delete.return_value = None # Get the command object to mock self.cmd = volume_type.DeleteVolumeType(self.app, None) def test_type_delete(self): - arglist = [ - self.volume_types[0].id - ] - verifylist = [ - ("volume_types", [self.volume_types[0].id]) - ] + arglist = [self.volume_types[0].id] + verifylist = [("volume_types", [self.volume_types[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.types_mock.delete.assert_called_with(self.volume_types[0]) + self.volume_types_mock.delete.assert_called_with(self.volume_types[0]) self.assertIsNone(result) def test_delete_multiple_types(self): @@ -243,7 +288,7 @@ def test_delete_multiple_types(self): calls = [] for t in self.volume_types: 
calls.append(call(t)) - self.types_mock.delete.assert_has_calls(calls) + self.volume_types_mock.delete.assert_has_calls(calls) self.assertIsNone(result) def test_delete_multiple_types_with_exception(self): @@ -258,26 +303,28 @@ def test_delete_multiple_types_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self.volume_types[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: + with mock.patch.object( + utils, 'find_resource', side_effect=find_mock_result + ) as find_mock: try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 volume types failed to delete.', - str(e)) + self.assertEqual( + '1 of 2 volume types failed to delete.', str(e) + ) find_mock.assert_any_call( - self.types_mock, self.volume_types[0].id) - find_mock.assert_any_call(self.types_mock, 'unexist_type') + self.volume_types_mock, self.volume_types[0].id + ) + find_mock.assert_any_call(self.volume_types_mock, 'unexist_type') self.assertEqual(2, find_mock.call_count) - self.types_mock.delete.assert_called_once_with( + self.volume_types_mock.delete.assert_called_once_with( self.volume_types[0] ) class TestTypeList(TestType): - volume_types = volume_fakes.create_volume_types() columns = [ @@ -285,37 +332,33 @@ class TestTypeList(TestType): "Name", "Is Public", ] - columns_long = columns + [ - "Description", - "Properties" - ] - data_with_default_type = [( - volume_types[0].id, - volume_types[0].name, - True - )] + columns_long = columns + ["Description"] + data_with_default_type = [(volume_types[0].id, volume_types[0].name, True)] data = [] for t in volume_types: - data.append(( - t.id, - t.name, - t.is_public, - )) + data.append( + ( + t.id, + t.name, + t.is_public, + ) + ) data_long = [] for t in volume_types: - data_long.append(( - t.id, - t.name, - t.is_public, - t.description, - format_columns.DictColumn(t.extra_specs), - )) + data_long.append( + ( + t.id, + t.name, + t.is_public, + t.description, + ) + ) def setUp(self): super().setUp() - self.types_mock.list.return_value = self.volume_types - self.types_mock.default.return_value = self.volume_types[0] + self.volume_types_mock.list.return_value = self.volume_types + self.volume_types_mock.default.return_value = self.volume_types[0] # get the command to test self.cmd = volume_type.ListVolumeType(self.app, None) @@ -323,14 +366,13 @@ def test_type_list_without_options(self): arglist = [] verifylist = [ ("long", False), - ("private", False), - ("public", False), + ("is_public", None), ("default", False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.list.assert_called_once_with(is_public=None) + self.volume_types_mock.list.assert_called_once_with(is_public=None) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -341,14 +383,13 @@ def test_type_list_with_options(self): ] verifylist = [ ("long", True), - ("private", False), - ("public", True), + ("is_public", True), ("default", False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.list.assert_called_once_with(is_public=True) + self.volume_types_mock.list.assert_called_once_with(is_public=True) self.assertEqual(self.columns_long, columns) self.assertCountEqual(self.data_long, list(data)) @@ -358,14 
+399,13 @@ def test_type_list_with_private_option(self): ] verifylist = [ ("long", False), - ("private", True), - ("public", False), + ("is_public", False), ("default", False), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.list.assert_called_once_with(is_public=False) + self.volume_types_mock.list.assert_called_once_with(is_public=False) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, list(data)) @@ -376,14 +416,13 @@ def test_type_list_with_default_option(self): verifylist = [ ("encryption_type", False), ("long", False), - ("private", False), - ("public", False), + ("is_public", None), ("default", True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.default.assert_called_once_with() + self.volume_types_mock.default.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertCountEqual(self.data_with_default_type, list(data)) @@ -401,23 +440,27 @@ def test_type_list_with_encryption(self): "Encryption", ] encryption_data = [] - encryption_data.append(( - self.volume_types[0].id, - self.volume_types[0].name, - self.volume_types[0].is_public, - volume_type.EncryptionInfoColumn( + encryption_data.append( + ( self.volume_types[0].id, - {self.volume_types[0].id: encryption_info}), - )) - encryption_data.append(( - self.volume_types[1].id, - self.volume_types[1].name, - self.volume_types[1].is_public, - volume_type.EncryptionInfoColumn( - self.volume_types[1].id, {}), - )) - - self.encryption_types_mock.list.return_value = [encryption_type] + self.volume_types[0].name, + self.volume_types[0].is_public, + volume_type.EncryptionInfoColumn( + self.volume_types[0].id, + {self.volume_types[0].id: encryption_info}, + ), + ) + ) + encryption_data.append( + ( + self.volume_types[1].id, + self.volume_types[1].name, + self.volume_types[1].is_public, + volume_type.EncryptionInfoColumn(self.volume_types[1].id, {}), + ) + ) + + self.volume_encryption_types_mock.list.return_value = [encryption_type] arglist = [ "--encryption-type", ] @@ -427,104 +470,105 @@ def test_type_list_with_encryption(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.encryption_types_mock.list.assert_called_once_with() - self.types_mock.list.assert_called_once_with(is_public=None) + self.volume_encryption_types_mock.list.assert_called_once_with() + self.volume_types_mock.list.assert_called_once_with(is_public=None) self.assertEqual(encryption_columns, columns) self.assertCountEqual(encryption_data, list(data)) class TestTypeSet(TestType): - - project = identity_fakes.FakeProject.create_one_project() - volume_type = volume_fakes.create_one_volume_type( - methods={'set_keys': None}, - ) - def setUp(self): super().setUp() - self.types_mock.get.return_value = self.volume_type - - # Return a project + self.project = identity_fakes.FakeProject.create_one_project() self.projects_mock.get.return_value = self.project - self.encryption_types_mock.create.return_value = None - self.encryption_types_mock.update.return_value = None - # Get the command object to test - self.cmd = volume_type.SetVolumeType(self.app, None) - def test_type_set_name(self): - new_name = 'new_name' - arglist = [ - '--name', new_name, - self.volume_type.id, - ] - verifylist = [ - ('name', new_name), - ('description', None), - ('property', None), - ('volume_type', self.volume_type.id), - 
] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) - - result = self.cmd.take_action(parsed_args) - - # Set expected values - kwargs = { - 'name': new_name, - } - self.types_mock.update.assert_called_with( - self.volume_type.id, - **kwargs + self.volume_type = volume_fakes.create_one_volume_type( + methods={'set_keys': None}, ) - self.assertIsNone(result) + self.volume_types_mock.get.return_value = self.volume_type + self.volume_encryption_types_mock.create.return_value = None + self.volume_encryption_types_mock.update.return_value = None + + self.cmd = volume_type.SetVolumeType(self.app, None) - def test_type_set_description(self): - new_desc = 'new_desc' + def test_type_set(self): arglist = [ - '--description', new_desc, + '--name', + 'new_name', + '--description', + 'new_description', + '--private', self.volume_type.id, ] verifylist = [ - ('name', None), - ('description', new_desc), - ('property', None), + ('name', 'new_name'), + ('description', 'new_description'), + ('properties', None), ('volume_type', self.volume_type.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - # Set expected values kwargs = { - 'description': new_desc, + 'name': 'new_name', + 'description': 'new_description', + 'is_public': False, } - self.types_mock.update.assert_called_with( - self.volume_type.id, - **kwargs + self.volume_types_mock.update.assert_called_with( + self.volume_type.id, **kwargs ) self.assertIsNone(result) + self.volume_type_access_mock.add_project_access.assert_not_called() + self.volume_encryption_types_mock.update.assert_not_called() + self.volume_encryption_types_mock.create.assert_not_called() + def test_type_set_property(self): arglist = [ - '--property', 'myprop=myvalue', + '--property', + 'myprop=myvalue', + # this combination isn't viable server-side but is okay for testing + '--multiattach', + '--cacheable', + '--replicated', + '--availability-zone', + 'az1', self.volume_type.id, ] verifylist = [ ('name', None), ('description', None), - ('property', {'myprop': 'myvalue'}), + ('properties', {'myprop': 'myvalue'}), + ('multiattach', True), + ('cacheable', True), + ('replicated', True), + ('availability_zones', ['az1']), ('volume_type', self.volume_type.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.volume_type.set_keys.assert_called_once_with( - {'myprop': 'myvalue'}) self.assertIsNone(result) - def test_type_set_not_called_without_project_argument(self): + self.volume_type.set_keys.assert_called_once_with( + { + 'myprop': 'myvalue', + 'multiattach': ' True', + 'cacheable': ' True', + 'replication_enabled': ' True', + 'RESKEY:availability_zones': 'az1', + } + ) + self.volume_type_access_mock.add_project_access.assert_not_called() + self.volume_encryption_types_mock.update.assert_not_called() + self.volume_encryption_types_mock.create.assert_not_called() + + def test_type_set_with_empty_project(self): arglist = [ - '--project', '', + '--project', + '', self.volume_type.id, ] verifylist = [ @@ -537,25 +581,15 @@ def test_type_set_not_called_without_project_argument(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.assertFalse(self.types_access_mock.add_project_access.called) + self.volume_type.set_keys.assert_not_called() + self.volume_type_access_mock.add_project_access.assert_not_called() + self.volume_encryption_types_mock.update.assert_not_called() + 
self.volume_encryption_types_mock.create.assert_not_called() - def test_type_set_failed_with_missing_volume_type_argument(self): + def test_type_set_with_project(self): arglist = [ - '--project', 'identity_fakes.project_id', - ] - verifylist = [ - ('project', 'identity_fakes.project_id'), - ] - - self.assertRaises(tests_utils.ParserException, - self.check_parser, - self.cmd, - arglist, - verifylist) - - def test_type_set_project_access(self): - arglist = [ - '--project', self.project.id, + '--project', + self.project.id, self.volume_type.id, ] verifylist = [ @@ -567,19 +601,27 @@ def test_type_set_project_access(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.types_access_mock.add_project_access.assert_called_with( + self.volume_type.set_keys.assert_not_called() + self.volume_type_access_mock.add_project_access.assert_called_with( self.volume_type.id, self.project.id, ) + self.volume_encryption_types_mock.update.assert_not_called() + self.volume_encryption_types_mock.create.assert_not_called() - def test_type_set_new_encryption(self): - self.encryption_types_mock.update.side_effect = ( - exceptions.NotFound('NotFound')) + def test_type_set_with_new_encryption(self): + self.volume_encryption_types_mock.update.side_effect = ( + exceptions.NotFound('NotFound') + ) arglist = [ - '--encryption-provider', 'LuksEncryptor', - '--encryption-cipher', 'aes-xts-plain64', - '--encryption-key-size', '128', - '--encryption-control-location', 'front-end', + '--encryption-provider', + 'LuksEncryptor', + '--encryption-cipher', + 'aes-xts-plain64', + '--encryption-key-size', + '128', + '--encryption-control-location', + 'front-end', self.volume_type.id, ] verifylist = [ @@ -592,30 +634,33 @@ def test_type_set_new_encryption(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + body = { 'provider': 'LuksEncryptor', 'cipher': 'aes-xts-plain64', 'key_size': 128, 'control_location': 'front-end', } - self.encryption_types_mock.update.assert_called_with( + self.volume_encryption_types_mock.update.assert_called_with( self.volume_type, body, ) - self.encryption_types_mock.create.assert_called_with( + self.volume_encryption_types_mock.create.assert_called_with( self.volume_type, body, ) - self.assertIsNone(result) @mock.patch.object(utils, 'find_resource') - def test_type_set_existing_encryption(self, mock_find): - mock_find.side_effect = [self.volume_type, - "existing_encryption_type"] + def test_type_set_with_existing_encryption(self, mock_find): + mock_find.side_effect = [self.volume_type, "existing_encryption_type"] arglist = [ - '--encryption-provider', 'LuksEncryptor', - '--encryption-cipher', 'aes-xts-plain64', - '--encryption-control-location', 'front-end', + '--encryption-provider', + 'LuksEncryptor', + '--encryption-cipher', + 'aes-xts-plain64', + '--encryption-control-location', + 'front-end', self.volume_type.id, ] verifylist = [ @@ -627,25 +672,32 @@ def test_type_set_existing_encryption(self, mock_find): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_type.set_keys.assert_not_called() + self.volume_type_access_mock.add_project_access.assert_not_called() body = { 'provider': 'LuksEncryptor', 'cipher': 'aes-xts-plain64', 'control_location': 'front-end', } - self.encryption_types_mock.update.assert_called_with( + self.volume_encryption_types_mock.update.assert_called_with( 
self.volume_type, body, ) - self.encryption_types_mock.create.assert_not_called() - self.assertIsNone(result) + self.volume_encryption_types_mock.create.assert_not_called() def test_type_set_new_encryption_without_provider(self): - self.encryption_types_mock.update.side_effect = ( - exceptions.NotFound('NotFound')) + self.volume_encryption_types_mock.update.side_effect = ( + exceptions.NotFound('NotFound') + ) arglist = [ - '--encryption-cipher', 'aes-xts-plain64', - '--encryption-key-size', '128', - '--encryption-control-location', 'front-end', + '--encryption-cipher', + 'aes-xts-plain64', + '--encryption-key-size', + '128', + '--encryption-control-location', + 'front-end', self.volume_type.id, ] verifylist = [ @@ -655,27 +707,32 @@ def test_type_set_new_encryption_without_provider(self): ('volume_type', self.volume_type.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - try: - self.cmd.take_action(parsed_args) - self.fail('CommandError should be raised.') - except exceptions.CommandError as e: - self.assertEqual("Command Failed: One or more of" - " the operations failed", - str(e)) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertEqual( + "Command Failed: One or more of the operations failed", + str(exc), + ) + + self.volume_type.set_keys.assert_not_called() + self.volume_type_access_mock.add_project_access.assert_not_called() body = { 'cipher': 'aes-xts-plain64', 'key_size': 128, 'control_location': 'front-end', } - self.encryption_types_mock.update.assert_called_with( + self.volume_encryption_types_mock.update.assert_called_with( self.volume_type, body, ) - self.encryption_types_mock.create.assert_not_called() + self.volume_encryption_types_mock.create.assert_not_called() class TestTypeShow(TestType): - columns = ( 'access_project_ids', 'description', @@ -695,37 +752,31 @@ def setUp(self): self.volume_type.id, True, self.volume_type.name, - format_columns.DictColumn(self.volume_type.extra_specs) + format_columns.DictColumn(self.volume_type.extra_specs), ) - self.types_mock.get.return_value = self.volume_type + self.volume_types_mock.get.return_value = self.volume_type # Get the command object to test self.cmd = volume_type.ShowVolumeType(self.app, None) def test_type_show(self): - arglist = [ - self.volume_type.id - ] + arglist = [self.volume_type.id] verifylist = [ ("encryption_type", False), - ("volume_type", self.volume_type.id) + ("volume_type", self.volume_type.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.get.assert_called_with(self.volume_type.id) + self.volume_types_mock.get.assert_called_with(self.volume_type.id) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_type_show_with_access(self): - arglist = [ - self.volume_type.id - ] - verifylist = [ - ("volume_type", self.volume_type.id) - ] + arglist = [self.volume_type.id] + verifylist = [("volume_type", self.volume_type.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) private_type = volume_fakes.create_one_volume_type( @@ -733,20 +784,22 @@ def test_type_show_with_access(self): ) type_access_list = volume_fakes.create_one_type_access() with mock.patch.object( - self.types_mock, + self.volume_types_mock, 'get', return_value=private_type, ): with mock.patch.object( - self.types_access_mock, + self.volume_type_access_mock, 'list', return_value=[type_access_list], ): columns, data = 
self.cmd.take_action(parsed_args) - self.types_mock.get.assert_called_once_with( - self.volume_type.id) - self.types_access_mock.list.assert_called_once_with( - private_type.id) + self.volume_types_mock.get.assert_called_once_with( + self.volume_type.id + ) + self.volume_type_access_mock.list.assert_called_once_with( + private_type.id + ) self.assertEqual(self.columns, columns) private_type_data = ( @@ -755,31 +808,31 @@ def test_type_show_with_access(self): private_type.id, private_type.is_public, private_type.name, - format_columns.DictColumn(private_type.extra_specs) + format_columns.DictColumn(private_type.extra_specs), ) self.assertCountEqual(private_type_data, data) def test_type_show_with_list_access_exec(self): - arglist = [ - self.volume_type.id - ] - verifylist = [ - ("volume_type", self.volume_type.id) - ] + arglist = [self.volume_type.id] + verifylist = [("volume_type", self.volume_type.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) private_type = volume_fakes.create_one_volume_type( attrs={'is_public': False}, ) - with mock.patch.object(self.types_mock, 'get', - return_value=private_type): - with mock.patch.object(self.types_access_mock, 'list', - side_effect=Exception()): + with mock.patch.object( + self.volume_types_mock, 'get', return_value=private_type + ): + with mock.patch.object( + self.volume_type_access_mock, 'list', side_effect=Exception() + ): columns, data = self.cmd.take_action(parsed_args) - self.types_mock.get.assert_called_once_with( - self.volume_type.id) - self.types_access_mock.list.assert_called_once_with( - private_type.id) + self.volume_types_mock.get.assert_called_once_with( + self.volume_type.id + ) + self.volume_type_access_mock.list.assert_called_once_with( + private_type.id + ) self.assertEqual(self.columns, columns) private_type_data = ( @@ -788,7 +841,7 @@ def test_type_show_with_list_access_exec(self): private_type.id, private_type.is_public, private_type.name, - format_columns.DictColumn(private_type.extra_specs) + format_columns.DictColumn(private_type.extra_specs), ) self.assertCountEqual(private_type_data, data) @@ -803,8 +856,8 @@ def test_type_show_with_encryption(self): self.volume_type = volume_fakes.create_one_volume_type( attrs={'encryption': encryption_info}, ) - self.types_mock.get.return_value = self.volume_type - self.encryption_types_mock.get.return_value = encryption_type + self.volume_types_mock.get.return_value = self.volume_type + self.volume_encryption_types_mock.get.return_value = encryption_type encryption_columns = ( 'access_project_ids', 'description', @@ -821,27 +874,25 @@ def test_type_show_with_encryption(self): self.volume_type.id, True, self.volume_type.name, - format_columns.DictColumn(self.volume_type.extra_specs) + format_columns.DictColumn(self.volume_type.extra_specs), ) - arglist = [ - '--encryption-type', - self.volume_type.id - ] + arglist = ['--encryption-type', self.volume_type.id] verifylist = [ ('encryption_type', True), - ("volume_type", self.volume_type.id) + ("volume_type", self.volume_type.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) - self.types_mock.get.assert_called_with(self.volume_type.id) - self.encryption_types_mock.get.assert_called_with(self.volume_type.id) + self.volume_types_mock.get.assert_called_with(self.volume_type.id) + self.volume_encryption_types_mock.get.assert_called_with( + self.volume_type.id + ) self.assertEqual(encryption_columns, columns) self.assertCountEqual(encryption_data, data) 
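# Editorial sketch, not part of the patch: the TestTypeCreate/TestTypeSet
# property tests above assert that the boolean volume-type flags are folded
# into the extra-spec dict handed to set_keys(). A minimal model of that
# mapping, assuming cinder's conventional '<is> True' sentinel for boolean
# extra specs; the helper name and signature below are hypothetical.
def _expected_extra_specs(
    properties=None,
    multiattach=False,
    cacheable=False,
    replicated=False,
    availability_zones=None,
):
    """Build the extra-spec dict a test would expect set_keys() to receive."""
    specs = dict(properties or {})
    if multiattach:
        specs['multiattach'] = '<is> True'
    if cacheable:
        specs['cacheable'] = '<is> True'
    if replicated:
        specs['replication_enabled'] = '<is> True'
    if availability_zones:
        specs['RESKEY:availability_zones'] = ','.join(availability_zones)
    return specs

# e.g. _expected_extra_specs({'myprop': 'myvalue'}, multiattach=True,
#      cacheable=True, replicated=True, availability_zones=['az1'])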
class TestTypeUnset(TestType): - project = identity_fakes.FakeProject.create_one_project() volume_type = volume_fakes.create_one_volume_type( methods={'unset_keys': None}, @@ -850,7 +901,7 @@ class TestTypeUnset(TestType): def setUp(self): super().setUp() - self.types_mock.get.return_value = self.volume_type + self.volume_types_mock.get.return_value = self.volume_type # Return a project self.projects_mock.get.return_value = self.project @@ -860,12 +911,14 @@ def setUp(self): def test_type_unset(self): arglist = [ - '--property', 'property', - '--property', 'multi_property', + '--property', + 'property', + '--property', + 'multi_property', self.volume_type.id, ] verifylist = [ - ('property', ['property', 'multi_property']), + ('properties', ['property', 'multi_property']), ('volume_type', self.volume_type.id), ] @@ -873,12 +926,14 @@ def test_type_unset(self): result = self.cmd.take_action(parsed_args) self.volume_type.unset_keys.assert_called_once_with( - ['property', 'multi_property']) + ['property', 'multi_property'] + ) self.assertIsNone(result) def test_type_unset_project_access(self): arglist = [ - '--project', self.project.id, + '--project', + self.project.id, self.volume_type.id, ] verifylist = [ @@ -890,14 +945,15 @@ def test_type_unset_project_access(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.types_access_mock.remove_project_access.assert_called_with( + self.volume_type_access_mock.remove_project_access.assert_called_with( self.volume_type.id, self.project.id, ) def test_type_unset_not_called_without_project_argument(self): arglist = [ - '--project', '', + '--project', + '', self.volume_type.id, ] verifylist = [ @@ -910,22 +966,27 @@ def test_type_unset_not_called_without_project_argument(self): result = self.cmd.take_action(parsed_args) self.assertIsNone(result) - self.encryption_types_mock.delete.assert_not_called() - self.assertFalse(self.types_access_mock.remove_project_access.called) + self.volume_encryption_types_mock.delete.assert_not_called() + self.assertFalse( + self.volume_type_access_mock.remove_project_access.called + ) def test_type_unset_failed_with_missing_volume_type_argument(self): arglist = [ - '--project', 'identity_fakes.project_id', + '--project', + 'identity_fakes.project_id', ] verifylist = [ ('project', 'identity_fakes.project_id'), ] - self.assertRaises(tests_utils.ParserException, - self.check_parser, - self.cmd, - arglist, - verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) def test_type_unset_encryption_type(self): arglist = [ @@ -939,12 +1000,13 @@ def test_type_unset_encryption_type(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) - self.encryption_types_mock.delete.assert_called_with(self.volume_type) + self.volume_encryption_types_mock.delete.assert_called_with( + self.volume_type + ) self.assertIsNone(result) class TestColumns(TestType): - def test_encryption_info_column_with_info(self): fake_volume_type = volume_fakes.create_one_volume_type() type_id = fake_volume_type.id @@ -955,10 +1017,12 @@ def test_encryption_info_column_with_info(self): 'key_size': None, 'control_location': 'front-end', } - col = volume_type.EncryptionInfoColumn(type_id, - {type_id: encryption_info}) - self.assertEqual(utils.format_dict(encryption_info), - col.human_readable()) + col = volume_type.EncryptionInfoColumn( + type_id, {type_id: encryption_info} + ) + self.assertEqual( + 
utils.format_dict(encryption_info), col.human_readable() + ) self.assertEqual(encryption_info, col.machine_readable()) def test_encryption_info_column_without_info(self): diff --git a/openstackclient/tests/unit/volume/v3/fakes.py b/openstackclient/tests/unit/volume/v3/fakes.py index 623835804d..eb5c170f63 100644 --- a/openstackclient/tests/unit/volume/v3/fakes.py +++ b/openstackclient/tests/unit/volume/v3/fakes.py @@ -10,13 +10,24 @@ # License for the specific language governing permissions and limitations # under the License. +import copy import random +import re from unittest import mock import uuid from cinderclient import api_versions +from keystoneauth1 import discover +from openstack.block_storage.v3 import _proxy +from openstack.block_storage.v3 import availability_zone as _availability_zone +from openstack.block_storage.v3 import backup as _backup +from openstack.block_storage.v3 import extension as _extension +from openstack.block_storage.v3 import limits as _limits +from openstack.block_storage.v3 import resource_filter as _filters +from openstack.block_storage.v3 import volume as _volume +from openstack.compute.v2 import _proxy as _compute_proxy +from openstack.image.v2 import _proxy as _image_proxy -from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes from openstackclient.tests.unit import fakes from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes from openstackclient.tests.unit import utils @@ -31,6 +42,12 @@ def __init__(self, **kwargs): self.attachments = mock.Mock() self.attachments.resource_class = fakes.FakeResource(None, {}) + self.availability_zones = mock.Mock() + self.availability_zones.resource_class = fakes.FakeResource(None, {}) + self.backups = mock.Mock() + self.backups.resource_class = fakes.FakeResource(None, {}) + self.consistencygroups = mock.Mock() + self.consistencygroups.resource_class = fakes.FakeResource(None, {}) self.clusters = mock.Mock() self.clusters.resource_class = fakes.FakeResource(None, {}) self.groups = mock.Mock() @@ -43,35 +60,285 @@ def __init__(self, **kwargs): self.messages.resource_class = fakes.FakeResource(None, {}) self.resource_filters = mock.Mock() self.resource_filters.resource_class = fakes.FakeResource(None, {}) - self.volumes = mock.Mock() - self.volumes.resource_class = fakes.FakeResource(None, {}) + self.restores = mock.Mock() + self.restores.resource_class = fakes.FakeResource(None, {}) + self.transfers = mock.Mock() + self.transfers.resource_class = fakes.FakeResource(None, {}) + self.volume_encryption_types = mock.Mock() + self.volume_encryption_types.resource_class = fakes.FakeResource( + None, {} + ) + self.volume_snapshots = mock.Mock() + self.volume_snapshots.resource_class = fakes.FakeResource(None, {}) + self.volume_type_access = mock.Mock() + self.volume_type_access.resource_class = fakes.FakeResource(None, {}) self.volume_types = mock.Mock() self.volume_types.resource_class = fakes.FakeResource(None, {}) + self.volumes = mock.Mock() + self.volumes.resource_class = fakes.FakeResource(None, {}) self.services = mock.Mock() self.services.resource_class = fakes.FakeResource(None, {}) self.workers = mock.Mock() self.workers.resource_class = fakes.FakeResource(None, {}) -class TestVolume(utils.TestCommand): +class FakeClientMixin: def setUp(self): super().setUp() self.app.client_manager.volume = FakeVolumeClient( endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN ) - self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client( - endpoint=fakes.AUTH_URL, 
token=fakes.AUTH_TOKEN + self.volume_client = self.app.client_manager.volume + + # TODO(stephenfin): Rename to 'volume_client' once all commands are + # migrated to SDK + self.app.client_manager.sdk_connection.volume = mock.Mock( + spec=_proxy.Proxy, ) - self.app.client_manager.compute = compute_fakes.FakeComputev2Client( - endpoint=fakes.AUTH_URL, - token=fakes.AUTH_TOKEN, + self.volume_sdk_client = self.app.client_manager.sdk_connection.volume + self.set_volume_api_version() # default to the lowest + + def set_volume_api_version(self, version: str = '3.0'): + """Set a fake block storage API version. + + :param version: The fake microversion to "support". This should be a + string of format '3.xx'. + :returns: None + """ + assert re.match(r'3.\d+', version) + + self.volume_client.api_version = api_versions.APIVersion(version) + + self.volume_sdk_client.default_microversion = version + self.volume_sdk_client.get_endpoint_data.return_value = ( + discover.EndpointData( + min_microversion='3.0', # cinder has not bumped this yet + max_microversion=version, + ) ) +class TestVolume( + identity_fakes.FakeClientMixin, + FakeClientMixin, + utils.TestCommand, +): + def setUp(self): + super().setUp() + + # avoid circular imports by defining this manually rather than using + # openstackclient.tests.unit.compute.v2.fakes.FakeClientMixin + self.app.client_manager.compute = mock.Mock(_compute_proxy.Proxy) + self.compute_client = self.app.client_manager.compute + + # avoid circular imports by defining this manually rather than using + # openstackclient.tests.unit.image.v2.fakes.FakeClientMixin + self.app.client_manager.image = mock.Mock(spec=_image_proxy.Proxy) + self.image_client = self.app.client_manager.image + + # TODO(stephenfin): Check if the responses are actually the same -create_one_volume = volume_v2_fakes.create_one_volume -create_one_volume_type = volume_v2_fakes.create_one_volume_type +create_one_snapshot = volume_v2_fakes.create_one_snapshot +create_one_service = volume_v2_fakes.create_one_service + + +def create_one_availability_zone(attrs=None): + """Create a fake AZ. + + :param dict attrs: A dictionary with all attributes + :return: A fake + openstack.block_storage.v3.availability_zone.AvailabilityZone object + """ + attrs = attrs or {} + + # Set default attributes. + availability_zone_info = { + 'name': uuid.uuid4().hex, + 'state': {'available': True}, + } + + # Overwrite default attributes. + availability_zone_info.update(attrs) + + availability_zone = _availability_zone.AvailabilityZone( + **availability_zone_info + ) + return availability_zone + + +def create_availability_zones(attrs=None, count=2): + """Create multiple fake AZs. + + :param dict attrs: A dictionary with all attributes + :param int count: The number of availability zones to fake + :return: A list of fake + openstack.block_storage.v3.availability_zone.AvailabilityZone objects + """ + availability_zones = [] + for i in range(0, count): + availability_zone = create_one_availability_zone(attrs) + availability_zones.append(availability_zone) + + return availability_zones + + +def create_one_consistency_group(attrs=None): + """Create a fake consistency group. + + :param dict attrs: + A dictionary with all attributes + :return: + A FakeResource object with id, name, description, etc. + """ + attrs = attrs or {} + + # Set default attributes. 
+ consistency_group_info = { + "id": 'backup-id-' + uuid.uuid4().hex, + "name": 'backup-name-' + uuid.uuid4().hex, + "description": 'description-' + uuid.uuid4().hex, + "status": "error", + "availability_zone": 'zone' + uuid.uuid4().hex, + "created_at": 'time-' + uuid.uuid4().hex, + "volume_types": ['volume-type1'], + } + + # Overwrite default attributes. + consistency_group_info.update(attrs) + + consistency_group = fakes.FakeResource( + info=copy.deepcopy(consistency_group_info), loaded=True + ) + return consistency_group + + +def create_consistency_groups(attrs=None, count=2): + """Create multiple fake consistency groups. + + :param dict attrs: + A dictionary with all attributes + :param int count: + The number of consistency groups to fake + :return: + A list of FakeResource objects faking the consistency groups + """ + consistency_groups = [] + for i in range(0, count): + consistency_group = create_one_consistency_group(attrs) + consistency_groups.append(consistency_group) + + return consistency_groups + + +def create_one_extension(attrs=None): + """Create a fake extension. + + :param dict attrs: A dictionary with all attributes + :return: A fake + openstack.block_storage.v3.extension.Extension object + """ + attrs = attrs or {} + + # Set default attributes. + extension_info = { + 'alias': 'OS-SCH-HNT', + 'description': 'description-' + uuid.uuid4().hex, + 'links': [ + { + "href": "https://github.com/openstack/block-api", + "type": "text/html", + "rel": "describedby", + } + ], + 'name': 'name-' + uuid.uuid4().hex, + 'updated_at': '2013-04-18T00:00:00+00:00', + } + + # Overwrite default attributes. + extension_info.update(attrs) + + extension = _extension.Extension(**extension_info) + return extension + + +def create_one_backup(attrs=None): + """Create a fake backup. + + :param dict attrs: + A dictionary with all attributes + :return: A fake + openstack.block_storage.v3.backup.Backup object + """ + attrs = attrs or {} + + # Set default attributes. + backup_info = { + "availability_zone": 'zone' + uuid.uuid4().hex, + "container": 'container-' + uuid.uuid4().hex, + "created_at": 'time-' + uuid.uuid4().hex, + "data_timestamp": 'time-' + uuid.uuid4().hex, + "description": 'description-' + uuid.uuid4().hex, + "encryption_key_id": None, + "fail_reason": "Service not found for creating backup.", + "has_dependent_backups": False, + "id": 'backup-id-' + uuid.uuid4().hex, + "is_incremental": False, + "metadata": {}, + "name": 'backup-name-' + uuid.uuid4().hex, + "object_count": None, + "project_id": uuid.uuid4().hex, + "size": random.randint(1, 20), + "snapshot_id": 'snapshot-id' + uuid.uuid4().hex, + "status": "error", + "updated_at": 'time-' + uuid.uuid4().hex, + "user_id": uuid.uuid4().hex, + "volume_id": 'volume-id-' + uuid.uuid4().hex, + } + + # Overwrite default attributes. + backup_info.update(attrs) + + backup = _backup.Backup(**backup_info) + return backup + + +def create_backups(attrs=None, count=2): + """Create multiple fake backups. + + :param dict attrs: + A dictionary with all attributes + :param int count: + The number of backups to fake + :return: A list of fake + openstack.block_storage.v3.backup.Backup objects + """ + backups = [] + for i in range(0, count): + backup = create_one_backup(attrs) + backups.append(backup) + + return backups + + +def get_backups(backups=None, count=2): + """Get an iterable MagicMock object with a list of faked backups. + + If backups list is provided, then initialize the Mock object with the + list. Otherwise create one. 
+ + :param List backups: + A list of FakeResource objects faking backups + :param Integer count: + The number of backups to be faked + :return + An iterable Mock object with side_effect set to a list of faked + backups + """ + if backups is None: + backups = create_backups(count) + + return mock.Mock(side_effect=backups) def create_one_cluster(attrs=None): @@ -119,6 +386,86 @@ def create_clusters(attrs=None, count=2): return clusters +def create_one_encryption_volume_type(attrs=None): + """Create a fake encryption volume type. + + :param dict attrs: + A dictionary with all attributes + :return: + A FakeResource object with volume_type_id etc. + """ + attrs = attrs or {} + + # Set default attributes. + encryption_info = { + "volume_type_id": 'type-id-' + uuid.uuid4().hex, + 'provider': 'LuksEncryptor', + 'cipher': None, + 'key_size': None, + 'control_location': 'front-end', + } + + # Overwrite default attributes. + encryption_info.update(attrs) + + encryption_type = fakes.FakeResource( + info=copy.deepcopy(encryption_info), loaded=True + ) + return encryption_type + + +def create_limits(attrs=None): + """Create a fake limits object.""" + attrs = attrs or {} + + limits_attrs = { + 'absolute': { + 'totalSnapshotsUsed': 1, + 'maxTotalBackups': 10, + 'maxTotalVolumeGigabytes': 1000, + 'maxTotalSnapshots': 10, + 'maxTotalBackupGigabytes': 1000, + 'totalBackupGigabytesUsed': 0, + 'maxTotalVolumes': 10, + 'totalVolumesUsed': 4, + 'totalBackupsUsed': 0, + 'totalGigabytesUsed': 35, + }, + 'rate': [ + { + "uri": "*", + "limit": [ + { + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z", + }, + { + "value": 10, + "verb": "PUT", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z", + }, + { + "value": 100, + "verb": "DELETE", + "remaining": 100, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z", + }, + ], + } + ], + } + limits_attrs.update(attrs) + + limits = _limits.Limit(**limits_attrs) + return limits + + def create_one_resource_filter(attrs=None): """Create a fake resource filter. @@ -143,7 +490,7 @@ def create_one_resource_filter(attrs=None): # Overwrite default attributes if there are some attributes set resource_filter_info.update(attrs) - return fakes.FakeResource(None, resource_filter_info, loaded=True) + return _filters.ResourceFilter(**resource_filter_info) def create_resource_filters(attrs=None, count=2): @@ -160,6 +507,228 @@ def create_resource_filters(attrs=None, count=2): return resource_filters +def create_one_transfer(attrs=None): + """Create a fake transfer. + + :param dict attrs: + A dictionary with all attributes of Transfer Request + :return: + A FakeResource object with volume_id, name, id. + """ + # Set default attribute + transfer_info = { + 'volume_id': 'volume-id-' + uuid.uuid4().hex, + 'name': 'fake_transfer_name', + 'id': 'id-' + uuid.uuid4().hex, + 'links': 'links-' + uuid.uuid4().hex, + } + + # Overwrite default attributes if there are some attributes set + attrs = attrs or {} + + transfer_info.update(attrs) + + transfer = fakes.FakeResource(None, transfer_info, loaded=True) + + return transfer + + +def create_transfers(attrs=None, count=2): + """Create multiple fake transfers. 
+ + :param dict attrs: + A dictionary with all attributes of transfer + :param Integer count: + The number of transfers to be faked + :return: + A list of FakeResource objects + """ + transfers = [] + for n in range(0, count): + transfers.append(create_one_transfer(attrs)) + + return transfers + + +def get_transfers(transfers=None, count=2): + """Get an iterable MagicMock object with a list of faked transfers. + + If transfers list is provided, then initialize the Mock object with the + list. Otherwise create one. + + :param List transfers: + A list of FakeResource objects faking transfers + :param Integer count: + The number of transfers to be faked + :return + An iterable Mock object with side_effect set to a list of faked + transfers + """ + if transfers is None: + transfers = create_transfers(count) + + return mock.Mock(side_effect=transfers) + + +def create_one_type_access(attrs=None): + """Create a fake volume type access for project. + + :param dict attrs: + A dictionary with all attributes + :return: + A FakeResource object, with Volume_type_ID and Project_ID. + """ + if attrs is None: + attrs = {} + + # Set default attributes. + type_access_attrs = { + 'volume_type_id': 'volume-type-id-' + uuid.uuid4().hex, + 'project_id': 'project-id-' + uuid.uuid4().hex, + } + + # Overwrite default attributes. + type_access_attrs.update(attrs) + + type_access = fakes.FakeResource(None, type_access_attrs, loaded=True) + + return type_access + + +def create_one_volume(attrs=None): + """Create a fake volume. + + :param dict attrs: + A dictionary with all attributes of volume + :return: + A FakeResource object with id, name, status, etc. + """ + attrs = attrs or {} + + # Set default attribute + volume_info = { + 'id': 'volume-id' + uuid.uuid4().hex, + 'name': 'volume-name' + uuid.uuid4().hex, + 'description': 'description' + uuid.uuid4().hex, + 'status': random.choice(['available', 'in_use']), + 'size': random.randint(1, 20), + 'volume_type': random.choice(['fake_lvmdriver-1', 'fake_lvmdriver-2']), + 'bootable': random.randint(0, 1), + 'metadata': { + 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex, + 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex, + 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex, + }, + 'snapshot_id': random.randint(1, 5), + 'availability_zone': 'zone' + uuid.uuid4().hex, + 'attachments': [ + { + 'device': '/dev/' + uuid.uuid4().hex, + 'server_id': uuid.uuid4().hex, + }, + ], + } + + # Overwrite default attributes if there are some attributes set + volume_info.update(attrs) + + volume = fakes.FakeResource(None, volume_info, loaded=True) + return volume + + +def create_volumes(attrs=None, count=2): + """Create multiple fake volumes. + + :param dict attrs: + A dictionary with all attributes of volume + :param Integer count: + The number of volumes to be faked + :return: + A list of FakeResource objects + """ + volumes = [] + for n in range(0, count): + volumes.append(create_one_volume(attrs)) + + return volumes + + +def get_volumes(volumes=None, count=2): + """Get an iterable MagicMock object with a list of faked volumes. + + If volumes list is provided, then initialize the Mock object with the + list. Otherwise create one. 
+ + :param List volumes: + A list of FakeResource objects faking volumes + :param Integer count: + The number of volumes to be faked + :return + An iterable Mock object with side_effect set to a list of faked + volumes + """ + if volumes is None: + volumes = create_volumes(count) + + return mock.Mock(side_effect=volumes) + + +def create_one_sdk_volume(attrs=None): + """Create a fake volume. + + :param dict attrs: + A dictionary with all attributes of volume + :return: + A FakeResource object with id, name, status, etc. + """ + attrs = attrs or {} + + # Set default attribute + volume_info = { + 'id': 'volume-id' + uuid.uuid4().hex, + 'name': 'volume-name' + uuid.uuid4().hex, + 'description': 'description' + uuid.uuid4().hex, + 'status': random.choice(['available', 'in_use']), + 'size': random.randint(1, 20), + 'volume_type': random.choice(['fake_lvmdriver-1', 'fake_lvmdriver-2']), + 'bootable': random.choice(['true', 'false']), + 'metadata': { + 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex, + 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex, + 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex, + }, + 'snapshot_id': random.randint(1, 5), + 'availability_zone': 'zone' + uuid.uuid4().hex, + 'attachments': [ + { + 'device': '/dev/' + uuid.uuid4().hex, + 'server_id': uuid.uuid4().hex, + }, + ], + } + + # Overwrite default attributes if there are some attributes set + volume_info.update(attrs) + return _volume.Volume(**volume_info) + + +def create_sdk_volumes(attrs=None, count=2): + """Create multiple fake volumes. + + :param dict attrs: + A dictionary with all attributes of volume + :param Integer count: + The number of volumes to be faked + :return: + A list of FakeResource objects + """ + volumes = [] + for n in range(0, count): + volumes.append(create_one_sdk_volume(attrs)) + + return volumes + + def create_one_volume_group(attrs=None): """Create a fake group. @@ -442,6 +1011,75 @@ def get_volume_attachments(attachments=None, count=2): return mock.Mock(side_effect=attachments) +def create_one_volume_type(attrs=None, methods=None): + """Create a fake volume type. + + :param dict attrs: + A dictionary with all attributes + :param dict methods: + A dictionary with all methods + :return: + A FakeResource object with id, name, description, etc. + """ + attrs = attrs or {} + methods = methods or {} + + # Set default attributes. + volume_type_info = { + "id": 'type-id-' + uuid.uuid4().hex, + "name": 'type-name-' + uuid.uuid4().hex, + "description": 'type-description-' + uuid.uuid4().hex, + "extra_specs": {"foo": "bar"}, + "is_public": True, + } + + # Overwrite default attributes. + volume_type_info.update(attrs) + + volume_type = fakes.FakeResource( + info=copy.deepcopy(volume_type_info), methods=methods, loaded=True + ) + return volume_type + + +def create_volume_types(attrs=None, count=2): + """Create multiple fake volume_types. + + :param dict attrs: + A dictionary with all attributes + :param int count: + The number of types to fake + :return: + A list of FakeResource objects faking the types + """ + volume_types = [] + for i in range(0, count): + volume_type = create_one_volume_type(attrs) + volume_types.append(volume_type) + + return volume_types + + +def get_volume_types(volume_types=None, count=2): + """Get an iterable MagicMock object with a list of faked volume types. + + If volume_types list is provided, then initialize the Mock object with + the list. Otherwise create one. 
+ + :param List volume_types: + A list of FakeResource objects faking volume types + :param Integer count: + The number of volume types to be faked + :return + An iterable Mock object with side_effect set to a list of faked + volume types + """ + if volume_types is None: + volume_types = create_volume_types(count) + + return mock.Mock(side_effect=volume_types) + + def create_service_log_level_entry(attrs=None): service_log_level_info = { 'host': 'host_test', @@ -455,7 +1093,8 @@ def create_service_log_level_entry(attrs=None): service_log_level_info.update(attrs) service_log_level = fakes.FakeResource( - None, service_log_level_info, loaded=True) + None, service_log_level_info, loaded=True + ) return service_log_level @@ -481,10 +1120,13 @@ def create_cleanup_records(): cleaning_records.append(cleaning_work_info) unavailable_records.append(unavailable_work_info) - cleaning = [fakes.FakeResource( - None, obj, loaded=True) for obj in cleaning_records] - unavailable = [fakes.FakeResource( - None, obj, loaded=True) for obj in unavailable_records] + cleaning = [ + fakes.FakeResource(None, obj, loaded=True) for obj in cleaning_records + ] + unavailable = [ + fakes.FakeResource(None, obj, loaded=True) + for obj in unavailable_records + ] return cleaning, unavailable @@ -513,7 +1155,8 @@ def create_volume_manage_list_records(count=2): volume_manage_list = [] for i in range(count): volume_manage_list.append( - create_one_manage_record({'size': str(i + 1)})) + create_one_manage_record({'size': str(i + 1)}) + ) return volume_manage_list @@ -522,6 +1165,7 @@ def create_snapshot_manage_list_records(count=2): snapshot_manage_list = [] for i in range(count): snapshot_manage_list.append( - create_one_manage_record({'size': str(i + 1)}, snapshot=True)) + create_one_manage_record({'size': str(i + 1)}, snapshot=True) + ) return snapshot_manage_list diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py b/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py index b48ce2f911..425b3875c9 100644 --- a/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py +++ b/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py @@ -12,7 +12,6 @@ import uuid -from cinderclient import api_versions from osc_lib import exceptions from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes @@ -20,17 +19,15 @@ class TestBlockStorage(volume_fakes.TestVolume): - def setUp(self): super().setUp() # Get a shortcut to the BlockStorageWorkerManager Mock - self.worker_mock = self.app.client_manager.volume.workers + self.worker_mock = self.volume_client.workers self.worker_mock.reset_mock() class TestBlockStorageCleanup(TestBlockStorage): - cleaning, unavailable = volume_fakes.create_cleanup_records() def setUp(self): @@ -39,15 +36,12 @@ def setUp(self): self.worker_mock.clean.return_value = (self.cleaning, self.unavailable) # Get the command object to test - self.cmd = \ - block_storage_cleanup.BlockStorageCleanup(self.app, None) + self.cmd = block_storage_cleanup.BlockStorageCleanup(self.app, None) def test_cleanup(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.24') + self.set_volume_api_version('3.24') - arglist = [ - ] + arglist = [] verifylist = [ ('cluster', None), ('host', None), @@ -62,22 +56,12 @@ def test_cleanup(self): expected_columns = ('ID', 'Cluster Name', 'Host', 'Binary', 'Status') cleaning_data = tuple( - ( - obj.id, - obj.cluster_name, - obj.host, - obj.binary, - 'Cleaning' - ) for obj in self.cleaning + 
(obj.id, obj.cluster_name, obj.host, obj.binary, 'Cleaning') + for obj in self.cleaning ) unavailable_data = tuple( - ( - obj.id, - obj.cluster_name, - obj.host, - obj.binary, - 'Unavailable' - ) for obj in self.unavailable + (obj.id, obj.cluster_name, obj.host, obj.binary, 'Unavailable') + for obj in self.unavailable ) expected_data = cleaning_data + unavailable_data columns, data = self.cmd.take_action(parsed_args) @@ -91,8 +75,7 @@ def test_cleanup(self): self.worker_mock.clean.assert_called_once_with() def test_block_storage_cleanup_pre_324(self): - arglist = [ - ] + arglist = [] verifylist = [ ('cluster', None), ('host', None), @@ -104,14 +87,15 @@ def test_block_storage_cleanup_pre_324(self): ('service_id', None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.24 or greater is required', str(exc)) + '--os-volume-api-version 3.24 or greater is required', str(exc) + ) def test_cleanup_with_args(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.24') + self.set_volume_api_version('3.24') fake_cluster = 'fake-cluster' fake_host = 'fake-host' @@ -120,14 +104,20 @@ def test_cleanup_with_args(self): fake_resource_type = 'Volume' fake_service_id = 1 arglist = [ - '--cluster', fake_cluster, - '--host', fake_host, - '--binary', fake_binary, + '--cluster', + fake_cluster, + '--host', + fake_host, + '--binary', + fake_binary, '--down', '--enabled', - '--resource-id', fake_resource_id, - '--resource-type', fake_resource_type, - '--service-id', str(fake_service_id), + '--resource-id', + fake_resource_id, + '--resource-type', + fake_resource_type, + '--service-id', + str(fake_service_id), ] verifylist = [ ('cluster', fake_cluster), @@ -143,22 +133,12 @@ def test_cleanup_with_args(self): expected_columns = ('ID', 'Cluster Name', 'Host', 'Binary', 'Status') cleaning_data = tuple( - ( - obj.id, - obj.cluster_name, - obj.host, - obj.binary, - 'Cleaning' - ) for obj in self.cleaning + (obj.id, obj.cluster_name, obj.host, obj.binary, 'Cleaning') + for obj in self.cleaning ) unavailable_data = tuple( - ( - obj.id, - obj.cluster_name, - obj.host, - obj.binary, - 'Unavailable' - ) for obj in self.unavailable + (obj.id, obj.cluster_name, obj.host, obj.binary, 'Unavailable') + for obj in self.unavailable ) expected_data = cleaning_data + unavailable_data columns, data = self.cmd.take_action(parsed_args) @@ -175,4 +155,5 @@ def test_cleanup_with_args(self): disabled=False, resource_id=fake_resource_id, resource_type=fake_resource_type, - service_id=fake_service_id) + service_id=fake_service_id, + ) diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_cluster.py b/openstackclient/tests/unit/volume/v3/test_block_storage_cluster.py index fdfd110084..758105c569 100644 --- a/openstackclient/tests/unit/volume/v3/test_block_storage_cluster.py +++ b/openstackclient/tests/unit/volume/v3/test_block_storage_cluster.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
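# Editorial note, not part of the patch: the tests in this module now pin the
# fake microversion through set_volume_api_version() from the FakeClientMixin
# added in fakes.py above, rather than assigning api_versions.APIVersion to
# the client directly. That helper stubs both negotiation paths -- the
# cinderclient APIVersion attribute and the SDK proxy's EndpointData-based
# discovery -- so a single call works for commands on either client, e.g.:
#
#     self.set_volume_api_version('3.7')
#     # self.volume_client.api_version == api_versions.APIVersion('3.7')
#     # self.volume_sdk_client.default_microversion == '3.7'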
-from cinderclient import api_versions from osc_lib import exceptions from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes @@ -18,17 +17,15 @@ class TestBlockStorageCluster(volume_fakes.TestVolume): - def setUp(self): super().setUp() # Get a shortcut to the BlockStorageClusterManager Mock - self.cluster_mock = self.app.client_manager.volume.clusters + self.cluster_mock = self.volume_client.clusters self.cluster_mock.reset_mock() class TestBlockStorageClusterList(TestBlockStorageCluster): - # The cluster to be listed fake_clusters = volume_fakes.create_clusters() @@ -38,15 +35,14 @@ def setUp(self): self.cluster_mock.list.return_value = self.fake_clusters # Get the command object to test - self.cmd = \ - block_storage_cluster.ListBlockStorageCluster(self.app, None) + self.cmd = block_storage_cluster.ListBlockStorageCluster( + self.app, None + ) def test_cluster_list(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.7') + self.set_volume_api_version('3.7') - arglist = [ - ] + arglist = [] verifylist = [ ('cluster', None), ('binary', None), @@ -65,7 +61,8 @@ def test_cluster_list(self): cluster.binary, cluster.state, cluster.status, - ) for cluster in self.fake_clusters + ) + for cluster in self.fake_clusters ) columns, data = self.cmd.take_action(parsed_args) @@ -84,16 +81,19 @@ def test_cluster_list(self): ) def test_cluster_list_with_full_options(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.7') + self.set_volume_api_version('3.7') arglist = [ - '--cluster', 'foo', - '--binary', 'bar', + '--cluster', + 'foo', + '--binary', + 'bar', '--up', '--disabled', - '--num-hosts', '5', - '--num-down-hosts', '0', + '--num-hosts', + '5', + '--num-down-hosts', + '0', '--long', ] verifylist = [ @@ -131,7 +131,8 @@ def test_cluster_list_with_full_options(self): cluster.disabled_reason, cluster.created_at, cluster.updated_at, - ) for cluster in self.fake_clusters + ) + for cluster in self.fake_clusters ) columns, data = self.cmd.take_action(parsed_args) @@ -150,11 +151,9 @@ def test_cluster_list_with_full_options(self): ) def test_cluster_list_pre_v37(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.6') + self.set_volume_api_version('3.6') - arglist = [ - ] + arglist = [] verifylist = [ ('cluster', None), ('binary', None), @@ -167,15 +166,14 @@ def test_cluster_list_pre_v37(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.7 or greater is required', str(exc)) + '--os-volume-api-version 3.7 or greater is required', str(exc) + ) class TestBlockStorageClusterSet(TestBlockStorageCluster): - cluster = volume_fakes.create_one_cluster() columns = ( 'Name', @@ -213,12 +211,10 @@ def setUp(self): self.cluster_mock.update.return_value = self.cluster - self.cmd = \ - block_storage_cluster.SetBlockStorageCluster(self.app, None) + self.cmd = block_storage_cluster.SetBlockStorageCluster(self.app, None) def test_cluster_set(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.7') + self.set_volume_api_version('3.7') arglist = [ '--enable', @@ -245,13 +241,14 @@ def test_cluster_set(self): ) def test_cluster_set_disable_with_reason(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.7') + 
self.set_volume_api_version('3.7') arglist = [ - '--binary', self.cluster.binary, + '--binary', + self.cluster.binary, '--disable', - '--disable-reason', 'foo', + '--disable-reason', + 'foo', self.cluster.name, ] verifylist = [ @@ -274,11 +271,11 @@ def test_cluster_set_disable_with_reason(self): ) def test_cluster_set_only_with_disable_reason(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.7') + self.set_volume_api_version('3.7') arglist = [ - '--disable-reason', 'foo', + '--disable-reason', + 'foo', self.cluster.name, ] verifylist = [ @@ -290,19 +287,19 @@ def test_cluster_set_only_with_disable_reason(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - "Cannot specify --disable-reason without --disable", str(exc)) + "Cannot specify --disable-reason without --disable", str(exc) + ) def test_cluster_set_enable_with_disable_reason(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.7') + self.set_volume_api_version('3.7') arglist = [ '--enable', - '--disable-reason', 'foo', + '--disable-reason', + 'foo', self.cluster.name, ] verifylist = [ @@ -314,15 +311,14 @@ def test_cluster_set_enable_with_disable_reason(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - "Cannot specify --disable-reason without --disable", str(exc)) + "Cannot specify --disable-reason without --disable", str(exc) + ) def test_cluster_set_pre_v37(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.6') + self.set_volume_api_version('3.6') arglist = [ '--enable', @@ -338,15 +334,14 @@ def test_cluster_set_pre_v37(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.7 or greater is required', str(exc)) + '--os-volume-api-version 3.7 or greater is required', str(exc) + ) class TestBlockStorageClusterShow(TestBlockStorageCluster): - cluster = volume_fakes.create_one_cluster() columns = ( 'Name', @@ -384,15 +379,16 @@ def setUp(self): self.cluster_mock.show.return_value = self.cluster - self.cmd = \ - block_storage_cluster.ShowBlockStorageCluster(self.app, None) + self.cmd = block_storage_cluster.ShowBlockStorageCluster( + self.app, None + ) def test_cluster_show(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.7') + self.set_volume_api_version('3.7') arglist = [ - '--binary', self.cluster.binary, + '--binary', + self.cluster.binary, self.cluster.name, ] verifylist = [ @@ -412,11 +408,11 @@ def test_cluster_show(self): ) def test_cluster_show_pre_v37(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.6') + self.set_volume_api_version('3.6') arglist = [ - '--binary', self.cluster.binary, + '--binary', + self.cluster.binary, self.cluster.name, ] verifylist = [ @@ -427,8 +423,8 @@ def test_cluster_show_pre_v37(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + 
exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.7 or greater is required', str(exc)) + '--os-volume-api-version 3.7 or greater is required', str(exc) + ) diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py b/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py index 35ea62744c..9f27197c56 100644 --- a/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py +++ b/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py @@ -10,10 +10,10 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -from cinderclient import api_versions import ddt +from openstack.block_storage.v3 import service as _service +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions from openstackclient.tests.unit import utils as tests_utils @@ -21,40 +21,34 @@ from openstackclient.volume.v3 import block_storage_log_level as service -class TestService(volume_fakes.TestVolume): - - def setUp(self): - super().setUp() - - # Get a shortcut to the ServiceManager Mock - self.service_mock = self.app.client_manager.volume.services - self.service_mock.reset_mock() - - -class TestBlockStorageLogLevelList(TestService): - - service_log = volume_fakes.create_service_log_level_entry() - +class TestBlockStorageLogLevelList(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.service_mock.get_log_levels.return_value = [self.service_log] + self.log_level = sdk_fakes.generate_fake_resource( + _service.LogLevel, binary='cinder-scheduler' + ) + self.volume_sdk_client.get_service_log_levels.return_value = [ + self.log_level + ] - # Get the command object to test self.cmd = service.BlockStorageLogLevelList(self.app, None) def test_block_storage_log_level_list(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.32') + self.set_volume_api_version('3.32') + arglist = [ - '--host', self.service_log.host, - '--service', self.service_log.binary, - '--log-prefix', self.service_log.prefix, + '--host', + self.log_level.host, + '--service', + self.log_level.binary, + '--log-prefix', + 'cinder.', ] verifylist = [ - ('host', self.service_log.host), - ('service', self.service_log.binary), - ('log_prefix', self.service_log.prefix), + ('host', self.log_level.host), + ('service', self.log_level.binary), + ('log_prefix', 'cinder.'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -66,168 +60,203 @@ def test_block_storage_log_level_list(self): 'Prefix', 'Level', ] - - # confirming if all expected columns are present in the result. + datalist = tuple( + ( + self.log_level.binary, + self.log_level.host, + prefix, + level, + ) + for prefix, level in self.log_level.levels.values() + ) self.assertEqual(expected_columns, columns) - - datalist = (( - self.service_log.binary, - self.service_log.host, - self.service_log.prefix, - self.service_log.level, - ), ) - - # confirming if all expected values are present in the result. 
self.assertEqual(datalist, tuple(data)) - # checking if proper call was made to get log level of services - self.service_mock.get_log_levels.assert_called_with( - server=self.service_log.host, - binary=self.service_log.binary, - prefix=self.service_log.prefix, + self.volume_sdk_client.get_service_log_levels.assert_called_with( + server=self.log_level.host, + binary=self.log_level.binary, + prefix='cinder.', ) def test_block_storage_log_level_list_pre_332(self): arglist = [ - '--host', self.service_log.host, - '--service', 'cinder-api', - '--log-prefix', 'cinder_test.api.common', + '--host', + self.log_level.host, + '--service', + 'cinder-api', + '--log-prefix', + 'cinder_test.api.common', ] verifylist = [ - ('host', self.service_log.host), + ('host', self.log_level.host), ('service', 'cinder-api'), ('log_prefix', 'cinder_test.api.common'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.32 or greater is required', str(exc)) + '--os-volume-api-version 3.32 or greater is required', str(exc) + ) def test_block_storage_log_level_list_invalid_service_name(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.32') + self.set_volume_api_version('3.32') + arglist = [ - '--host', self.service_log.host, - '--service', 'nova-api', - '--log-prefix', 'cinder_test.api.common', + '--host', + self.log_level.host, + '--service', + 'nova-api', + '--log-prefix', + 'cinder_test.api.common', ] verifylist = [ - ('host', self.service_log.host), + ('host', self.log_level.host), ('service', 'nova-api'), ('log_prefix', 'cinder_test.api.common'), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) @ddt.ddt -class TestBlockStorageLogLevelSet(TestService): - - service_log = volume_fakes.create_service_log_level_entry() - +class TestBlockStorageLogLevelSet(volume_fakes.TestVolume): def setUp(self): super().setUp() - # Get the command object to test + self.log_level = sdk_fakes.generate_fake_resource( + _service.LogLevel, binary='cinder-api' + ) + self.volume_sdk_client.set_service_log_levels.return_value = None + self.cmd = service.BlockStorageLogLevelSet(self.app, None) def test_block_storage_log_level_set(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.32') + self.set_volume_api_version('3.32') + arglist = [ 'ERROR', - '--host', self.service_log.host, - '--service', self.service_log.binary, - '--log-prefix', self.service_log.prefix, + '--host', + self.log_level.host, + '--service', + self.log_level.binary, + '--log-prefix', + 'cinder.api.common', ] verifylist = [ ('level', 'ERROR'), - ('host', self.service_log.host), - ('service', self.service_log.binary), - ('log_prefix', self.service_log.prefix), + ('host', self.log_level.host), + ('service', self.log_level.binary), + ('log_prefix', 'cinder.api.common'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - self.cmd.take_action(parsed_args) + ret = self.cmd.take_action(parsed_args) - # checking if proper call was made to set log level of services - self.service_mock.set_log_levels.assert_called_with( + self.assertIsNone(ret) + 
self.volume_sdk_client.set_service_log_levels.assert_called_with( level='ERROR', - server=self.service_log.host, - binary=self.service_log.binary, - prefix=self.service_log.prefix, + server=self.log_level.host, + binary=self.log_level.binary, + prefix='cinder.api.common', ) def test_block_storage_log_level_set_pre_332(self): arglist = [ 'ERROR', - '--host', self.service_log.host, - '--service', 'cinder-api', - '--log-prefix', 'cinder_test.api.common', + '--host', + self.log_level.host, + '--service', + 'cinder-api', + '--log-prefix', + 'cinder.api.common', ] verifylist = [ ('level', 'ERROR'), - ('host', self.service_log.host), + ('host', self.log_level.host), ('service', 'cinder-api'), - ('log_prefix', 'cinder_test.api.common'), + ('log_prefix', 'cinder.api.common'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.32 or greater is required', str(exc)) + '--os-volume-api-version 3.32 or greater is required', str(exc) + ) def test_block_storage_log_level_set_invalid_service_name(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.32') + self.set_volume_api_version('3.32') + arglist = [ 'ERROR', - '--host', self.service_log.host, - '--service', 'nova-api', - '--log-prefix', 'cinder.api.common', + '--host', + self.log_level.host, + '--service', + 'nova-api', + '--log-prefix', + 'cinder.api.common', ] verifylist = [ ('level', 'ERROR'), - ('host', self.service_log.host), + ('host', self.log_level.host), ('service', 'nova-api'), ('log_prefix', 'cinder.api.common'), ] - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) @ddt.data('WARNING', 'info', 'Error', 'debuG', 'fake-log-level') def test_block_storage_log_level_set_log_level(self, log_level): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.32') + self.set_volume_api_version('3.32') + arglist = [ log_level, - '--host', self.service_log.host, - '--service', 'cinder-api', - '--log-prefix', 'cinder.api.common', + '--host', + self.log_level.host, + '--service', + 'cinder-api', + '--log-prefix', + 'cinder.api.common', ] verifylist = [ ('level', log_level.upper()), - ('host', self.service_log.host), + ('host', self.log_level.host), ('service', 'cinder-api'), ('log_prefix', 'cinder.api.common'), ] if log_level == 'fake-log-level': - self.assertRaises(tests_utils.ParserException, self.check_parser, - self.cmd, arglist, verifylist) + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) else: parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) - # checking if proper call was made to set log level of services - self.service_mock.set_log_levels.assert_called_with( + self.volume_sdk_client.set_service_log_levels.assert_called_with( level=log_level.upper(), - server=self.service_log.host, - binary=self.service_log.binary, - prefix=self.service_log.prefix) + server=self.log_level.host, + binary=self.log_level.binary, + prefix='cinder.api.common', + ) diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py b/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py 
index afd0fd358e..55d7baf030 100644 --- a/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py +++ b/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py @@ -9,51 +9,49 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -from cinderclient import api_versions +from unittest import mock + from osc_lib import exceptions from openstackclient.tests.unit import utils as tests_utils -from openstackclient.tests.unit.volume.v2 import fakes as v2_volume_fakes from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes from openstackclient.volume.v3 import block_storage_manage -class TestBlockStorageManage(v2_volume_fakes.TestVolume): - +class TestBlockStorageManage(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.volumes_mock = self.app.client_manager.volume.volumes + self.volumes_mock = self.volume_client.volumes self.volumes_mock.reset_mock() - self.snapshots_mock = self.app.client_manager.volume.volume_snapshots + self.snapshots_mock = self.volume_client.volume_snapshots self.snapshots_mock.reset_mock() class TestBlockStorageVolumeManage(TestBlockStorageManage): - volume_manage_list = volume_fakes.create_volume_manage_list_records() def setUp(self): super().setUp() self.volumes_mock.list_manageable.return_value = ( - self.volume_manage_list) + self.volume_manage_list + ) # Get the command object to test self.cmd = block_storage_manage.BlockStorageManageVolumes( - self.app, None) + self.app, None + ) def test_block_storage_volume_manage_list(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.8') - host = 'fake_host' + self.set_volume_api_version('3.8') + arglist = [ - host, + 'fake_host', ] verifylist = [ - ('host', host), + ('host', 'fake_host'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -63,121 +61,113 @@ def test_block_storage_volume_manage_list(self): 'reference', 'size', 'safe_to_manage', - 'reason_not_safe', - 'cinder_id', - 'extra_info', ] - - # confirming if all expected columns are present in the result. - self.assertEqual(expected_columns, columns) - datalist = [] for volume_record in self.volume_manage_list: manage_details = ( volume_record.reference, volume_record.size, volume_record.safe_to_manage, - volume_record.reason_not_safe, - volume_record.cinder_id, - volume_record.extra_info, ) datalist.append(manage_details) datalist = tuple(datalist) - # confirming if all expected values are present in the result. 
+ self.assertEqual(expected_columns, columns) self.assertEqual(datalist, tuple(data)) # checking if proper call was made to get volume manageable list self.volumes_mock.list_manageable.assert_called_with( - host=parsed_args.host, - detailed=parsed_args.detailed, - marker=parsed_args.marker, - limit=parsed_args.limit, - offset=parsed_args.offset, - sort=parsed_args.sort, - cluster=parsed_args.cluster, + host='fake_host', + detailed=False, + marker=None, + limit=None, + offset=None, + sort=None, + cluster=None, ) - def test_block_storage_volume_manage_pre_38(self): - host = 'fake_host' + def test_block_storage_volume_manage_list__pre_v38(self): + self.set_volume_api_version('3.7') + arglist = [ - host, + 'fake_host', ] verifylist = [ - ('host', host), + ('host', 'fake_host'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.8 or greater is required', str(exc)) + '--os-volume-api-version 3.8 or greater is required', str(exc) + ) + + def test_block_storage_volume_manage_list__pre_v317(self): + self.set_volume_api_version('3.16') - def test_block_storage_volume_manage_pre_317(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.16') - cluster = 'fake_cluster' arglist = [ - '--cluster', cluster, + '--cluster', + 'fake_cluster', ] verifylist = [ - ('cluster', cluster), + ('cluster', 'fake_cluster'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.17 or greater is required', str(exc)) + '--os-volume-api-version 3.17 or greater is required', str(exc) + ) self.assertIn('--cluster', str(exc)) - def test_block_storage_volume_manage_host_and_cluster(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.17') - host = 'fake_host' - cluster = 'fake_cluster' + def test_block_storage_volume_manage_list__host_and_cluster(self): + self.set_volume_api_version('3.17') + arglist = [ - host, - '--cluster', cluster, + 'fake_host', + '--cluster', + 'fake_cluster', ] verifylist = [ - ('host', host), - ('cluster', cluster), + ('host', 'fake_host'), + ('cluster', 'fake_cluster'), ] - exc = self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, - arglist, verifylist) + exc = self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) self.assertIn( - 'argument --cluster: not allowed with argument ', str(exc)) - - def test_block_storage_volume_manage_list_all_args(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.8') - host = 'fake_host' - detailed = True - marker = 'fake_marker' - limit = '5' - offset = '3' - sort = 'size:asc' + 'argument --cluster: not allowed with argument ', str(exc) + ) + + def test_block_storage_volume_manage_list__detailed(self): + """This option is deprecated.""" + self.set_volume_api_version('3.8') + arglist = [ - host, - '--detailed', str(detailed), - '--marker', marker, - '--limit', limit, - '--offset', offset, - '--sort', sort, + '--detailed', + 'True', + 'fake_host', ] verifylist = [ - ('host', host), - ('detailed', str(detailed)), - ('marker', 
marker), - ('limit', limit), - ('offset', offset), - ('sort', sort), + ('host', 'fake_host'), + ('detailed', 'True'), + ('marker', None), + ('limit', None), + ('offset', None), + ('sort', None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = self.cmd.take_action(parsed_args) + with mock.patch.object(self.cmd.log, 'warning') as mock_warning: + columns, data = self.cmd.take_action(parsed_args) expected_columns = [ 'reference', @@ -187,10 +177,74 @@ def test_block_storage_volume_manage_list_all_args(self): 'cinder_id', 'extra_info', ] + datalist = [] + for volume_record in self.volume_manage_list: + manage_details = ( + volume_record.reference, + volume_record.size, + volume_record.safe_to_manage, + volume_record.reason_not_safe, + volume_record.cinder_id, + volume_record.extra_info, + ) + datalist.append(manage_details) + datalist = tuple(datalist) - # confirming if all expected columns are present in the result. self.assertEqual(expected_columns, columns) + self.assertEqual(datalist, tuple(data)) + # checking if proper call was made to get volume manageable list + self.volumes_mock.list_manageable.assert_called_with( + host='fake_host', + detailed=True, + marker=None, + limit=None, + offset=None, + sort=None, + cluster=None, + ) + mock_warning.assert_called_once() + self.assertIn( + "The --detailed option has been deprecated.", + str(mock_warning.call_args[0][0]), + ) + + def test_block_storage_volume_manage_list__all_args(self): + self.set_volume_api_version('3.8') + + arglist = [ + 'fake_host', + '--long', + '--marker', + 'fake_marker', + '--limit', + '5', + '--offset', + '3', + '--sort', + 'size:asc', + ] + verifylist = [ + ('host', 'fake_host'), + ('detailed', None), + ('long', True), + ('marker', 'fake_marker'), + ('limit', '5'), + ('offset', '3'), + ('sort', 'size:asc'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + expected_columns = [ + 'reference', + 'size', + 'safe_to_manage', + 'reason_not_safe', + 'cinder_id', + 'extra_info', + ] datalist = [] for volume_record in self.volume_manage_list: manage_details = ( @@ -204,44 +258,44 @@ def test_block_storage_volume_manage_list_all_args(self): datalist.append(manage_details) datalist = tuple(datalist) - # confirming if all expected values are present in the result. 
+ self.assertEqual(expected_columns, columns) self.assertEqual(datalist, tuple(data)) # checking if proper call was made to get volume manageable list self.volumes_mock.list_manageable.assert_called_with( - host=host, - detailed=detailed, - marker=marker, - limit=limit, - offset=offset, - sort=sort, - cluster=parsed_args.cluster, + host='fake_host', + detailed=True, + marker='fake_marker', + limit='5', + offset='3', + sort='size:asc', + cluster=None, ) class TestBlockStorageSnapshotManage(TestBlockStorageManage): - snapshot_manage_list = volume_fakes.create_snapshot_manage_list_records() def setUp(self): super().setUp() self.snapshots_mock.list_manageable.return_value = ( - self.snapshot_manage_list) + self.snapshot_manage_list + ) # Get the command object to test self.cmd = block_storage_manage.BlockStorageManageSnapshots( - self.app, None) + self.app, None + ) def test_block_storage_snapshot_manage_list(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.8') - host = 'fake_host' + self.set_volume_api_version('3.8') + arglist = [ - host, + 'fake_host', ] verifylist = [ - ('host', host), + ('host', 'fake_host'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -252,14 +306,7 @@ def test_block_storage_snapshot_manage_list(self): 'size', 'safe_to_manage', 'source_reference', - 'reason_not_safe', - 'cinder_id', - 'extra_info', ] - - # confirming if all expected columns are present in the result. - self.assertEqual(expected_columns, columns) - datalist = [] for snapshot_record in self.snapshot_manage_list: manage_details = ( @@ -267,107 +314,105 @@ def test_block_storage_snapshot_manage_list(self): snapshot_record.size, snapshot_record.safe_to_manage, snapshot_record.source_reference, - snapshot_record.reason_not_safe, - snapshot_record.cinder_id, - snapshot_record.extra_info, ) datalist.append(manage_details) datalist = tuple(datalist) - # confirming if all expected values are present in the result. 
+ self.assertEqual(expected_columns, columns) self.assertEqual(datalist, tuple(data)) # checking if proper call was made to get snapshot manageable list self.snapshots_mock.list_manageable.assert_called_with( - host=parsed_args.host, - detailed=parsed_args.detailed, - marker=parsed_args.marker, - limit=parsed_args.limit, - offset=parsed_args.offset, - sort=parsed_args.sort, - cluster=parsed_args.cluster, + host='fake_host', + detailed=False, + marker=None, + limit=None, + offset=None, + sort=None, + cluster=None, ) - def test_block_storage_volume_manage_pre_38(self): - host = 'fake_host' + def test_block_storage_snapshot_manage_list__pre_v38(self): + self.set_volume_api_version('3.7') + arglist = [ - host, + 'fake_host', ] verifylist = [ - ('host', host), + ('host', 'fake_host'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.8 or greater is required', str(exc)) + '--os-volume-api-version 3.8 or greater is required', str(exc) + ) + + def test_block_storage_snapshot_manage_list__pre_v317(self): + self.set_volume_api_version('3.16') - def test_block_storage_volume_manage_pre_317(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.16') - cluster = 'fake_cluster' arglist = [ - '--cluster', cluster, + '--cluster', + 'fake_cluster', ] verifylist = [ - ('cluster', cluster), + ('cluster', 'fake_cluster'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action, - parsed_args) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.17 or greater is required', str(exc)) + '--os-volume-api-version 3.17 or greater is required', str(exc) + ) self.assertIn('--cluster', str(exc)) - def test_block_storage_volume_manage_host_and_cluster(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.17') - host = 'fake_host' - cluster = 'fake_cluster' + def test_block_storage_snapshot_manage_list__host_and_cluster(self): + self.set_volume_api_version('3.17') + arglist = [ - host, - '--cluster', cluster, + 'fake_host', + '--cluster', + 'fake_cluster', ] verifylist = [ - ('host', host), - ('cluster', cluster), + ('host', 'fake_host'), + ('cluster', 'fake_cluster'), ] - exc = self.assertRaises(tests_utils.ParserException, - self.check_parser, self.cmd, - arglist, verifylist) + exc = self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) self.assertIn( - 'argument --cluster: not allowed with argument ', str(exc)) - - def test_block_storage_volume_manage_list_all_args(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.8') - host = 'fake_host' - detailed = True - marker = 'fake_marker' - limit = '5' - offset = '3' - sort = 'size:asc' + 'argument --cluster: not allowed with argument ', str(exc) + ) + + def test_block_storage_snapshot_manage_list__detailed(self): + self.set_volume_api_version('3.8') + arglist = [ - host, - '--detailed', str(detailed), - '--marker', marker, - '--limit', limit, - '--offset', offset, - '--sort', sort, + '--detailed', + 'True', + 'fake_host', ] verifylist = [ - ('host', host), - ('detailed', str(detailed)), - ('marker', marker), - ('limit', 
limit), - ('offset', offset), - ('sort', sort), + ('host', 'fake_host'), + ('detailed', 'True'), + ('marker', None), + ('limit', None), + ('offset', None), + ('sort', None), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - columns, data = self.cmd.take_action(parsed_args) + with mock.patch.object(self.cmd.log, 'warning') as mock_warning: + columns, data = self.cmd.take_action(parsed_args) expected_columns = [ 'reference', @@ -378,10 +423,76 @@ def test_block_storage_volume_manage_list_all_args(self): 'cinder_id', 'extra_info', ] + datalist = [] + for snapshot_record in self.snapshot_manage_list: + manage_details = ( + snapshot_record.reference, + snapshot_record.size, + snapshot_record.safe_to_manage, + snapshot_record.source_reference, + snapshot_record.reason_not_safe, + snapshot_record.cinder_id, + snapshot_record.extra_info, + ) + datalist.append(manage_details) + datalist = tuple(datalist) - # confirming if all expected columns are present in the result. self.assertEqual(expected_columns, columns) + self.assertEqual(datalist, tuple(data)) + + # checking if proper call was made to get snapshot manageable list + self.snapshots_mock.list_manageable.assert_called_with( + host='fake_host', + detailed=True, + marker=None, + limit=None, + offset=None, + sort=None, + cluster=None, + ) + mock_warning.assert_called_once() + self.assertIn( + "The --detailed option has been deprecated.", + str(mock_warning.call_args[0][0]), + ) + + def test_block_storage_snapshot_manage_list__all_args(self): + self.set_volume_api_version('3.8') + + arglist = [ + '--long', + '--marker', + 'fake_marker', + '--limit', + '5', + '--offset', + '3', + '--sort', + 'size:asc', + 'fake_host', + ] + verifylist = [ + ('host', 'fake_host'), + ('detailed', None), + ('long', True), + ('marker', 'fake_marker'), + ('limit', '5'), + ('offset', '3'), + ('sort', 'size:asc'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + expected_columns = [ + 'reference', + 'size', + 'safe_to_manage', + 'source_reference', + 'reason_not_safe', + 'cinder_id', + 'extra_info', + ] datalist = [] for snapshot_record in self.snapshot_manage_list: manage_details = ( @@ -396,16 +507,16 @@ def test_block_storage_volume_manage_list_all_args(self): datalist.append(manage_details) datalist = tuple(datalist) - # confirming if all expected values are present in the result. + self.assertEqual(expected_columns, columns) self.assertEqual(datalist, tuple(data)) # checking if proper call was made to get snapshot manageable list self.snapshots_mock.list_manageable.assert_called_with( - host=host, - detailed=detailed, - marker=marker, - limit=limit, - offset=offset, - sort=sort, - cluster=parsed_args.cluster, + host='fake_host', + detailed=True, + marker='fake_marker', + limit='5', + offset='3', + sort='size:asc', + cluster=None, ) diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_resource_filter.py b/openstackclient/tests/unit/volume/v3/test_block_storage_resource_filter.py index 086339ffd1..609458f748 100644 --- a/openstackclient/tests/unit/volume/v3/test_block_storage_resource_filter.py +++ b/openstackclient/tests/unit/volume/v3/test_block_storage_resource_filter.py @@ -10,42 +10,33 @@ # License for the specific language governing permissions and limitations # under the License. 
-from cinderclient import api_versions +from osc_lib.cli import format_columns from osc_lib import exceptions from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes from openstackclient.volume.v3 import block_storage_resource_filter -class TestBlockStorageResourceFilter(volume_fakes.TestVolume): - - def setUp(self): - super().setUp() - - # Get a shortcut to the ResourceFilterManager Mock - self.resource_filter_mock = \ - self.app.client_manager.volume.resource_filters - self.resource_filter_mock.reset_mock() - - -class TestBlockStorageResourceFilterList(TestBlockStorageResourceFilter): - +class TestBlockStorageResourceFilterList(volume_fakes.TestVolume): # The resource filters to be listed fake_resource_filters = volume_fakes.create_resource_filters() def setUp(self): super().setUp() - self.resource_filter_mock.list.return_value = \ + self.volume_sdk_client.resource_filters.return_value = ( self.fake_resource_filters + ) # Get the command object to test - self.cmd = block_storage_resource_filter\ - .ListBlockStorageResourceFilter(self.app, None) + self.cmd = ( + block_storage_resource_filter.ListBlockStorageResourceFilter( + self.app, None + ) + ) def test_resource_filter_list(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.33') + self.set_volume_api_version('3.33') arglist = [] verifylist = [] @@ -55,8 +46,9 @@ def test_resource_filter_list(self): expected_data = tuple( ( resource_filter.resource, - resource_filter.filters, - ) for resource_filter in self.fake_resource_filters + format_columns.ListColumn(resource_filter.filters), + ) + for resource_filter in self.fake_resource_filters ) columns, data = self.cmd.take_action(parsed_args) @@ -64,42 +56,43 @@ def test_resource_filter_list(self): self.assertEqual(expected_data, tuple(data)) # checking if proper call was made to list clusters - self.resource_filter_mock.list.assert_called_with() + self.volume_sdk_client.resource_filters.assert_called_with() def test_resource_filter_list_pre_v333(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.32') + self.set_volume_api_version('3.32') arglist = [] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.33 or greater is required', str(exc)) - + '--os-volume-api-version 3.33 or greater is required', str(exc) + ) -class TestBlockStorageResourceFilterShow(TestBlockStorageResourceFilter): +class TestBlockStorageResourceFilterShow(volume_fakes.TestVolume): # The resource filters to be listed fake_resource_filter = volume_fakes.create_one_resource_filter() def setUp(self): super().setUp() - self.resource_filter_mock.list.return_value = \ - iter([self.fake_resource_filter]) + self.volume_sdk_client.resource_filters.return_value = iter( + [self.fake_resource_filter] + ) # Get the command object to test - self.cmd = block_storage_resource_filter\ - .ShowBlockStorageResourceFilter(self.app, None) + self.cmd = ( + block_storage_resource_filter.ShowBlockStorageResourceFilter( + self.app, None + ) + ) def test_resource_filter_show(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.33') + self.set_volume_api_version('3.33') arglist = [ self.fake_resource_filter.resource, @@ -109,10 +102,10 @@ def test_resource_filter_show(self): ] parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) - expected_columns = ('filters', 'resource') + expected_columns = ('Resource', 'Filters') expected_data = ( - self.fake_resource_filter.filters, self.fake_resource_filter.resource, + format_columns.ListColumn(self.fake_resource_filter.filters), ) columns, data = self.cmd.take_action(parsed_args) @@ -120,11 +113,12 @@ def test_resource_filter_show(self): self.assertEqual(expected_data, data) # checking if proper call was made to list clusters - self.resource_filter_mock.list.assert_called_with(resource='volume') + self.volume_sdk_client.resource_filters.assert_called_with( + resource='volume' + ) def test_resource_filter_show_pre_v333(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.32') + self.set_volume_api_version('3.32') arglist = [ self.fake_resource_filter.resource, @@ -135,8 +129,8 @@ def test_resource_filter_show_pre_v333(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.33 or greater is required', str(exc)) + '--os-volume-api-version 3.33 or greater is required', str(exc) + ) diff --git a/openstackclient/tests/unit/volume/v3/test_service.py b/openstackclient/tests/unit/volume/v3/test_service.py new file mode 100644 index 0000000000..53027fcb58 --- /dev/null +++ b/openstackclient/tests/unit/volume/v3/test_service.py @@ -0,0 +1,351 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from unittest import mock + +from openstack.block_storage.v3 import service as _service +from openstack.test import fakes as sdk_fakes +from osc_lib import exceptions + +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes +from openstackclient.volume.v3 import service + + +class TestServiceList(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.service = sdk_fakes.generate_fake_resource(_service.Service) + self.volume_sdk_client.services.return_value = [self.service] + + self.cmd = service.ListService(self.app, None) + + def test_service_list(self): + arglist = [ + '--host', + self.service.host, + '--service', + self.service.binary, + ] + verifylist = [ + ('host', self.service.host), + ('service', self.service.binary), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + expected_columns = ( + 'Binary', + 'Host', + 'Zone', + 'Status', + 'State', + 'Updated At', + ) + datalist = ( + ( + self.service.binary, + self.service.host, + self.service.availability_zone, + self.service.status, + self.service.state, + self.service.updated_at, + ), + ) + self.assertEqual(expected_columns, columns) + self.assertEqual(datalist, tuple(data)) + self.volume_sdk_client.services.assert_called_with( + host=self.service.host, + binary=self.service.binary, + ) + + def test_service_list_with_long_option(self): + arglist = [ + '--host', + self.service.host, + '--service', + self.service.binary, + '--long', + ] + verifylist = [ + ('host', self.service.host), + ('service', self.service.binary), + ('long', True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + expected_columns = ( + 'Binary', + 'Host', + 'Zone', + 'Status', + 'State', + 'Updated At', + 'Disabled Reason', + ) + datalist = ( + ( + self.service.binary, + self.service.host, + self.service.availability_zone, + self.service.status, + self.service.state, + self.service.updated_at, + self.service.disabled_reason, + ), + ) + self.assertEqual(expected_columns, columns) + self.assertEqual(datalist, tuple(data)) + self.volume_sdk_client.services.assert_called_with( + host=self.service.host, + binary=self.service.binary, + ) + + def test_service_list_with_cluster(self): + self.set_volume_api_version('3.7') + + arglist = [ + '--host', + self.service.host, + '--service', + self.service.binary, + ] + verifylist = [ + ('host', self.service.host), + ('service', self.service.binary), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + expected_columns = ( + 'Binary', + 'Host', + 'Zone', + 'Status', + 'State', + 'Updated At', + 'Cluster', + ) + datalist = ( + ( + self.service.binary, + self.service.host, + self.service.availability_zone, + self.service.status, + self.service.state, + self.service.updated_at, + self.service.cluster, + ), + ) + self.assertEqual(expected_columns, columns) + self.assertEqual(datalist, tuple(data)) + self.volume_sdk_client.services.assert_called_with( + host=self.service.host, + binary=self.service.binary, + ) + + def test_service_list_with_backend_state(self): + self.set_volume_api_version('3.49') + + arglist = [ + '--host', + self.service.host, + '--service', + self.service.binary, + ] + verifylist = [ + ('host', self.service.host), + ('service', self.service.binary), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = 
self.cmd.take_action(parsed_args) + + expected_columns = ( + 'Binary', + 'Host', + 'Zone', + 'Status', + 'State', + 'Updated At', + 'Cluster', + 'Backend State', + ) + datalist = ( + ( + self.service.binary, + self.service.host, + self.service.availability_zone, + self.service.status, + self.service.state, + self.service.updated_at, + self.service.cluster, + self.service.backend_state, + ), + ) + self.assertEqual(expected_columns, columns) + self.assertEqual(datalist, tuple(data)) + self.volume_sdk_client.services.assert_called_with( + host=self.service.host, + binary=self.service.binary, + ) + + +class TestServiceSet(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.service = sdk_fakes.generate_fake_resource(_service.Service) + self.service.enable = mock.Mock(autospec=True) + self.service.disable = mock.Mock(autospec=True) + self.volume_sdk_client.find_service.return_value = self.service + + self.cmd = service.SetService(self.app, None) + + def test_service_set_nothing(self): + arglist = [ + self.service.host, + self.service.binary, + ] + verifylist = [ + ('host', self.service.host), + ('service', self.service.binary), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + self.service.enable.assert_not_called() + self.service.disable.assert_not_called() + self.assertIsNone(result) + + def test_service_set_enable(self): + arglist = [ + '--enable', + self.service.host, + self.service.binary, + ] + verifylist = [ + ('enable', True), + ('host', self.service.host), + ('service', self.service.binary), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.service.enable.assert_called_with(self.volume_sdk_client) + self.service.disable.assert_not_called() + self.assertIsNone(result) + + def test_service_set_disable(self): + arglist = [ + '--disable', + self.service.host, + self.service.binary, + ] + verifylist = [ + ('disable', True), + ('host', self.service.host), + ('service', self.service.binary), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.service.enable.assert_not_called() + self.service.disable.assert_called_with( + self.volume_sdk_client, reason=None + ) + self.assertIsNone(result) + + def test_service_set_disable_with_reason(self): + reason = 'earthquake' + arglist = [ + '--disable', + '--disable-reason', + reason, + self.service.host, + self.service.binary, + ] + verifylist = [ + ('disable', True), + ('disable_reason', reason), + ('host', self.service.host), + ('service', self.service.binary), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.service.enable.assert_not_called() + self.service.disable.assert_called_with( + self.volume_sdk_client, reason=reason + ) + self.assertIsNone(result) + + def test_service_set_only_with_disable_reason(self): + reason = 'earthquake' + arglist = [ + '--disable-reason', + reason, + self.service.host, + self.service.binary, + ] + verifylist = [ + ('disable_reason', reason), + ('host', self.service.host), + ('service', self.service.binary), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + try: + self.cmd.take_action(parsed_args) + self.fail("CommandError should be raised.") + except exceptions.CommandError as e: + self.assertEqual( + "Cannot specify option --disable-reason without " + "--disable specified.", + str(e), + ) + 
+ def test_service_set_enable_with_disable_reason(self): + reason = 'earthquake' + arglist = [ + '--enable', + '--disable-reason', + reason, + self.service.host, + self.service.binary, + ] + verifylist = [ + ('enable', True), + ('disable_reason', reason), + ('host', self.service.host), + ('service', self.service.binary), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + try: + self.cmd.take_action(parsed_args) + self.fail("CommandError should be raised.") + except exceptions.CommandError as e: + self.assertEqual( + "Cannot specify option --disable-reason without " + "--disable specified.", + str(e), + ) diff --git a/openstackclient/tests/unit/volume/v3/test_volume.py b/openstackclient/tests/unit/volume/v3/test_volume.py index ed72bfa112..33dcfe5a47 100644 --- a/openstackclient/tests/unit/volume/v3/test_volume.py +++ b/openstackclient/tests/unit/volume/v3/test_volume.py @@ -10,22 +10,2184 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -import copy -from unittest import mock +import copy +from unittest import mock +import uuid + +from openstack.block_storage.v3 import backup as _backup +from openstack.block_storage.v3 import block_storage_summary as _summary +from openstack.block_storage.v3 import snapshot as _snapshot +from openstack.block_storage.v3 import volume as _volume +from openstack import exceptions as sdk_exceptions +from openstack.test import fakes as sdk_fakes +from osc_lib.cli import format_columns +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient.api import volume_v3 +from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes +from openstackclient.tests.unit.image.v2 import fakes as image_fakes +from openstackclient.tests.unit import utils as test_utils +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes +from openstackclient.volume.v3 import volume + + +class TestVolumeCreate(volume_fakes.TestVolume): + columns = ( + 'attachments', + 'availability_zone', + 'backup_id', + 'bootable', + 'cluster_name', + 'consistencygroup_id', + 'consumes_quota', + 'created_at', + 'description', + 'encrypted', + 'encryption_key_id', + 'group_id', + 'id', + 'multiattach', + 'name', + 'os-vol-host-attr:host', + 'os-vol-mig-status-attr:migstat', + 'os-vol-mig-status-attr:name_id', + 'os-vol-tenant-attr:tenant_id', + 'properties', + 'provider_id', + 'replication_status', + 'service_uuid', + 'shared_targets', + 'size', + 'snapshot_id', + 'source_volid', + 'status', + 'type', + 'updated_at', + 'user_id', + 'volume_image_metadata', + 'volume_type_id', + ) + + def setUp(self): + super().setUp() + + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.create_volume.return_value = self.volume + + self.datalist = ( + self.volume.attachments, + self.volume.availability_zone, + self.volume.backup_id, + self.volume.is_bootable, + self.volume.cluster_name, + self.volume.consistency_group_id, + self.volume.consumes_quota, + self.volume.created_at, + self.volume.description, + self.volume.is_encrypted, + self.volume.encryption_key_id, + self.volume.group_id, + self.volume.id, + self.volume.is_multiattach, + self.volume.name, + self.volume.host, + self.volume.migration_status, + self.volume.migration_id, + self.volume.project_id, + format_columns.DictColumn(self.volume.metadata), + self.volume.provider_id, + self.volume.replication_status, + 
self.volume.service_uuid, + self.volume.shared_targets, + self.volume.size, + self.volume.snapshot_id, + self.volume.source_volume_id, + self.volume.status, + self.volume.volume_type, + self.volume.updated_at, + self.volume.user_id, + self.volume.volume_image_metadata, + self.volume.volume_type_id, + ) + + # Get the command object to test + self.cmd = volume.CreateVolume(self.app, None) + + def test_volume_create_min_options(self): + arglist = [ + '--size', + str(self.volume.size), + ] + verifylist = [ + ('size', self.volume.size), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=None, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints=None, + backup_id=None, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_volume_create_options(self): + consistency_group_id = 'cg123' + arglist = [ + '--size', + str(self.volume.size), + '--description', + self.volume.description, + '--type', + self.volume.volume_type, + '--availability-zone', + self.volume.availability_zone, + '--consistency-group', + consistency_group_id, + '--hint', + 'k=v', + self.volume.name, + ] + verifylist = [ + ('size', self.volume.size), + ('description', self.volume.description), + ('type', self.volume.volume_type), + ('availability_zone', self.volume.availability_zone), + ('consistency_group', consistency_group_id), + ('hint', {'k': 'v'}), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + with mock.patch.object( + volume_v3, + 'find_consistency_group', + return_value={'id': consistency_group_id}, + ) as mock_find_cg: + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=self.volume.description, + volume_type=self.volume.volume_type, + availability_zone=self.volume.availability_zone, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=consistency_group_id, + scheduler_hints={'k': 'v'}, + backup_id=None, + ) + mock_find_cg.assert_called_once_with( + self.volume_sdk_client, consistency_group_id + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_volume_create_properties(self): + arglist = [ + '--property', + 'Alpha=a', + '--property', + 'Beta=b', + '--size', + str(self.volume.size), + self.volume.name, + ] + verifylist = [ + ('properties', {'Alpha': 'a', 'Beta': 'b'}), + ('size', self.volume.size), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata={'Alpha': 'a', 'Beta': 'b'}, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints=None, + backup_id=None, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_volume_create_image_id(self): + image = image_fakes.create_one_image() + self.image_client.find_image.return_value = 
image + + arglist = [ + '--image', + image.id, + '--size', + str(self.volume.size), + self.volume.name, + ] + verifylist = [ + ('image', image.id), + ('size', self.volume.size), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=image.id, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints=None, + backup_id=None, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_volume_create_image_name(self): + image = image_fakes.create_one_image() + self.image_client.find_image.return_value = image + + arglist = [ + '--image', + image.name, + '--size', + str(self.volume.size), + self.volume.name, + ] + verifylist = [ + ('image', image.name), + ('size', self.volume.size), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=image.id, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints=None, + backup_id=None, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_volume_create_with_snapshot(self): + snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot) + self.volume_sdk_client.find_snapshot.return_value = snapshot + + arglist = [ + '--snapshot', + snapshot.id, + self.volume.name, + ] + verifylist = [ + ('snapshot', snapshot.id), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=snapshot.size, + snapshot_id=snapshot.id, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints=None, + backup_id=None, + ) + self.volume_sdk_client.find_snapshot.assert_called_once_with( + snapshot.id, ignore_missing=False + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_volume_create_with_backup(self): + self.set_volume_api_version('3.47') + + backup = sdk_fakes.generate_fake_resource(_backup.Backup) + self.volume_sdk_client.find_backup.return_value = backup + + arglist = [ + '--backup', + backup.id, + self.volume.name, + ] + verifylist = [ + ('backup', backup.id), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=backup.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints=None, + backup_id=backup.id, + ) + self.volume_sdk_client.find_backup.assert_called_once_with( + backup.id, ignore_missing=False + ) + + self.assertEqual(self.columns, columns) + 
self.assertEqual(self.datalist, data) + + def test_volume_create_with_backup_pre_v347(self): + backup = sdk_fakes.generate_fake_resource(_backup.Backup) + self.volume_sdk_client.find_backup.return_value = backup + + arglist = [ + '--backup', + backup.id, + self.volume.name, + ] + verifylist = [ + ('backup', backup.id), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn("--os-volume-api-version 3.47 or greater", str(exc)) + + self.volume_sdk_client.create_volume.assert_not_called() + + def test_volume_create_with_source_volume(self): + source_volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = source_volume + + arglist = [ + '--source', + source_volume.id, + self.volume.name, + ] + verifylist = [ + ('source', source_volume.id), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=source_volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=source_volume.id, + consistency_group_id=None, + scheduler_hints=None, + backup_id=None, + ) + self.volume_sdk_client.find_volume.assert_called_once_with( + source_volume.id, ignore_missing=False + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + @mock.patch.object(utils, 'wait_for_status', return_value=True) + def test_volume_create_with_bootable_and_readonly(self, mock_wait): + arglist = [ + '--bootable', + '--read-only', + '--size', + str(self.volume.size), + self.volume.name, + ] + verifylist = [ + ('bootable', True), + ('read_only', True), + ('size', self.volume.size), + ('name', self.volume.name), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints=None, + backup_id=None, + ) + self.volume_sdk_client.set_volume_bootable_status.assert_called_once_with( + self.volume, True + ) + self.volume_sdk_client.set_volume_readonly.assert_called_once_with( + self.volume, True + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + @mock.patch.object(utils, 'wait_for_status', return_value=True) + def test_volume_create_with_nonbootable_and_readwrite(self, mock_wait): + arglist = [ + '--non-bootable', + '--read-write', + '--size', + str(self.volume.size), + self.volume.name, + ] + verifylist = [ + ('bootable', False), + ('read_only', False), + ('size', self.volume.size), + ('name', self.volume.name), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + 
scheduler_hints=None, + backup_id=None, + ) + self.volume_sdk_client.set_volume_bootable_status.assert_called_once_with( + self.volume, False + ) + self.volume_sdk_client.set_volume_readonly.assert_called_once_with( + self.volume, False + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + @mock.patch.object(volume.LOG, 'error') + @mock.patch.object(utils, 'wait_for_status', return_value=True) + def test_volume_create_with_bootable_and_readonly_fail( + self, mock_wait, mock_error + ): + self.volume_sdk_client.set_volume_bootable_status.side_effect = ( + sdk_exceptions.NotFoundException('foo') + ) + self.volume_sdk_client.set_volume_readonly.side_effect = ( + sdk_exceptions.NotFoundException('foo') + ) + + arglist = [ + '--bootable', + '--read-only', + '--size', + str(self.volume.size), + self.volume.name, + ] + verifylist = [ + ('bootable', True), + ('read_only', True), + ('size', self.volume.size), + ('name', self.volume.name), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints=None, + backup_id=None, + ) + self.volume_sdk_client.set_volume_bootable_status.assert_called_once_with( + self.volume, True + ) + self.volume_sdk_client.set_volume_readonly.assert_called_once_with( + self.volume, True + ) + + self.assertEqual(2, mock_error.call_count) + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + @mock.patch.object(volume.LOG, 'error') + @mock.patch.object(utils, 'wait_for_status', return_value=False) + def test_volume_create_non_available_with_readonly( + self, mock_wait, mock_error + ): + arglist = [ + '--non-bootable', + '--read-only', + '--size', + str(self.volume.size), + self.volume.name, + ] + verifylist = [ + ('bootable', False), + ('read_only', True), + ('size', self.volume.size), + ('name', self.volume.name), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints=None, + backup_id=None, + ) + + self.assertEqual(2, mock_error.call_count) + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_volume_create_without_size(self): + arglist = [ + self.volume.name, + ] + verifylist = [ + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + + def test_volume_create_with_multi_source(self): + arglist = [ + '--image', + 'source_image', + '--source', + 'source_volume', + '--snapshot', + 'source_snapshot', + '--size', + str(self.volume.size), + self.volume.name, + ] + verifylist = [ + ('image', 'source_image'), + ('source', 'source_volume'), + ('snapshot', 'source_snapshot'), + ('size', self.volume.size), + ('name', self.volume.name), + ] + + self.assertRaises( + test_utils.ParserException, + self.check_parser, + 
self.cmd, + arglist, + verifylist, + ) + + def test_volume_create_hints(self): + """--hint needs to behave differently based on the given hint. + + different_host and same_host are appended to a list if given multiple + times. All other parameters are strings. + """ + arglist = [ + '--size', + str(self.volume.size), + '--hint', + 'k=v', + '--hint', + 'k=v2', + '--hint', + 'same_host=v3', + '--hint', + 'same_host=v4', + '--hint', + 'different_host=v5', + '--hint', + 'local_to_instance=v6', + '--hint', + 'different_host=v7', + self.volume.name, + ] + verifylist = [ + ('size', self.volume.size), + ( + 'hint', + { + 'k': 'v2', + 'same_host': ['v3', 'v4'], + 'local_to_instance': 'v6', + 'different_host': ['v5', 'v7'], + }, + ), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_volume.assert_called_with( + size=self.volume.size, + snapshot_id=None, + name=self.volume.name, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + image_id=None, + source_volume_id=None, + consistency_group_id=None, + scheduler_hints={ + 'k': 'v2', + 'same_host': ['v3', 'v4'], + 'local_to_instance': 'v6', + 'different_host': ['v5', 'v7'], + }, + backup_id=None, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_volume_create_remote_source(self): + self.volume_sdk_client.manage_volume.return_value = self.volume + + arglist = [ + '--remote-source', + 'key=val', + '--host', + 'fake_host', + self.volume.name, + ] + verifylist = [ + ('remote_source', {'key': 'val'}), + ('host', 'fake_host'), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.manage_volume.assert_called_with( + host='fake_host', + ref={'key': 'val'}, + name=parsed_args.name, + description=parsed_args.description, + volume_type=parsed_args.type, + availability_zone=parsed_args.availability_zone, + metadata=parsed_args.properties, + bootable=parsed_args.bootable, + cluster=getattr(parsed_args, 'cluster', None), + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.datalist, data) + + def test_volume_create_remote_source_pre_v316(self): + self.set_volume_api_version('3.15') + arglist = [ + '--remote-source', + 'key=val', + '--cluster', + 'fake_cluster', + self.volume.name, + ] + verifylist = [ + ('remote_source', {'key': 'val'}), + ('cluster', 'fake_cluster'), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--os-volume-api-version 3.16 or greater is required', str(exc) + ) + + def test_volume_create_remote_source_host_and_cluster(self): + self.set_volume_api_version('3.16') + arglist = [ + '--remote-source', + 'key=val', + '--host', + 'fake_host', + '--cluster', + 'fake_cluster', + self.volume.name, + ] + verifylist = [ + ('remote_source', {'key': 'val'}), + ('host', 'fake_host'), + ('cluster', 'fake_cluster'), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + 'Only one of --host or --cluster needs to be specified', str(exc) + ) + + def 
test_volume_create_remote_source_no_host_or_cluster(self): + arglist = [ + '--remote-source', + 'key=val', + self.volume.name, + ] + verifylist = [ + ('remote_source', {'key': 'val'}), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + 'One of --host or --cluster needs to be specified to ', str(exc) + ) + + def test_volume_create_remote_source_size(self): + arglist = [ + '--size', + str(self.volume.size), + '--remote-source', + 'key=val', + self.volume.name, + ] + verifylist = [ + ('size', self.volume.size), + ('remote_source', {'key': 'val'}), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--size, --consistency-group, --hint, --read-only and ' + '--read-write options are not supported', + str(exc), + ) + + def test_volume_create_host_no_remote_source(self): + arglist = [ + '--size', + str(self.volume.size), + '--host', + 'fake_host', + self.volume.name, + ] + verifylist = [ + ('size', self.volume.size), + ('host', 'fake_host'), + ('name', self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--host and --cluster options are only supported ', + str(exc), + ) + + +class TestVolumeDelete(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.volumes_mock = self.volume_client.volumes + self.volumes_mock.reset_mock() + + self.volumes = list(sdk_fakes.generate_fake_resources(_volume.Volume)) + self.volume_sdk_client.find_volume.side_effect = self.volumes + self.volume_sdk_client.delete_volume.return_value = None + self.volume_sdk_client.unmanage_volume.return_value = None + + # Get the command object to mock + self.cmd = volume.DeleteVolume(self.app, None) + + def test_volume_delete_one_volume(self): + arglist = [self.volumes[0].id] + verifylist = [ + ("force", False), + ("purge", False), + ("volumes", [self.volumes[0].id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volumes[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_volume.assert_called_once_with( + self.volumes[0].id, cascade=False, force=False + ) + + def test_volume_delete_multi_volumes(self): + arglist = [v.id for v in self.volumes] + verifylist = [ + ('force', False), + ('purge', False), + ('volumes', arglist), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_volume.assert_has_calls( + [mock.call(v.id, ignore_missing=False) for v in self.volumes] + ) + self.volume_sdk_client.delete_volume.assert_has_calls( + [mock.call(v.id, cascade=False, force=False) for v in self.volumes] + ) + + def test_volume_delete_multi_volumes_with_exception(self): + self.volume_sdk_client.find_volume.side_effect = [ + self.volumes[0], + sdk_exceptions.NotFoundException(), + ] + + arglist = [ + self.volumes[0].id, + 'unexist_volume', + ] + verifylist = [ + ('force', False), + ('purge', False), + ('volumes', [self.volumes[0].id, 'unexist_volume']), + ] + parsed_args = 
self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertEqual('1 of 2 volumes failed to delete.', str(exc)) + + self.volume_sdk_client.find_volume.assert_has_calls( + [ + mock.call(self.volumes[0].id, ignore_missing=False), + mock.call('unexist_volume', ignore_missing=False), + ] + ) + self.volume_sdk_client.delete_volume.assert_has_calls( + [ + mock.call(self.volumes[0].id, cascade=False, force=False), + ] + ) + + def test_volume_delete_with_purge(self): + arglist = [ + '--purge', + self.volumes[0].id, + ] + verifylist = [ + ('force', False), + ('purge', True), + ('volumes', [self.volumes[0].id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volumes[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_volume.assert_called_once_with( + self.volumes[0].id, cascade=True, force=False + ) + + def test_volume_delete_with_force(self): + arglist = [ + '--force', + self.volumes[0].id, + ] + verifylist = [ + ('force', True), + ('purge', False), + ('volumes', [self.volumes[0].id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volumes[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_volume.assert_called_once_with( + self.volumes[0].id, cascade=False, force=True + ) + + def test_volume_delete_remote(self): + arglist = ['--remote', self.volumes[0].id] + verifylist = [ + ("remote", True), + ("force", False), + ("purge", False), + ("volumes", [self.volumes[0].id]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volumes[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_volume.assert_not_called() + self.volume_sdk_client.unmanage_volume.assert_called_once_with( + self.volumes[0].id + ) + + def test_volume_delete_multi_volumes_remote(self): + arglist = ['--remote'] + [v.id for v in self.volumes] + verifylist = [ + ('remote', True), + ('force', False), + ('purge', False), + ('volumes', arglist[1:]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_volume.assert_has_calls( + [mock.call(v.id, ignore_missing=False) for v in self.volumes] + ) + self.volume_sdk_client.delete_volume.assert_not_called() + self.volume_sdk_client.unmanage_volume.assert_has_calls( + [mock.call(v.id) for v in self.volumes] + ) + + def test_volume_delete_remote_with_purge(self): + arglist = [ + '--remote', + '--purge', + self.volumes[0].id, + ] + verifylist = [ + ('remote', True), + ('force', False), + ('purge', True), + ('volumes', [self.volumes[0].id]), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + "The --force and --purge options are not supported with the " + "--remote parameter.", + str(exc), + ) + + self.volume_sdk_client.find_volume.assert_not_called() + self.volume_sdk_client.delete_volume.assert_not_called() + 
self.volume_sdk_client.unmanage_volume.assert_not_called() + + def test_volume_delete_remote_with_force(self): + arglist = [ + '--remote', + '--force', + self.volumes[0].id, + ] + verifylist = [ + ('remote', True), + ('force', True), + ('purge', False), + ('volumes', [self.volumes[0].id]), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + "The --force and --purge options are not supported with the " + "--remote parameter.", + str(exc), + ) + + self.volume_sdk_client.find_volume.assert_not_called() + self.volume_sdk_client.delete_volume.assert_not_called() + self.volume_sdk_client.unmanage_volume.assert_not_called() + + +class TestVolumeList(volume_fakes.TestVolume): + project = identity_fakes.FakeProject.create_one_project() + user = identity_fakes.FakeUser.create_one_user() + + columns = [ + 'ID', + 'Name', + 'Status', + 'Size', + 'Attached to', + ] + + def setUp(self): + super().setUp() + + self.volumes_mock = self.volume_client.volumes + self.volumes_mock.reset_mock() + + self.projects_mock = self.identity_client.projects + self.projects_mock.reset_mock() + + self.users_mock = self.identity_client.users + self.users_mock.reset_mock() + + self.mock_volume = volume_fakes.create_one_volume() + self.volumes_mock.list.return_value = [self.mock_volume] + + self.users_mock.get.return_value = self.user + + self.projects_mock.get.return_value = self.project + + # Get the command object to test + self.cmd = volume.ListVolume(self.app, None) + + def test_volume_list_no_options(self): + arglist = [] + verifylist = [ + ('long', False), + ('all_projects', False), + ('name', None), + ('status', None), + ('marker', None), + ('limit', None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': False, + 'project_id': None, + 'user_id': None, + 'name': None, + 'status': None, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + + self.assertEqual(self.columns, columns) + + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_project(self): + arglist = [ + '--project', + self.project.name, + ] + verifylist = [ + ('project', self.project.name), + ('long', False), + ('all_projects', False), + ('status', None), + ('marker', None), + ('limit', None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': True, + 'project_id': self.project.id, + 'user_id': None, + 'name': None, + 'status': None, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + + self.assertEqual(self.columns, columns) + + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_project_domain(self): + arglist = [ + '--project', + self.project.name, + '--project-domain', + self.project.domain_id, + ] + verifylist = [ + ('project', 
self.project.name), + ('project_domain', self.project.domain_id), + ('long', False), + ('all_projects', False), + ('status', None), + ('marker', None), + ('limit', None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': True, + 'project_id': self.project.id, + 'user_id': None, + 'name': None, + 'status': None, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + + self.assertEqual(self.columns, columns) + + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_user(self): + arglist = [ + '--user', + self.user.name, + ] + verifylist = [ + ('user', self.user.name), + ('long', False), + ('all_projects', False), + ('status', None), + ('marker', None), + ('limit', None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': False, + 'project_id': None, + 'user_id': self.user.id, + 'name': None, + 'status': None, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + self.assertEqual(self.columns, columns) + + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_user_domain(self): + arglist = [ + '--user', + self.user.name, + '--user-domain', + self.user.domain_id, + ] + verifylist = [ + ('user', self.user.name), + ('user_domain', self.user.domain_id), + ('long', False), + ('all_projects', False), + ('status', None), + ('marker', None), + ('limit', None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': False, + 'project_id': None, + 'user_id': self.user.id, + 'name': None, + 'status': None, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + + self.assertEqual(self.columns, columns) + + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_name(self): + arglist = [ + '--name', + self.mock_volume.name, + ] + verifylist = [ + ('long', False), + ('all_projects', False), + ('name', self.mock_volume.name), + ('status', None), + ('marker', None), + ('limit', None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': False, + 'project_id': None, + 'user_id': None, + 'name': self.mock_volume.name, + 'status': None, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + + self.assertEqual(self.columns, columns) + + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + 
volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_status(self): + arglist = [ + '--status', + self.mock_volume.status, + ] + verifylist = [ + ('long', False), + ('all_projects', False), + ('name', None), + ('status', self.mock_volume.status), + ('marker', None), + ('limit', None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': False, + 'project_id': None, + 'user_id': None, + 'name': None, + 'status': self.mock_volume.status, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + + self.assertEqual(self.columns, columns) + + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_all_projects(self): + arglist = [ + '--all-projects', + ] + verifylist = [ + ('long', False), + ('all_projects', True), + ('name', None), + ('status', None), + ('marker', None), + ('limit', None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': True, + 'project_id': None, + 'user_id': None, + 'name': None, + 'status': None, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + + self.assertEqual(self.columns, columns) + + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_long(self): + arglist = [ + '--long', + ] + verifylist = [ + ('long', True), + ('all_projects', False), + ('name', None), + ('status', None), + ('marker', None), + ('limit', None), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': False, + 'project_id': None, + 'user_id': None, + 'name': None, + 'status': None, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + + collist = [ + 'ID', + 'Name', + 'Status', + 'Size', + 'Type', + 'Bootable', + 'Attached to', + 'Properties', + ] + self.assertEqual(collist, columns) + + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + self.mock_volume.volume_type, + self.mock_volume.bootable, + volume.AttachmentsColumn(self.mock_volume.attachments), + format_columns.DictColumn(self.mock_volume.metadata), + ), + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_with_marker_and_limit(self): + arglist = [ + "--marker", + self.mock_volume.id, + "--limit", + "2", + ] + verifylist = [ + ('long', False), + ('all_projects', False), + ('name', None), + ('status', None), + ('marker', self.mock_volume.id), + ('limit', 2), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) -from cinderclient import api_versions -from osc_lib.cli import format_columns -from osc_lib import exceptions -from osc_lib import utils + 
self.assertEqual(self.columns, columns) -from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes -from openstackclient.volume.v3 import volume + datalist = ( + ( + self.mock_volume.id, + self.mock_volume.name, + self.mock_volume.status, + self.mock_volume.size, + volume.AttachmentsColumn(self.mock_volume.attachments), + ), + ) + self.volumes_mock.list.assert_called_once_with( + marker=self.mock_volume.id, + limit=2, + search_opts={ + 'status': None, + 'project_id': None, + 'user_id': None, + 'name': None, + 'all_tenants': False, + 'metadata': None, + }, + ) + self.assertCountEqual(datalist, tuple(data)) + + def test_volume_list_negative_limit(self): + arglist = [ + "--limit", + "-2", + ] + verifylist = [ + ("limit", -2), + ] + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + + def test_volume_list_backward_compatibility(self): + arglist = [ + '-c', + 'Display Name', + ] + verifylist = [ + ('columns', ['Display Name']), + ('long', False), + ('all_projects', False), + ('name', None), + ('status', None), + ('marker', None), + ('limit', None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + search_opts = { + 'all_tenants': False, + 'project_id': None, + 'user_id': None, + 'name': None, + 'status': None, + 'metadata': None, + } + self.volumes_mock.list.assert_called_once_with( + search_opts=search_opts, + marker=None, + limit=None, + ) + + self.assertIn('Display Name', columns) + self.assertNotIn('Name', columns) + + for each_volume in data: + self.assertIn(self.mock_volume.name, each_volume) + + +class TestVolumeMigrate(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.volume_sdk_client.migrate_volume.return_value = None + + self.cmd = volume.MigrateVolume(self.app, None) + + def test_volume_migrate(self): + arglist = [ + "--host", + "host@backend-name#pool", + self.volume.id, + ] + verifylist = [ + ("force_host_copy", False), + ("lock_volume", False), + ("host", "host@backend-name#pool"), + ("volume", self.volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_volume.assert_called_with( + self.volume.id, ignore_missing=False + ) + self.volume_sdk_client.migrate_volume.assert_called_once_with( + self.volume.id, + host="host@backend-name#pool", + force_host_copy=False, + lock_volume=False, + ) + + def test_volume_migrate_with_option(self): + arglist = [ + "--force-host-copy", + "--lock-volume", + "--host", + "host@backend-name#pool", + self.volume.id, + ] + verifylist = [ + ("force_host_copy", True), + ("lock_volume", True), + ("host", "host@backend-name#pool"), + ("volume", self.volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_volume.assert_called_with( + self.volume.id, ignore_missing=False + ) + self.volume_sdk_client.migrate_volume.assert_called_once_with( + self.volume.id, + host="host@backend-name#pool", + force_host_copy=True, + lock_volume=True, + ) + + def test_volume_migrate_without_host(self): + arglist = [ + self.volume.id, + ] + verifylist = [ + ("force_host_copy", False), + ("lock_volume", False), + ("volume", 
self.volume.id), + ] + + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + + self.volume_sdk_client.find_volume.assert_not_called() + self.volume_sdk_client.migrate_volume.assert_not_called() + + +class TestVolumeSet(volume_fakes.TestVolume): + volume_type = volume_fakes.create_one_volume_type() + + def setUp(self): + super().setUp() + + self.volumes_mock = self.volume_client.volumes + self.volumes_mock.reset_mock() + + self.types_mock = self.volume_client.volume_types + self.types_mock.reset_mock() + + self.new_volume = volume_fakes.create_one_volume() + self.volumes_mock.get.return_value = self.new_volume + self.types_mock.get.return_value = self.volume_type + + # Get the command object to test + self.cmd = volume.SetVolume(self.app, None) + + def test_volume_set_property(self): + arglist = [ + '--property', + 'a=b', + '--property', + 'c=d', + self.new_volume.id, + ] + verifylist = [ + ('properties', {'a': 'b', 'c': 'd'}), + ('read_only', None), + ('bootable', None), + ('volume', self.new_volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.cmd.take_action(parsed_args) + self.volumes_mock.set_metadata.assert_called_with( + self.new_volume.id, parsed_args.properties + ) + + def test_volume_set_image_property(self): + arglist = [ + '--image-property', + 'Alpha=a', + '--image-property', + 'Beta=b', + self.new_volume.id, + ] + verifylist = [ + ('image_properties', {'Alpha': 'a', 'Beta': 'b'}), + ('read_only', None), + ('bootable', None), + ('volume', self.new_volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + # In base command class ShowOne in cliff, abstract method take_action() + # returns nothing + self.cmd.take_action(parsed_args) + self.volumes_mock.set_image_metadata.assert_called_with( + self.new_volume.id, parsed_args.image_properties + ) + + def test_volume_set_state(self): + arglist = ['--state', 'error', self.new_volume.id] + verifylist = [ + ('state', 'error'), + ('read_only', None), + ('bootable', None), + ('volume', self.new_volume.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volumes_mock.reset_state.assert_called_with( + self.new_volume.id, 'error' + ) + self.volumes_mock.update_readonly_flag.assert_not_called() + self.assertIsNone(result) + + def test_volume_set_state_failed(self): + self.volumes_mock.reset_state.side_effect = exceptions.CommandError() + arglist = ['--state', 'error', self.new_volume.id] + verifylist = [('state', 'error'), ('volume', self.new_volume.id)] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + try: + self.cmd.take_action(parsed_args) + self.fail('CommandError should be raised.') + except exceptions.CommandError as e: + self.assertEqual( + 'One or more of the set operations failed', str(e) + ) + self.volumes_mock.reset_state.assert_called_with( + self.new_volume.id, 'error' + ) + + def test_volume_set_attached(self): + arglist = ['--attached', self.new_volume.id] + verifylist = [ + ('attached', True), + ('detached', False), + ('volume', self.new_volume.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volumes_mock.reset_state.assert_called_with( + self.new_volume.id, attach_status='attached', state=None + ) + self.assertIsNone(result) + + def test_volume_set_detached(self): + arglist = ['--detached', self.new_volume.id] + verifylist = [ + 
('attached', False), + ('detached', True), + ('volume', self.new_volume.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volumes_mock.reset_state.assert_called_with( + self.new_volume.id, attach_status='detached', state=None + ) + self.assertIsNone(result) + + def test_volume_set_bootable(self): + arglist = [ + '--bootable', + self.new_volume.id, + ] + verifylist = [ + ('bootable', True), + ('volume', self.new_volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.cmd.take_action(parsed_args) + self.volumes_mock.set_bootable.assert_called_with( + self.new_volume.id, verifylist[0][1] + ) + + def test_volume_set_non_bootable(self): + arglist = [ + '--non-bootable', + self.new_volume.id, + ] + verifylist = [ + ('bootable', False), + ('volume', self.new_volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.cmd.take_action(parsed_args) + self.volumes_mock.set_bootable.assert_called_with( + self.new_volume.id, verifylist[0][1] + ) + + def test_volume_set_readonly(self): + arglist = ['--read-only', self.new_volume.id] + verifylist = [ + ('read_only', True), + ('volume', self.new_volume.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volumes_mock.update_readonly_flag.assert_called_once_with( + self.new_volume.id, True + ) + self.assertIsNone(result) + + def test_volume_set_read_write(self): + arglist = ['--read-write', self.new_volume.id] + verifylist = [ + ('read_only', False), + ('volume', self.new_volume.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volumes_mock.update_readonly_flag.assert_called_once_with( + self.new_volume.id, False + ) + self.assertIsNone(result) + + def test_volume_set_type(self): + arglist = ['--type', self.volume_type.id, self.new_volume.id] + verifylist = [ + ('retype_policy', None), + ('type', self.volume_type.id), + ('volume', self.new_volume.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volumes_mock.retype.assert_called_once_with( + self.new_volume.id, self.volume_type.id, 'never' + ) + self.assertIsNone(result) + + def test_volume_set_type_with_policy(self): + arglist = [ + '--retype-policy', + 'on-demand', + '--type', + self.volume_type.id, + self.new_volume.id, + ] + verifylist = [ + ('retype_policy', 'on-demand'), + ('type', self.volume_type.id), + ('volume', self.new_volume.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volumes_mock.retype.assert_called_once_with( + self.new_volume.id, self.volume_type.id, 'on-demand' + ) + self.assertIsNone(result) + + @mock.patch.object(volume.LOG, 'warning') + def test_volume_set_with_only_retype_policy(self, mock_warning): + arglist = ['--retype-policy', 'on-demand', self.new_volume.id] + verifylist = [ + ('retype_policy', 'on-demand'), + ('volume', self.new_volume.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volumes_mock.retype.assert_not_called() + mock_warning.assert_called_with( + "'--retype-policy' option will not work without '--type' option" + ) + self.assertIsNone(result) + + +class TestVolumeShow(volume_fakes.TestVolume): + def setUp(self): + super().setUp() 
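+ # Fake volume returned by find_volume so ShowVolume can resolve it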
+ + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + + self.columns = ( + 'attachments', + 'availability_zone', + 'backup_id', + 'bootable', + 'cluster_name', + 'consistencygroup_id', + 'consumes_quota', + 'created_at', + 'description', + 'encrypted', + 'encryption_key_id', + 'group_id', + 'id', + 'multiattach', + 'name', + 'os-vol-host-attr:host', + 'os-vol-mig-status-attr:migstat', + 'os-vol-mig-status-attr:name_id', + 'os-vol-tenant-attr:tenant_id', + 'properties', + 'provider_id', + 'replication_status', + 'service_uuid', + 'shared_targets', + 'size', + 'snapshot_id', + 'source_volid', + 'status', + 'type', + 'updated_at', + 'user_id', + 'volume_image_metadata', + 'volume_type_id', + ) + self.data = ( + self.volume.attachments, + self.volume.availability_zone, + self.volume.backup_id, + self.volume.is_bootable, + self.volume.cluster_name, + self.volume.consistency_group_id, + self.volume.consumes_quota, + self.volume.created_at, + self.volume.description, + self.volume.is_encrypted, + self.volume.encryption_key_id, + self.volume.group_id, + self.volume.id, + self.volume.is_multiattach, + self.volume.name, + self.volume.host, + self.volume.migration_status, + self.volume.migration_id, + self.volume.project_id, + format_columns.DictColumn(self.volume.metadata), + self.volume.provider_id, + self.volume.replication_status, + self.volume.service_uuid, + self.volume.shared_targets, + self.volume.size, + self.volume.snapshot_id, + self.volume.source_volume_id, + self.volume.status, + self.volume.volume_type, + self.volume.updated_at, + self.volume.user_id, + self.volume.volume_image_metadata, + self.volume.volume_type_id, + ) + + self.cmd = volume.ShowVolume(self.app, None) + + def test_volume_show(self): + arglist = [self.volume.id] + verifylist = [("volume", self.volume.id)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + self.volume_sdk_client.find_volume.assert_called_with( + self.volume.id, ignore_missing=False + ) + + +class TestVolumeUnset(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.volumes_mock = self.volume_client.volumes + self.volumes_mock.reset_mock() + + self.new_volume = volume_fakes.create_one_volume() + self.volumes_mock.get.return_value = self.new_volume + + # Get the command object to set property + self.cmd_set = volume.SetVolume(self.app, None) + + # Get the command object to unset property + self.cmd_unset = volume.UnsetVolume(self.app, None) + + def test_volume_unset_image_property(self): + # Arguments for setting image properties + arglist = [ + '--image-property', + 'Alpha=a', + '--image-property', + 'Beta=b', + self.new_volume.id, + ] + verifylist = [ + ('image_properties', {'Alpha': 'a', 'Beta': 'b'}), + ('volume', self.new_volume.id), + ] + parsed_args = self.check_parser(self.cmd_set, arglist, verifylist) + + # In base command class ShowOne in cliff, abstract method take_action() + # returns nothing + self.cmd_set.take_action(parsed_args) + + # Arguments for unsetting image properties + arglist_unset = [ + '--image-property', + 'Alpha', + self.new_volume.id, + ] + verifylist_unset = [ + ('image_properties', ['Alpha']), + ('volume', self.new_volume.id), + ] + parsed_args_unset = self.check_parser( + self.cmd_unset, arglist_unset, verifylist_unset + ) + + # In base command class ShowOne in cliff, abstract 
method take_action() + # returns nothing + self.cmd_unset.take_action(parsed_args_unset) + + self.volumes_mock.delete_image_metadata.assert_called_with( + self.new_volume.id, parsed_args_unset.image_properties + ) + + def test_volume_unset_image_property_fail(self): + self.volumes_mock.delete_image_metadata.side_effect = ( + exceptions.CommandError() + ) + arglist = [ + '--image-property', + 'Alpha', + '--property', + 'Beta', + self.new_volume.id, + ] + verifylist = [ + ('image_properties', ['Alpha']), + ('properties', ['Beta']), + ('volume', self.new_volume.id), + ] + parsed_args = self.check_parser(self.cmd_unset, arglist, verifylist) + + try: + self.cmd_unset.take_action(parsed_args) + self.fail('CommandError should be raised.') + except exceptions.CommandError as e: + self.assertEqual( + 'One or more of the unset operations failed', str(e) + ) + self.volumes_mock.delete_image_metadata.assert_called_with( + self.new_volume.id, parsed_args.image_properties + ) + self.volumes_mock.delete_metadata.assert_called_with( + self.new_volume.id, parsed_args.properties + ) -class TestVolumeSummary(volume_fakes.TestVolume): +class TestVolumeSummary(volume_fakes.TestVolume): columns = [ 'Total Count', 'Total Size', @@ -34,22 +2196,20 @@ class TestVolumeSummary(volume_fakes.TestVolume): def setUp(self): super().setUp() - self.volumes_mock = self.app.client_manager.volume.volumes - self.volumes_mock.reset_mock() - self.mock_vol_1 = volume_fakes.create_one_volume() - self.mock_vol_2 = volume_fakes.create_one_volume() - self.return_dict = { - 'volume-summary': { - 'total_count': 2, - 'total_size': self.mock_vol_1.size + self.mock_vol_2.size}} - self.volumes_mock.summary.return_value = self.return_dict + self.volume_a = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_b = sdk_fakes.generate_fake_resource(_volume.Volume) + self.summary = sdk_fakes.generate_fake_resource( + _summary.BlockStorageSummary, + total_count=2, + total_size=self.volume_a.size + self.volume_b.size, + ) + self.volume_sdk_client.summary.return_value = self.summary # Get the command object to test self.cmd = volume.VolumeSummary(self.app, None) def test_volume_summary(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.12') + self.set_volume_api_version('3.12') arglist = [ '--all-projects', ] @@ -60,18 +2220,14 @@ def test_volume_summary(self): columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.summary.assert_called_once_with( - all_tenants=True, - ) + self.volume_sdk_client.summary.assert_called_once_with(True) self.assertEqual(self.columns, columns) - datalist = ( - 2, - self.mock_vol_1.size + self.mock_vol_2.size) + datalist = (2, self.volume_a.size + self.volume_b.size) self.assertCountEqual(datalist, tuple(data)) - def test_volume_summary_pre_312(self): + def test_volume_summary_pre_v312(self): arglist = [ '--all-projects', ] @@ -81,21 +2237,23 @@ def test_volume_summary_pre_312(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.12 or greater is required', - str(exc)) + '--os-volume-api-version 3.12 or greater is required', str(exc) + ) def test_volume_summary_with_metadata(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.36') + self.set_volume_api_version('3.36') - combine_meta = {**self.mock_vol_1.metadata, 
**self.mock_vol_2.metadata} - meta_dict = copy.deepcopy(self.return_dict) - meta_dict['volume-summary']['metadata'] = combine_meta - self.volumes_mock.summary.return_value = meta_dict + metadata = {**self.volume_a.metadata, **self.volume_b.metadata} + self.summary = sdk_fakes.generate_fake_resource( + _summary.BlockStorageSummary, + total_count=2, + total_size=self.volume_a.size + self.volume_b.size, + metadata=metadata, + ) + self.volume_sdk_client.summary.return_value = self.summary new_cols = copy.deepcopy(self.columns) new_cols.extend(['Metadata']) @@ -110,70 +2268,116 @@ def test_volume_summary_with_metadata(self): columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.summary.assert_called_once_with( - all_tenants=True, - ) + self.volume_sdk_client.summary.assert_called_once_with(True) self.assertEqual(new_cols, columns) datalist = ( 2, - self.mock_vol_1.size + self.mock_vol_2.size, - format_columns.DictColumn(combine_meta)) + self.volume_a.size + self.volume_b.size, + format_columns.DictColumn(metadata), + ) self.assertCountEqual(datalist, tuple(data)) class TestVolumeRevertToSnapshot(volume_fakes.TestVolume): - def setUp(self): super().setUp() - self.volumes_mock = self.app.client_manager.volume.volumes - self.volumes_mock.reset_mock() - self.snapshots_mock = self.app.client_manager.volume.volume_snapshots - self.snapshots_mock.reset_mock() - self.mock_volume = volume_fakes.create_one_volume() - self.mock_snapshot = volume_fakes.create_one_snapshot( - attrs={'volume_id': self.volumes_mock.id}) + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.snapshot = sdk_fakes.generate_fake_resource( + _snapshot.Snapshot, + volume_id=self.volume.id, + ) + self.volume_sdk_client.find_volume.return_value = self.volume + self.volume_sdk_client.find_snapshot.return_value = self.snapshot # Get the command object to test self.cmd = volume.VolumeRevertToSnapshot(self.app, None) - def test_volume_revert_to_snapshot_pre_340(self): + def test_volume_revert_to_snapshot_pre_v340(self): arglist = [ - self.mock_snapshot.id, + self.snapshot.id, ] verifylist = [ - ('snapshot', self.mock_snapshot.id), + ('snapshot', self.snapshot.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.40 or greater is required', - str(exc)) + '--os-volume-api-version 3.40 or greater is required', str(exc) + ) def test_volume_revert_to_snapshot(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.40') + self.set_volume_api_version('3.40') arglist = [ - self.mock_snapshot.id, + self.snapshot.id, ] verifylist = [ - ('snapshot', self.mock_snapshot.id), + ('snapshot', self.snapshot.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) - find_mock_result = [self.mock_snapshot, self.mock_volume] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: - self.cmd.take_action(parsed_args) + self.cmd.take_action(parsed_args) - self.volumes_mock.revert_to_snapshot.assert_called_once_with( - volume=self.mock_volume, - snapshot=self.mock_snapshot, - ) - self.assertEqual(2, find_mock.call_count) + self.volume_sdk_client.revert_volume_to_snapshot.assert_called_once_with( + self.volume, + self.snapshot, + ) + self.volume_sdk_client.find_volume.assert_called_with( + self.volume.id, + ignore_missing=False, + ) + 
self.volume_sdk_client.find_snapshot.assert_called_with( + self.snapshot.id, + ignore_missing=False, + ) + + +class TestColumns(volume_fakes.TestVolume): + def test_attachments_column_without_server_cache(self): + vol = sdk_fakes.generate_fake_resource( + _volume.Volume, + attachments=[ + { + 'device': '/dev/' + uuid.uuid4().hex, + 'server_id': uuid.uuid4().hex, + }, + ], + ) + server_id = vol.attachments[0]['server_id'] + device = vol.attachments[0]['device'] + + col = volume.AttachmentsColumn(vol.attachments, {}) + self.assertEqual( + f'Attached to {server_id} on {device} ', + col.human_readable(), + ) + self.assertEqual(vol.attachments, col.machine_readable()) + + def test_attachments_column_with_server_cache(self): + vol = sdk_fakes.generate_fake_resource( + _volume.Volume, + attachments=[ + { + 'device': '/dev/' + uuid.uuid4().hex, + 'server_id': uuid.uuid4().hex, + }, + ], + ) + + server_id = vol.attachments[0]['server_id'] + device = vol.attachments[0]['device'] + fake_server = mock.Mock() + fake_server.name = 'fake-server-name' + server_cache = {server_id: fake_server} + + col = volume.AttachmentsColumn(vol.attachments, server_cache) + self.assertEqual( + 'Attached to {} on {} '.format('fake-server-name', device), + col.human_readable(), + ) + self.assertEqual(vol.attachments, col.machine_readable()) diff --git a/openstackclient/tests/unit/volume/v3/test_volume_attachment.py b/openstackclient/tests/unit/volume/v3/test_volume_attachment.py index c0bf5ae783..b7838e034a 100644 --- a/openstackclient/tests/unit/volume/v3/test_volume_attachment.py +++ b/openstackclient/tests/unit/volume/v3/test_volume_attachment.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. -from cinderclient import api_versions from osc_lib.cli import format_columns from osc_lib import exceptions @@ -21,28 +20,15 @@ class TestVolumeAttachment(volume_fakes.TestVolume): - def setUp(self): super().setUp() - self.volumes_mock = self.app.client_manager.volume.volumes - self.volumes_mock.reset_mock() - - self.volume_attachments_mock = \ - self.app.client_manager.volume.attachments - self.volume_attachments_mock.reset_mock() - self.projects_mock = self.app.client_manager.identity.projects - self.projects_mock.reset_mock() - - self.servers_mock = self.app.client_manager.compute.servers - self.servers_mock.reset_mock() class TestVolumeAttachmentCreate(TestVolumeAttachment): - volume = volume_fakes.create_one_volume() - server = compute_fakes.FakeServer.create_one_server() + server = compute_fakes.create_one_server() volume_attachment = volume_fakes.create_one_volume_attachment( attrs={'instance': server.id, 'volume_id': volume.id}, ) @@ -71,17 +57,16 @@ class TestVolumeAttachmentCreate(TestVolumeAttachment): def setUp(self): super().setUp() - self.volumes_mock.get.return_value = self.volume - self.servers_mock.get.return_value = self.server - # VolumeAttachmentManager.create returns a dict - self.volume_attachments_mock.create.return_value = \ + self.volume_sdk_client.find_volume.return_value = self.volume + self.volume_sdk_client.create_attachment.return_value = ( self.volume_attachment.to_dict() + ) + self.compute_client.find_server.return_value = self.server self.cmd = volume_attachment.CreateVolumeAttachment(self.app, None) def test_volume_attachment_create(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.27') + self.set_volume_api_version('3.27') arglist = [ self.volume.id, @@ -104,30 +89,43 @@ def 
test_volume_attachment_create(self): columns, data = self.cmd.take_action(parsed_args) - self.volumes_mock.get.assert_called_once_with(self.volume.id) - self.servers_mock.get.assert_called_once_with(self.server.id) - self.volume_attachments_mock.create.assert_called_once_with( - self.volume.id, {}, self.server.id, None, + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volume.id, ignore_missing=False + ) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.volume_sdk_client.create_attachment.assert_called_once_with( + self.volume.id, + connector={}, + instance=self.server.id, + mode=None, ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_attachment_create_with_connect(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.54') + self.set_volume_api_version('3.54') arglist = [ self.volume.id, self.server.id, '--connect', - '--initiator', 'iqn.1993-08.org.debian:01:cad181614cec', - '--ip', '192.168.1.20', - '--host', 'my-host', - '--platform', 'x86_64', - '--os-type', 'linux2', + '--initiator', + 'iqn.1993-08.org.debian:01:cad181614cec', + '--ip', + '192.168.1.20', + '--host', + 'my-host', + '--platform', + 'x86_64', + '--os-type', + 'linux2', '--multipath', - '--mountpoint', '/dev/vdb', - '--mode', 'null', + '--mountpoint', + '/dev/vdb', + '--mode', + 'null', ] verifylist = [ ('volume', self.volume.id), @@ -146,27 +144,35 @@ def test_volume_attachment_create_with_connect(self): columns, data = self.cmd.take_action(parsed_args) - connect_info = dict([ - ('initiator', 'iqn.1993-08.org.debian:01:cad181614cec'), - ('ip', '192.168.1.20'), - ('host', 'my-host'), - ('platform', 'x86_64'), - ('os_type', 'linux2'), - ('multipath', True), - ('mountpoint', '/dev/vdb'), - ]) + connect_info = dict( + [ + ('initiator', 'iqn.1993-08.org.debian:01:cad181614cec'), + ('ip', '192.168.1.20'), + ('host', 'my-host'), + ('platform', 'x86_64'), + ('os_type', 'linux2'), + ('multipath', True), + ('mountpoint', '/dev/vdb'), + ] + ) - self.volumes_mock.get.assert_called_once_with(self.volume.id) - self.servers_mock.get.assert_called_once_with(self.server.id) - self.volume_attachments_mock.create.assert_called_once_with( - self.volume.id, connect_info, self.server.id, 'null', + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volume.id, ignore_missing=False + ) + self.compute_client.find_server.assert_called_once_with( + self.server.id, ignore_missing=False + ) + self.volume_sdk_client.create_attachment.assert_called_once_with( + self.volume.id, + connector=connect_info, + instance=self.server.id, + mode='null', ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_attachment_create_pre_v327(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.26') + self.set_volume_api_version('3.26') arglist = [ self.volume.id, @@ -179,21 +185,20 @@ def test_volume_attachment_create_pre_v327(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.27 or greater is required', - str(exc)) + '--os-volume-api-version 3.27 or greater is required', str(exc) + ) def test_volume_attachment_create_with_mode_pre_v354(self): - self.app.client_manager.volume.api_version = \ - 
api_versions.APIVersion('3.53') + self.set_volume_api_version('3.53') arglist = [ self.volume.id, self.server.id, - '--mode', 'rw', + '--mode', + 'rw', ] verifylist = [ ('volume', self.volume.id), @@ -203,21 +208,20 @@ def test_volume_attachment_create_with_mode_pre_v354(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.54 or greater is required', - str(exc)) + '--os-volume-api-version 3.54 or greater is required', str(exc) + ) def test_volume_attachment_create_with_connect_missing_arg(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.54') + self.set_volume_api_version('3.54') arglist = [ self.volume.id, self.server.id, - '--initiator', 'iqn.1993-08.org.debian:01:cad181614cec', + '--initiator', + 'iqn.1993-08.org.debian:01:cad181614cec', ] verifylist = [ ('volume', self.volume.id), @@ -228,28 +232,25 @@ def test_volume_attachment_create_with_connect_missing_arg(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - 'You must specify the --connect option for any', - str(exc)) + 'You must specify the --connect option for any', str(exc) + ) class TestVolumeAttachmentDelete(TestVolumeAttachment): - volume_attachment = volume_fakes.create_one_volume_attachment() def setUp(self): super().setUp() - self.volume_attachments_mock.delete.return_value = None + self.volume_sdk_client.delete_attachment.return_value = None self.cmd = volume_attachment.DeleteVolumeAttachment(self.app, None) def test_volume_attachment_delete(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.27') + self.set_volume_api_version('3.27') arglist = [ self.volume_attachment.id, @@ -261,14 +262,13 @@ def test_volume_attachment_delete(self): result = self.cmd.take_action(parsed_args) - self.volume_attachments_mock.delete.assert_called_once_with( + self.volume_sdk_client.delete_attachment.assert_called_once_with( self.volume_attachment.id, ) self.assertIsNone(result) def test_volume_attachment_delete_pre_v327(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.26') + self.set_volume_api_version('3.26') arglist = [ self.volume_attachment.id, @@ -279,16 +279,14 @@ def test_volume_attachment_delete_pre_v327(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.27 or greater is required', - str(exc)) + '--os-volume-api-version 3.27 or greater is required', str(exc) + ) class TestVolumeAttachmentSet(TestVolumeAttachment): - volume_attachment = volume_fakes.create_one_volume_attachment() columns = ( @@ -315,24 +313,30 @@ class TestVolumeAttachmentSet(TestVolumeAttachment): def setUp(self): super().setUp() - self.volume_attachments_mock.update.return_value = \ + self.volume_sdk_client.update_attachment.return_value = ( self.volume_attachment + ) self.cmd = volume_attachment.SetVolumeAttachment(self.app, None) def test_volume_attachment_set(self): - self.app.client_manager.volume.api_version = \ - 
api_versions.APIVersion('3.27') + self.set_volume_api_version('3.27') arglist = [ self.volume_attachment.id, - '--initiator', 'iqn.1993-08.org.debian:01:cad181614cec', - '--ip', '192.168.1.20', - '--host', 'my-host', - '--platform', 'x86_64', - '--os-type', 'linux2', + '--initiator', + 'iqn.1993-08.org.debian:01:cad181614cec', + '--ip', + '192.168.1.20', + '--host', + 'my-host', + '--platform', + 'x86_64', + '--os-type', + 'linux2', '--multipath', - '--mountpoint', '/dev/vdb', + '--mountpoint', + '/dev/vdb', ] verifylist = [ ('attachment', self.volume_attachment.id), @@ -348,29 +352,32 @@ def test_volume_attachment_set(self): columns, data = self.cmd.take_action(parsed_args) - connect_info = dict([ - ('initiator', 'iqn.1993-08.org.debian:01:cad181614cec'), - ('ip', '192.168.1.20'), - ('host', 'my-host'), - ('platform', 'x86_64'), - ('os_type', 'linux2'), - ('multipath', True), - ('mountpoint', '/dev/vdb'), - ]) + connect_info = dict( + [ + ('initiator', 'iqn.1993-08.org.debian:01:cad181614cec'), + ('ip', '192.168.1.20'), + ('host', 'my-host'), + ('platform', 'x86_64'), + ('os_type', 'linux2'), + ('multipath', True), + ('mountpoint', '/dev/vdb'), + ] + ) - self.volume_attachments_mock.update.assert_called_once_with( - self.volume_attachment.id, connect_info, + self.volume_sdk_client.update_attachment.assert_called_once_with( + self.volume_attachment.id, + connector=connect_info, ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_attachment_set_pre_v327(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.26') + self.set_volume_api_version('3.26') arglist = [ self.volume_attachment.id, - '--initiator', 'iqn.1993-08.org.debian:01:cad181614cec', + '--initiator', + 'iqn.1993-08.org.debian:01:cad181614cec', ] verifylist = [ ('attachment', self.volume_attachment.id), @@ -379,28 +386,25 @@ def test_volume_attachment_set_pre_v327(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.27 or greater is required', - str(exc)) + '--os-volume-api-version 3.27 or greater is required', str(exc) + ) class TestVolumeAttachmentComplete(TestVolumeAttachment): - volume_attachment = volume_fakes.create_one_volume_attachment() def setUp(self): super().setUp() - self.volume_attachments_mock.complete.return_value = None + self.volume_sdk_client.complete_attachment.return_value = None self.cmd = volume_attachment.CompleteVolumeAttachment(self.app, None) def test_volume_attachment_complete(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.44') + self.set_volume_api_version('3.44') arglist = [ self.volume_attachment.id, @@ -412,14 +416,13 @@ def test_volume_attachment_complete(self): result = self.cmd.take_action(parsed_args) - self.volume_attachments_mock.complete.assert_called_once_with( + self.volume_sdk_client.complete_attachment.assert_called_once_with( self.volume_attachment.id, ) self.assertIsNone(result) def test_volume_attachment_complete_pre_v344(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.43') + self.set_volume_api_version('3.43') arglist = [ self.volume_attachment.id, @@ -430,16 +433,14 @@ def test_volume_attachment_complete_pre_v344(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - 
exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.44 or greater is required', - str(exc)) + '--os-volume-api-version 3.44 or greater is required', str(exc) + ) class TestVolumeAttachmentList(TestVolumeAttachment): - project = identity_fakes.FakeProject.create_one_project() volume_attachments = volume_fakes.create_volume_attachments() @@ -455,21 +456,22 @@ class TestVolumeAttachmentList(TestVolumeAttachment): volume_attachment.volume_id, volume_attachment.instance, volume_attachment.status, - ) for volume_attachment in volume_attachments + ) + for volume_attachment in volume_attachments ] def setUp(self): super().setUp() self.projects_mock.get.return_value = self.project - self.volume_attachments_mock.list.return_value = \ + self.volume_sdk_client.attachments.return_value = ( self.volume_attachments + ) self.cmd = volume_attachment.ListVolumeAttachment(self.app, None) def test_volume_attachment_list(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.27') + self.set_volume_api_version('3.27') arglist = [] verifylist = [ @@ -484,7 +486,7 @@ def test_volume_attachment_list(self): columns, data = self.cmd.take_action(parsed_args) - self.volume_attachments_mock.list.assert_called_once_with( + self.volume_sdk_client.attachments.assert_called_once_with( search_opts={ 'all_tenants': False, 'project_id': None, @@ -498,15 +500,19 @@ def test_volume_attachment_list(self): self.assertCountEqual(tuple(self.data), data) def test_volume_attachment_list_with_options(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.27') + self.set_volume_api_version('3.27') arglist = [ - '--project', self.project.name, - '--volume-id', 'volume-id', - '--status', 'attached', - '--marker', 'volume-attachment-id', - '--limit', '2', + '--project', + self.project.name, + '--volume-id', + 'volume-id', + '--status', + 'attached', + '--marker', + 'volume-attachment-id', + '--limit', + '2', ] verifylist = [ ('project', self.project.name), @@ -520,7 +526,7 @@ def test_volume_attachment_list_with_options(self): columns, data = self.cmd.take_action(parsed_args) - self.volume_attachments_mock.list.assert_called_once_with( + self.volume_sdk_client.attachments.assert_called_once_with( search_opts={ 'all_tenants': True, 'project_id': self.project.id, @@ -534,8 +540,7 @@ def test_volume_attachment_list_with_options(self): self.assertCountEqual(tuple(self.data), data) def test_volume_attachment_list_pre_v327(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.26') + self.set_volume_api_version('3.26') arglist = [] verifylist = [ @@ -549,9 +554,8 @@ def test_volume_attachment_list_pre_v327(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.27 or greater is required', - str(exc)) + '--os-volume-api-version 3.27 or greater is required', str(exc) + ) diff --git a/openstackclient/tests/unit/volume/v3/test_volume_backup.py b/openstackclient/tests/unit/volume/v3/test_volume_backup.py new file mode 100644 index 0000000000..86bde785f6 --- /dev/null +++ b/openstackclient/tests/unit/volume/v3/test_volume_backup.py @@ -0,0 +1,917 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not 
use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack.block_storage.v3 import backup as _backup +from openstack.block_storage.v3 import snapshot as _snapshot +from openstack.block_storage.v3 import volume as _volume +from openstack import exceptions as sdk_exceptions +from openstack.identity.v3 import project as _project +from openstack.test import fakes as sdk_fakes +from osc_lib import exceptions + +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes +from openstackclient.volume.v3 import volume_backup + + +class TestBackupCreate(volume_fakes.TestVolume): + columns = ( + 'id', + 'name', + 'volume_id', + ) + + def setUp(self): + super().setUp() + + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot) + self.volume_sdk_client.find_snapshot.return_value = self.snapshot + self.backup = sdk_fakes.generate_fake_resource( + _backup.Backup, + volume_id=self.volume.id, + snapshot_id=self.snapshot.id, + ) + self.volume_sdk_client.create_backup.return_value = self.backup + + self.data = ( + self.backup.id, + self.backup.name, + self.backup.volume_id, + ) + + self.cmd = volume_backup.CreateVolumeBackup(self.app, None) + + def test_backup_create(self): + arglist = [ + "--name", + self.backup.name, + "--description", + self.backup.description, + "--container", + self.backup.container, + "--force", + "--incremental", + "--snapshot", + self.backup.snapshot_id, + self.backup.volume_id, + ] + verifylist = [ + ("name", self.backup.name), + ("description", self.backup.description), + ("container", self.backup.container), + ("force", True), + ("incremental", True), + ("snapshot", self.backup.snapshot_id), + ("volume", self.backup.volume_id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_backup.assert_called_with( + volume_id=self.backup.volume_id, + container=self.backup.container, + name=self.backup.name, + description=self.backup.description, + force=True, + is_incremental=True, + snapshot_id=self.backup.snapshot_id, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + def test_backup_create_with_properties(self): + self.set_volume_api_version('3.43') + + arglist = [ + "--property", + "foo=bar", + "--property", + "wow=much-cool", + self.backup.volume_id, + ] + verifylist = [ + ("properties", {"foo": "bar", "wow": "much-cool"}), + ("volume", self.backup.volume_id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_backup.assert_called_with( + volume_id=self.backup.volume_id, + container=None, + name=None, + description=None, + force=False, + is_incremental=False, + metadata={"foo": "bar", "wow": "much-cool"}, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + def test_backup_create_with_properties_pre_v343(self): 
+ self.set_volume_api_version('3.42') + + arglist = [ + "--property", + "foo=bar", + "--property", + "wow=much-cool", + self.backup.volume_id, + ] + verifylist = [ + ("properties", {"foo": "bar", "wow": "much-cool"}), + ("volume", self.backup.volume_id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn("--os-volume-api-version 3.43 or greater", str(exc)) + + def test_backup_create_with_availability_zone(self): + self.set_volume_api_version('3.51') + + arglist = [ + "--availability-zone", + "my-az", + self.backup.volume_id, + ] + verifylist = [ + ("availability_zone", "my-az"), + ("volume", self.backup.volume_id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_backup.assert_called_with( + volume_id=self.backup.volume_id, + container=None, + name=None, + description=None, + force=False, + is_incremental=False, + availability_zone="my-az", + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + def test_backup_create_with_availability_zone_pre_v351(self): + self.set_volume_api_version('3.50') + + arglist = [ + "--availability-zone", + "my-az", + self.backup.volume_id, + ] + verifylist = [ + ("availability_zone", "my-az"), + ("volume", self.backup.volume_id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn("--os-volume-api-version 3.51 or greater", str(exc)) + + def test_backup_create_without_name(self): + arglist = [ + "--description", + self.backup.description, + "--container", + self.backup.container, + self.backup.volume_id, + ] + verifylist = [ + ("description", self.backup.description), + ("container", self.backup.container), + ("volume", self.backup.volume_id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.create_backup.assert_called_with( + volume_id=self.backup.volume_id, + container=self.backup.container, + name=None, + description=self.backup.description, + force=False, + is_incremental=False, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + +class TestBackupDelete(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.backups = list(sdk_fakes.generate_fake_resources(_backup.Backup)) + self.volume_sdk_client.find_backup.side_effect = self.backups + self.volume_sdk_client.delete_backup.return_value = None + + self.cmd = volume_backup.DeleteVolumeBackup(self.app, None) + + def test_backup_delete(self): + arglist = [self.backups[0].id] + verifylist = [("backups", [self.backups[0].id])] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.delete_backup.assert_called_with( + self.backups[0].id, ignore_missing=False, force=False + ) + self.assertIsNone(result) + + def test_backup_delete_with_force(self): + arglist = [ + '--force', + self.backups[0].id, + ] + verifylist = [('force', True), ("backups", [self.backups[0].id])] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.delete_backup.assert_called_with( + self.backups[0].id, ignore_missing=False, force=True + ) + 
self.assertIsNone(result) + + def test_delete_multiple_backups(self): + arglist = [] + for b in self.backups: + arglist.append(b.id) + verifylist = [ + ('backups', arglist), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + calls = [] + for b in self.backups: + calls.append(mock.call(b.id, ignore_missing=False, force=False)) + self.volume_sdk_client.delete_backup.assert_has_calls(calls) + self.assertIsNone(result) + + def test_delete_multiple_backups_with_exception(self): + arglist = [ + self.backups[0].id, + 'unexist_backup', + ] + verifylist = [ + ('backups', arglist), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + find_mock_result = [self.backups[0], exceptions.CommandError] + self.volume_sdk_client.find_backup.side_effect = find_mock_result + + try: + self.cmd.take_action(parsed_args) + self.fail('CommandError should be raised.') + except exceptions.CommandError as e: + self.assertEqual('1 of 2 backups failed to delete.', str(e)) + + self.volume_sdk_client.find_backup.assert_any_call( + self.backups[0].id, ignore_missing=False + ) + self.volume_sdk_client.find_backup.assert_any_call( + 'unexist_backup', ignore_missing=False + ) + + self.assertEqual(2, self.volume_sdk_client.find_backup.call_count) + self.volume_sdk_client.delete_backup.assert_called_once_with( + self.backups[0].id, + ignore_missing=False, + force=False, + ) + + +class TestBackupList(volume_fakes.TestVolume): + columns = ( + 'ID', + 'Name', + 'Description', + 'Status', + 'Size', + 'Incremental', + 'Created At', + ) + columns_long = columns + ( + 'Availability Zone', + 'Volume', + 'Container', + ) + + def setUp(self): + super().setUp() + + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.volume_sdk_client.volumes.return_value = [self.volume] + self.backups = list( + sdk_fakes.generate_fake_resources( + _backup.Backup, + attrs={'volume_id': self.volume.id}, + ) + ) + self.volume_sdk_client.backups.return_value = self.backups + self.volume_sdk_client.find_backup.return_value = self.backups[0] + + self.data = [] + for b in self.backups: + self.data.append( + ( + b.id, + b.name, + b.description, + b.status, + b.size, + b.is_incremental, + b.created_at, + ) + ) + self.data_long = [] + for b in self.backups: + self.data_long.append( + ( + b.id, + b.name, + b.description, + b.status, + b.size, + b.is_incremental, + b.created_at, + b.availability_zone, + volume_backup.VolumeIdColumn(b.volume_id), + b.container, + ) + ) + + self.cmd = volume_backup.ListVolumeBackup(self.app, None) + + def test_backup_list_without_options(self): + arglist = [] + verifylist = [ + ("long", False), + ("name", None), + ("status", None), + ("volume", None), + ("marker", None), + ("limit", None), + ('all_projects', False), + ("project", None), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.find_volume.assert_not_called() + self.volume_sdk_client.find_backup.assert_not_called() + self.volume_sdk_client.backups.assert_called_with( + name=None, + status=None, + volume_id=None, + all_tenants=False, + marker=None, + limit=None, + project_id=None, + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, list(data)) + + def test_backup_list_with_options(self): + project = sdk_fakes.generate_fake_resource(_project.Project) + 
self.identity_sdk_client.find_project.return_value = project + arglist = [ + "--long", + "--name", + self.backups[0].name, + "--status", + "error", + "--volume", + self.volume.id, + "--marker", + self.backups[0].id, + "--all-projects", + "--limit", + "3", + "--project", + project.id, + ] + verifylist = [ + ("long", True), + ("name", self.backups[0].name), + ("status", "error"), + ("volume", self.volume.id), + ("marker", self.backups[0].id), + ('all_projects', True), + ("limit", 3), + ("project", project.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.find_volume.assert_called_once_with( + self.volume.id, ignore_missing=False + ) + self.volume_sdk_client.find_backup.assert_called_once_with( + self.backups[0].id, ignore_missing=False + ) + self.volume_sdk_client.backups.assert_called_with( + name=self.backups[0].name, + status="error", + volume_id=self.volume.id, + all_tenants=True, + marker=self.backups[0].id, + limit=3, + project_id=project.id, + ) + self.assertEqual(self.columns_long, columns) + self.assertCountEqual(self.data_long, list(data)) + + +class TestBackupRestore(volume_fakes.TestVolume): + columns = ( + "id", + "volume_id", + "volume_name", + ) + + def setUp(self): + super().setUp() + + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.backup = sdk_fakes.generate_fake_resource( + _backup.Backup, volume_id=self.volume.id + ) + self.volume_sdk_client.find_backup.return_value = self.backup + self.volume_sdk_client.create_backup.return_value = self.backup + self.volume_sdk_client.restore_backup.return_value = { + 'id': self.backup['id'], + 'volume_id': self.volume['id'], + 'volume_name': self.volume['name'], + } + + self.data = ( + self.backup.id, + self.volume.id, + self.volume.name, + ) + + self.cmd = volume_backup.RestoreVolumeBackup(self.app, None) + + def test_backup_restore(self): + self.volume_sdk_client.find_volume.side_effect = ( + exceptions.CommandError() + ) + arglist = [self.backup.id] + verifylist = [ + ("backup", self.backup.id), + ("volume", None), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_sdk_client.restore_backup.assert_called_with( + self.backup.id, + volume_id=None, + name=None, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + def test_backup_restore_with_volume(self): + self.volume_sdk_client.find_volume.side_effect = ( + exceptions.CommandError() + ) + arglist = [ + self.backup.id, + self.backup.volume_id, + ] + verifylist = [ + ("backup", self.backup.id), + ("volume", self.backup.volume_id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_sdk_client.restore_backup.assert_called_with( + self.backup.id, + volume_id=None, + name=self.backup.volume_id, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + def test_backup_restore_with_volume_force(self): + arglist = [ + "--force", + self.backup.id, + self.volume.name, + ] + verifylist = [ + ("force", True), + ("backup", self.backup.id), + ("volume", self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_sdk_client.restore_backup.assert_called_with( + self.backup.id, + 
volume_id=self.volume.id, + name=None, + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + def test_backup_restore_with_volume_existing(self): + arglist = [ + self.backup.id, + self.volume.name, + ] + verifylist = [ + ("backup", self.backup.id), + ("volume", self.volume.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + +class TestBackupSet(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.backup = sdk_fakes.generate_fake_resource( + _backup.Backup, metadata={'wow': 'cool'} + ) + self.volume_sdk_client.find_backup.return_value = self.backup + + self.cmd = volume_backup.SetVolumeBackup(self.app, None) + + def test_backup_set_name(self): + self.set_volume_api_version('3.9') + + arglist = [ + '--name', + 'new_name', + self.backup.id, + ] + verifylist = [ + ('name', 'new_name'), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + self.volume_sdk_client.update_backup.assert_called_once_with( + self.backup, name='new_name' + ) + + def test_backup_set_name_pre_v39(self): + self.set_volume_api_version('3.8') + + arglist = [ + '--name', + 'new_name', + self.backup.id, + ] + verifylist = [ + ('name', 'new_name'), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn("--os-volume-api-version 3.9 or greater", str(exc)) + + def test_backup_set_description(self): + self.set_volume_api_version('3.9') + + arglist = [ + '--description', + 'new_description', + self.backup.id, + ] + verifylist = [ + ('name', None), + ('description', 'new_description'), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + self.volume_sdk_client.update_backup.assert_called_once_with( + self.backup, description='new_description' + ) + + def test_backup_set_description_pre_v39(self): + self.set_volume_api_version('3.8') + + arglist = [ + '--description', + 'new_description', + self.backup.id, + ] + verifylist = [ + ('name', None), + ('description', 'new_description'), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn("--os-volume-api-version 3.9 or greater", str(exc)) + + def test_backup_set_state(self): + arglist = ['--state', 'error', self.backup.id] + verifylist = [('state', 'error'), ('backup', self.backup.id)] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + self.volume_sdk_client.reset_backup_status.assert_called_with( + self.backup, status='error' + ) + + def test_backup_set_state_failed(self): + self.volume_sdk_client.reset_backup_status.side_effect = ( + sdk_exceptions.NotFoundException('foo') + ) + 
+ arglist = ['--state', 'error', self.backup.id] + verifylist = [('state', 'error'), ('backup', self.backup.id)] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertEqual('One or more of the set operations failed', str(exc)) + + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + self.volume_sdk_client.reset_backup_status.assert_called_with( + self.backup, status='error' + ) + + def test_backup_set_no_property(self): + self.set_volume_api_version('3.43') + + arglist = [ + '--no-property', + self.backup.id, + ] + verifylist = [ + ('no_property', True), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + self.volume_sdk_client.update_backup.assert_called_once_with( + self.backup, metadata={} + ) + + def test_backup_set_no_property_pre_v343(self): + self.set_volume_api_version('3.42') + + arglist = [ + '--no-property', + self.backup.id, + ] + verifylist = [ + ('no_property', True), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn("--os-volume-api-version 3.43 or greater", str(exc)) + + def test_backup_set_property(self): + self.set_volume_api_version('3.43') + + arglist = [ + '--property', + 'foo=bar', + self.backup.id, + ] + verifylist = [ + ('properties', {'foo': 'bar'}), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + self.volume_sdk_client.update_backup.assert_called_once_with( + self.backup, metadata={'wow': 'cool', 'foo': 'bar'} + ) + + def test_backup_set_property_pre_v343(self): + self.set_volume_api_version('3.42') + + arglist = [ + '--property', + 'foo=bar', + self.backup.id, + ] + verifylist = [ + ('properties', {'foo': 'bar'}), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn("--os-volume-api-version 3.43 or greater", str(exc)) + + +class TestBackupUnset(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.backup = sdk_fakes.generate_fake_resource( + _backup.Backup, metadata={'foo': 'bar', 'wow': 'cool'} + ) + self.volume_sdk_client.find_backup.return_value = self.backup + self.volume_sdk_client.delete_backup_metadata.return_value = None + + self.cmd = volume_backup.UnsetVolumeBackup(self.app, None) + + def test_backup_unset_property(self): + self.set_volume_api_version('3.43') + + arglist = [ + '--property', + 'foo', + self.backup.id, + ] + verifylist = [ + ('properties', ['foo']), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + self.volume_sdk_client.delete_backup_metadata.assert_called_once_with( + 
self.backup, keys=['wow'] + ) + + def test_backup_unset_property_pre_v343(self): + self.set_volume_api_version('3.42') + + arglist = [ + '--property', + 'foo', + self.backup.id, + ] + verifylist = [ + ('properties', ['foo']), + ('backup', self.backup.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn("--os-volume-api-version 3.43 or greater", str(exc)) + + +class TestBackupShow(volume_fakes.TestVolume): + columns = ( + "availability_zone", + "container", + "created_at", + "data_timestamp", + "description", + "encryption_key_id", + "fail_reason", + "has_dependent_backups", + "id", + "is_incremental", + "metadata", + "name", + "object_count", + "project_id", + "size", + "snapshot_id", + "status", + "updated_at", + "user_id", + "volume_id", + ) + + def setUp(self): + super().setUp() + + self.backup = sdk_fakes.generate_fake_resource(_backup.Backup) + self.volume_sdk_client.find_backup.return_value = self.backup + + self.data = ( + self.backup.availability_zone, + self.backup.container, + self.backup.created_at, + self.backup.data_timestamp, + self.backup.description, + self.backup.encryption_key_id, + self.backup.fail_reason, + self.backup.has_dependent_backups, + self.backup.id, + self.backup.is_incremental, + self.backup.metadata, + self.backup.name, + self.backup.object_count, + self.backup.project_id, + self.backup.size, + self.backup.snapshot_id, + self.backup.status, + self.backup.updated_at, + self.backup.user_id, + self.backup.volume_id, + ) + + self.cmd = volume_backup.ShowVolumeBackup(self.app, None) + + def test_backup_show(self): + arglist = [self.backup.id] + verifylist = [("backup", self.backup.id)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_sdk_client.find_backup.assert_called_with( + self.backup.id, ignore_missing=False + ) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/volume/v3/test_volume_group.py b/openstackclient/tests/unit/volume/v3/test_volume_group.py index 78717de851..3b64ad95c5 100644 --- a/openstackclient/tests/unit/volume/v3/test_volume_group.py +++ b/openstackclient/tests/unit/volume/v3/test_volume_group.py @@ -12,7 +12,6 @@ from unittest import mock -from cinderclient import api_versions from osc_lib import exceptions from openstackclient.tests.unit import utils as tests_utils @@ -21,27 +20,23 @@ class TestVolumeGroup(volume_fakes.TestVolume): - def setUp(self): super().setUp() - self.volume_groups_mock = self.app.client_manager.volume.groups + self.volume_groups_mock = self.volume_client.groups self.volume_groups_mock.reset_mock() - self.volume_group_types_mock = \ - self.app.client_manager.volume.group_types + self.volume_group_types_mock = self.volume_client.group_types self.volume_group_types_mock.reset_mock() - self.volume_types_mock = self.app.client_manager.volume.volume_types + self.volume_types_mock = self.volume_client.volume_types self.volume_types_mock.reset_mock() - self.volume_group_snapshots_mock = \ - self.app.client_manager.volume.group_snapshots + self.volume_group_snapshots_mock = self.volume_client.group_snapshots self.volume_group_snapshots_mock.reset_mock() class TestVolumeGroupCreate(TestVolumeGroup): - fake_volume_type = volume_fakes.create_one_volume_type() fake_volume_group_type = volume_fakes.create_one_volume_group_type() fake_volume_group = 
volume_fakes.create_one_volume_group( @@ -50,8 +45,9 @@ class TestVolumeGroupCreate(TestVolumeGroup): 'volume_types': [fake_volume_type.id], }, ) - fake_volume_group_snapshot = \ + fake_volume_group_snapshot = ( volume_fakes.create_one_volume_group_snapshot() + ) columns = ( 'ID', @@ -84,24 +80,28 @@ def setUp(self): super().setUp() self.volume_types_mock.get.return_value = self.fake_volume_type - self.volume_group_types_mock.get.return_value = \ + self.volume_group_types_mock.get.return_value = ( self.fake_volume_group_type + ) self.volume_groups_mock.create.return_value = self.fake_volume_group self.volume_groups_mock.get.return_value = self.fake_volume_group - self.volume_groups_mock.create_from_src.return_value = \ + self.volume_groups_mock.create_from_src.return_value = ( self.fake_volume_group - self.volume_group_snapshots_mock.get.return_value = \ + ) + self.volume_group_snapshots_mock.get.return_value = ( self.fake_volume_group_snapshot + ) self.cmd = volume_group.CreateVolumeGroup(self.app, None) def test_volume_group_create(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ - '--volume-group-type', self.fake_volume_group_type.id, - '--volume-type', self.fake_volume_type.id, + '--volume-group-type', + self.fake_volume_group_type.id, + '--volume-type', + self.fake_volume_type.id, ] verifylist = [ ('volume_group_type', self.fake_volume_group_type.id), @@ -115,9 +115,11 @@ def test_volume_group_create(self): columns, data = self.cmd.take_action(parsed_args) self.volume_group_types_mock.get.assert_called_once_with( - self.fake_volume_group_type.id) + self.fake_volume_group_type.id + ) self.volume_types_mock.get.assert_called_once_with( - self.fake_volume_type.id) + self.fake_volume_type.id + ) self.volume_groups_mock.create.assert_called_once_with( self.fake_volume_group_type.id, self.fake_volume_type.id, @@ -129,8 +131,7 @@ def test_volume_group_create(self): self.assertCountEqual(self.data, data) def test_volume_group_create__legacy(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ self.fake_volume_group_type.id, @@ -149,9 +150,11 @@ def test_volume_group_create__legacy(self): columns, data = self.cmd.take_action(parsed_args) self.volume_group_types_mock.get.assert_called_once_with( - self.fake_volume_group_type.id) + self.fake_volume_group_type.id + ) self.volume_types_mock.get.assert_called_once_with( - self.fake_volume_type.id) + self.fake_volume_type.id + ) self.volume_groups_mock.create.assert_called_once_with( self.fake_volume_group_type.id, self.fake_volume_type.id, @@ -168,11 +171,11 @@ def test_volume_group_create__legacy(self): ) def test_volume_group_create_no_volume_type(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ - '--volume-group-type', self.fake_volume_group_type.id, + '--volume-group-type', + self.fake_volume_group_type.id, ] verifylist = [ ('volume_group_type', self.fake_volume_group_type.id), @@ -183,23 +186,26 @@ def test_volume_group_create_no_volume_type(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--volume-types is a required argument when creating ', - str(exc)) + '--volume-types is a required 
argument when creating ', str(exc) + ) def test_volume_group_create_with_options(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ - '--volume-group-type', self.fake_volume_group_type.id, - '--volume-type', self.fake_volume_type.id, - '--name', 'foo', - '--description', 'hello, world', - '--availability-zone', 'bar', + '--volume-group-type', + self.fake_volume_group_type.id, + '--volume-type', + self.fake_volume_type.id, + '--name', + 'foo', + '--description', + 'hello, world', + '--availability-zone', + 'bar', ] verifylist = [ ('volume_group_type', self.fake_volume_group_type.id), @@ -213,9 +219,11 @@ def test_volume_group_create_with_options(self): columns, data = self.cmd.take_action(parsed_args) self.volume_group_types_mock.get.assert_called_once_with( - self.fake_volume_group_type.id) + self.fake_volume_group_type.id + ) self.volume_types_mock.get.assert_called_once_with( - self.fake_volume_type.id) + self.fake_volume_type.id + ) self.volume_groups_mock.create.assert_called_once_with( self.fake_volume_group_type.id, self.fake_volume_type.id, @@ -227,12 +235,13 @@ def test_volume_group_create_with_options(self): self.assertCountEqual(self.data, data) def test_volume_group_create_pre_v313(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.12') + self.set_volume_api_version('3.12') arglist = [ - '--volume-group-type', self.fake_volume_group_type.id, - '--volume-type', self.fake_volume_type.id, + '--volume-group-type', + self.fake_volume_group_type.id, + '--volume-type', + self.fake_volume_type.id, ] verifylist = [ ('volume_group_type', self.fake_volume_group_type.id), @@ -244,19 +253,18 @@ def test_volume_group_create_pre_v313(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.13 or greater is required', - str(exc)) + '--os-volume-api-version 3.13 or greater is required', str(exc) + ) def test_volume_group_create_from_source_group(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.14') + self.set_volume_api_version('3.14') arglist = [ - '--source-group', self.fake_volume_group.id, + '--source-group', + self.fake_volume_group.id, ] verifylist = [ ('source_group', self.fake_volume_group.id), @@ -266,8 +274,11 @@ def test_volume_group_create_from_source_group(self): columns, data = self.cmd.take_action(parsed_args) self.volume_groups_mock.get.assert_has_calls( - [mock.call(self.fake_volume_group.id), - mock.call(self.fake_volume_group.id)]) + [ + mock.call(self.fake_volume_group.id), + mock.call(self.fake_volume_group.id), + ] + ) self.volume_groups_mock.create_from_src.assert_called_once_with( None, self.fake_volume_group.id, @@ -278,11 +289,11 @@ def test_volume_group_create_from_source_group(self): self.assertCountEqual(self.data, data) def test_volume_group_create_from_group_snapshot(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.14') + self.set_volume_api_version('3.14') arglist = [ - '--group-snapshot', self.fake_volume_group_snapshot.id, + '--group-snapshot', + self.fake_volume_group_snapshot.id, ] verifylist = [ ('group_snapshot', self.fake_volume_group_snapshot.id), @@ -292,9 +303,11 @@ def test_volume_group_create_from_group_snapshot(self): columns, data = 
self.cmd.take_action(parsed_args) self.volume_group_snapshots_mock.get.assert_called_once_with( - self.fake_volume_group_snapshot.id) + self.fake_volume_group_snapshot.id + ) self.volume_groups_mock.get.assert_called_once_with( - self.fake_volume_group.id) + self.fake_volume_group.id + ) self.volume_groups_mock.create_from_src.assert_called_once_with( self.fake_volume_group_snapshot.id, None, @@ -305,11 +318,11 @@ def test_volume_group_create_from_group_snapshot(self): self.assertCountEqual(self.data, data) def test_volume_group_create_from_src_pre_v314(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ - '--source-group', self.fake_volume_group.id, + '--source-group', + self.fake_volume_group.id, ] verifylist = [ ('source_group', self.fake_volume_group.id), @@ -317,38 +330,40 @@ def test_volume_group_create_from_src_pre_v314(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.14 or greater is required', - str(exc)) + '--os-volume-api-version 3.14 or greater is required', str(exc) + ) def test_volume_group_create_from_src_source_group_group_snapshot(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.14') + self.set_volume_api_version('3.14') arglist = [ - '--source-group', self.fake_volume_group.id, - '--group-snapshot', self.fake_volume_group_snapshot.id, + '--source-group', + self.fake_volume_group.id, + '--group-snapshot', + self.fake_volume_group_snapshot.id, ] verifylist = [ ('source_group', self.fake_volume_group.id), ('group_snapshot', self.fake_volume_group_snapshot.id), ] - exc = self.assertRaises(tests_utils.ParserException, - self.check_parser, - self.cmd, - arglist, - verifylist) + exc = self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) self.assertIn( '--group-snapshot: not allowed with argument --source-group', - str(exc)) + str(exc), + ) class TestVolumeGroupDelete(TestVolumeGroup): - fake_volume_group = volume_fakes.create_one_volume_group() def setUp(self): @@ -360,8 +375,7 @@ def setUp(self): self.cmd = volume_group.DeleteVolumeGroup(self.app, None) def test_volume_group_delete(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ self.fake_volume_group.id, @@ -376,13 +390,13 @@ def test_volume_group_delete(self): result = self.cmd.take_action(parsed_args) self.volume_groups_mock.delete.assert_called_once_with( - self.fake_volume_group.id, delete_volumes=True, + self.fake_volume_group.id, + delete_volumes=True, ) self.assertIsNone(result) def test_volume_group_delete_pre_v313(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.12') + self.set_volume_api_version('3.12') arglist = [ self.fake_volume_group.id, @@ -394,16 +408,14 @@ def test_volume_group_delete_pre_v313(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.13 or greater is required', - str(exc)) + '--os-volume-api-version 3.13 or greater is required', str(exc) + ) class 
TestVolumeGroupSet(TestVolumeGroup): - fake_volume_group = volume_fakes.create_one_volume_group() columns = ( @@ -442,13 +454,14 @@ def setUp(self): self.cmd = volume_group.SetVolumeGroup(self.app, None) def test_volume_group_set(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ self.fake_volume_group.id, - '--name', 'foo', - '--description', 'hello, world', + '--name', + 'foo', + '--description', + 'hello, world', ] verifylist = [ ('group', self.fake_volume_group.id), @@ -460,14 +473,15 @@ def test_volume_group_set(self): columns, data = self.cmd.take_action(parsed_args) self.volume_groups_mock.update.assert_called_once_with( - self.fake_volume_group.id, name='foo', description='hello, world', + self.fake_volume_group.id, + name='foo', + description='hello, world', ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_group_with_enable_replication_option(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.38') + self.set_volume_api_version('3.38') arglist = [ self.fake_volume_group.id, @@ -482,18 +496,20 @@ def test_volume_group_with_enable_replication_option(self): columns, data = self.cmd.take_action(parsed_args) self.volume_groups_mock.enable_replication.assert_called_once_with( - self.fake_volume_group.id) + self.fake_volume_group.id + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_group_set_pre_v313(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.12') + self.set_volume_api_version('3.12') arglist = [ self.fake_volume_group.id, - '--name', 'foo', - '--description', 'hello, world', + '--name', + 'foo', + '--description', + 'hello, world', ] verifylist = [ ('group', self.fake_volume_group.id), @@ -503,16 +519,14 @@ def test_volume_group_set_pre_v313(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.13 or greater is required', - str(exc)) + '--os-volume-api-version 3.13 or greater is required', str(exc) + ) def test_volume_group_with_enable_replication_option_pre_v338(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.37') + self.set_volume_api_version('3.37') arglist = [ self.fake_volume_group.id, @@ -525,16 +539,14 @@ def test_volume_group_with_enable_replication_option_pre_v338(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.38 or greater is required', - str(exc)) + '--os-volume-api-version 3.38 or greater is required', str(exc) + ) class TestVolumeGroupList(TestVolumeGroup): - fake_volume_groups = volume_fakes.create_volume_groups() columns = ( @@ -547,7 +559,8 @@ class TestVolumeGroupList(TestVolumeGroup): fake_volume_group.id, fake_volume_group.status, fake_volume_group.name, - ) for fake_volume_group in fake_volume_groups + ) + for fake_volume_group in fake_volume_groups ] def setUp(self): @@ -558,8 +571,7 @@ def setUp(self): self.cmd = volume_group.ListVolumeGroup(self.app, None) def test_volume_group_list(self): - self.app.client_manager.volume.api_version = \ - 
api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ '--all-projects', @@ -580,8 +592,7 @@ def test_volume_group_list(self): self.assertCountEqual(tuple(self.data), data) def test_volume_group_list_pre_v313(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.12') + self.set_volume_api_version('3.12') arglist = [ '--all-projects', @@ -592,16 +603,14 @@ def test_volume_group_list_pre_v313(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.13 or greater is required', - str(exc)) + '--os-volume-api-version 3.13 or greater is required', str(exc) + ) class TestVolumeGroupFailover(TestVolumeGroup): - fake_volume_group = volume_fakes.create_one_volume_group() def setUp(self): @@ -613,13 +622,13 @@ def setUp(self): self.cmd = volume_group.FailoverVolumeGroup(self.app, None) def test_volume_group_failover(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.38') + self.set_volume_api_version('3.38') arglist = [ self.fake_volume_group.id, '--allow-attached-volume', - '--secondary-backend-id', 'foo', + '--secondary-backend-id', + 'foo', ] verifylist = [ ('group', self.fake_volume_group.id), @@ -638,13 +647,13 @@ def test_volume_group_failover(self): self.assertIsNone(result) def test_volume_group_failover_pre_v338(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.37') + self.set_volume_api_version('3.37') arglist = [ self.fake_volume_group.id, '--allow-attached-volume', - '--secondary-backend-id', 'foo', + '--secondary-backend-id', + 'foo', ] verifylist = [ ('group', self.fake_volume_group.id), @@ -654,9 +663,8 @@ def test_volume_group_failover_pre_v338(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.38 or greater is required', - str(exc)) + '--os-volume-api-version 3.38 or greater is required', str(exc) + ) diff --git a/openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py b/openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py index 2a5a30f07e..23ba4ec97b 100644 --- a/openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py +++ b/openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py @@ -10,31 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. 
-from cinderclient import api_versions +from openstack.block_storage.v3 import group as _group +from openstack.block_storage.v3 import group_snapshot as _group_snapshot +from openstack.test import fakes as sdk_fakes from osc_lib import exceptions from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes from openstackclient.volume.v3 import volume_group_snapshot -class TestVolumeGroupSnapshot(volume_fakes.TestVolume): - - def setUp(self): - super().setUp() - - self.volume_groups_mock = self.app.client_manager.volume.groups - self.volume_groups_mock.reset_mock() - - self.volume_group_snapshots_mock = \ - self.app.client_manager.volume.group_snapshots - self.volume_group_snapshots_mock.reset_mock() - - -class TestVolumeGroupSnapshotCreate(TestVolumeGroupSnapshot): - - fake_volume_group = volume_fakes.create_one_volume_group() - fake_volume_group_snapshot = \ - volume_fakes.create_one_volume_group_snapshot() +class TestVolumeGroupSnapshotCreate(volume_fakes.TestVolume): + fake_volume_group = sdk_fakes.generate_fake_resource(_group.Group) + fake_volume_group_snapshot = sdk_fakes.generate_fake_resource( + _group_snapshot.GroupSnapshot, + ) columns = ( 'ID', @@ -56,18 +45,20 @@ class TestVolumeGroupSnapshotCreate(TestVolumeGroupSnapshot): def setUp(self): super().setUp() - self.volume_groups_mock.get.return_value = self.fake_volume_group - self.volume_group_snapshots_mock.create.return_value = \ + self.volume_sdk_client.find_group.return_value = self.fake_volume_group + self.volume_sdk_client.create_group_snapshot.return_value = ( self.fake_volume_group_snapshot - self.volume_group_snapshots_mock.get.return_value = \ + ) + self.volume_sdk_client.find_group_snapshot.return_value = ( self.fake_volume_group_snapshot + ) self.cmd = volume_group_snapshot.CreateVolumeGroupSnapshot( - self.app, None) + self.app, None + ) def test_volume_group_snapshot_create(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.14') + self.set_volume_api_version('3.14') arglist = [ self.fake_volume_group.id, @@ -81,22 +72,28 @@ def test_volume_group_snapshot_create(self): columns, data = self.cmd.take_action(parsed_args) - self.volume_groups_mock.get.assert_called_once_with( - self.fake_volume_group.id) - self.volume_group_snapshots_mock.create.assert_called_once_with( - self.fake_volume_group.id, None, None, + self.volume_sdk_client.find_group.assert_called_once_with( + self.fake_volume_group.id, + ignore_missing=False, + details=False, + ) + self.volume_sdk_client.create_group_snapshot.assert_called_once_with( + group_id=self.fake_volume_group.id, + name=None, + description=None, ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_group_snapshot_create_with_options(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.14') + self.set_volume_api_version('3.14') arglist = [ self.fake_volume_group.id, - '--name', 'foo', - '--description', 'hello, world', + '--name', + 'foo', + '--description', + 'hello, world', ] verifylist = [ ('volume_group', self.fake_volume_group.id), @@ -107,17 +104,21 @@ def test_volume_group_snapshot_create_with_options(self): columns, data = self.cmd.take_action(parsed_args) - self.volume_groups_mock.get.assert_called_once_with( - self.fake_volume_group.id) - self.volume_group_snapshots_mock.create.assert_called_once_with( - self.fake_volume_group.id, 'foo', 'hello, world', + self.volume_sdk_client.find_group.assert_called_once_with( + self.fake_volume_group.id, + 
ignore_missing=False, + details=False, + ) + self.volume_sdk_client.create_group_snapshot.assert_called_once_with( + group_id=self.fake_volume_group.id, + name='foo', + description='hello, world', ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_group_snapshot_create_pre_v314(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ self.fake_volume_group.id, @@ -132,30 +133,33 @@ def test_volume_group_snapshot_create_pre_v314(self): exc = self.assertRaises( exceptions.CommandError, self.cmd.take_action, - parsed_args) + parsed_args, + ) self.assertIn( '--os-volume-api-version 3.14 or greater is required', - str(exc)) - + str(exc), + ) -class TestVolumeGroupSnapshotDelete(TestVolumeGroupSnapshot): - fake_volume_group_snapshot = \ - volume_fakes.create_one_volume_group_snapshot() +class TestVolumeGroupSnapshotDelete(volume_fakes.TestVolume): + fake_volume_group_snapshot = sdk_fakes.generate_fake_resource( + _group_snapshot.GroupSnapshot, + ) def setUp(self): super().setUp() - self.volume_group_snapshots_mock.get.return_value = \ + self.volume_sdk_client.find_group_snapshot.return_value = ( self.fake_volume_group_snapshot - self.volume_group_snapshots_mock.delete.return_value = None + ) + self.volume_sdk_client.delete_group_snapshot.return_value = None self.cmd = volume_group_snapshot.DeleteVolumeGroupSnapshot( - self.app, None) + self.app, None + ) def test_volume_group_snapshot_delete(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.14') + self.set_volume_api_version('3.14') arglist = [ self.fake_volume_group_snapshot.id, @@ -167,14 +171,13 @@ def test_volume_group_snapshot_delete(self): result = self.cmd.take_action(parsed_args) - self.volume_group_snapshots_mock.delete.assert_called_once_with( + self.volume_sdk_client.delete_group_snapshot.assert_called_once_with( self.fake_volume_group_snapshot.id, ) self.assertIsNone(result) def test_volume_group_snapshot_delete_pre_v314(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') arglist = [ self.fake_volume_group_snapshot.id, @@ -187,15 +190,21 @@ def test_volume_group_snapshot_delete_pre_v314(self): exc = self.assertRaises( exceptions.CommandError, self.cmd.take_action, - parsed_args) + parsed_args, + ) self.assertIn( '--os-volume-api-version 3.14 or greater is required', - str(exc)) - + str(exc), + ) -class TestVolumeGroupSnapshotList(TestVolumeGroupSnapshot): - fake_volume_group_snapshots = volume_fakes.create_volume_group_snapshots() +class TestVolumeGroupSnapshotList(volume_fakes.TestVolume): + fake_volume_group_snapshots = list( + sdk_fakes.generate_fake_resources( + _group_snapshot.GroupSnapshot, + count=3, + ) + ) columns = ( 'ID', @@ -207,21 +216,23 @@ class TestVolumeGroupSnapshotList(TestVolumeGroupSnapshot): fake_volume_group_snapshot.id, fake_volume_group_snapshot.status, fake_volume_group_snapshot.name, - ) for fake_volume_group_snapshot in fake_volume_group_snapshots + ) + for fake_volume_group_snapshot in fake_volume_group_snapshots ] def setUp(self): super().setUp() - self.volume_group_snapshots_mock.list.return_value = \ + self.volume_sdk_client.group_snapshots.return_value = ( self.fake_volume_group_snapshots + ) self.cmd = volume_group_snapshot.ListVolumeGroupSnapshot( - self.app, None) + self.app, None + ) def test_volume_group_snapshot_list(self): - 
self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.14') + self.set_volume_api_version('3.14') arglist = [ '--all-projects', @@ -233,20 +244,16 @@ def test_volume_group_snapshot_list(self): columns, data = self.cmd.take_action(parsed_args) - self.volume_group_snapshots_mock.list.assert_called_once_with( - search_opts={ - 'all_tenants': True, - }, + self.volume_sdk_client.group_snapshots.assert_called_once_with( + all_projects=True, ) self.assertEqual(self.columns, columns) self.assertCountEqual(tuple(self.data), data) def test_volume_group_snapshot_list_pre_v314(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.13') + self.set_volume_api_version('3.13') - arglist = [ - ] + arglist = [] verifylist = [ ('all_projects', False), ] @@ -255,7 +262,9 @@ def test_volume_group_snapshot_list_pre_v314(self): exc = self.assertRaises( exceptions.CommandError, self.cmd.take_action, - parsed_args) + parsed_args, + ) self.assertIn( '--os-volume-api-version 3.14 or greater is required', - str(exc)) + str(exc), + ) diff --git a/openstackclient/tests/unit/volume/v3/test_volume_group_type.py b/openstackclient/tests/unit/volume/v3/test_volume_group_type.py index 34b4e501fb..21db03d77c 100644 --- a/openstackclient/tests/unit/volume/v3/test_volume_group_type.py +++ b/openstackclient/tests/unit/volume/v3/test_volume_group_type.py @@ -12,7 +12,6 @@ from unittest import mock -from cinderclient import api_versions from osc_lib.cli import format_columns from osc_lib import exceptions @@ -21,19 +20,14 @@ class TestVolumeGroupType(volume_fakes.TestVolume): - def setUp(self): super().setUp() - self.volume_group_types_mock = \ - self.app.client_manager.volume.group_types + self.volume_group_types_mock = self.volume_client.group_types self.volume_group_types_mock.reset_mock() class TestVolumeGroupTypeCreate(TestVolumeGroupType): - - maxDiff = 2000 - fake_volume_group_type = volume_fakes.create_one_volume_group_type() columns = ( @@ -54,14 +48,14 @@ class TestVolumeGroupTypeCreate(TestVolumeGroupType): def setUp(self): super().setUp() - self.volume_group_types_mock.create.return_value = \ + self.volume_group_types_mock.create.return_value = ( self.fake_volume_group_type + ) self.cmd = volume_group_type.CreateVolumeGroupType(self.app, None) def test_volume_group_type_create(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.11') + self.set_volume_api_version('3.11') arglist = [ self.fake_volume_group_type.name, @@ -76,19 +70,18 @@ def test_volume_group_type_create(self): columns, data = self.cmd.take_action(parsed_args) self.volume_group_types_mock.create.assert_called_once_with( - self.fake_volume_group_type.name, - None, - True) + self.fake_volume_group_type.name, None, True + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_group_type_create_with_options(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.11') + self.set_volume_api_version('3.11') arglist = [ self.fake_volume_group_type.name, - '--description', 'foo', + '--description', + 'foo', '--private', ] verifylist = [ @@ -101,15 +94,13 @@ def test_volume_group_type_create_with_options(self): columns, data = self.cmd.take_action(parsed_args) self.volume_group_types_mock.create.assert_called_once_with( - self.fake_volume_group_type.name, - 'foo', - False) + self.fake_volume_group_type.name, 'foo', False + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def 
test_volume_group_type_create_pre_v311(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.10') + self.set_volume_api_version('3.10') arglist = [ self.fake_volume_group_type.name, @@ -122,30 +113,28 @@ def test_volume_group_type_create_pre_v311(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.11 or greater is required', - str(exc)) + '--os-volume-api-version 3.11 or greater is required', str(exc) + ) class TestVolumeGroupTypeDelete(TestVolumeGroupType): - fake_volume_group_type = volume_fakes.create_one_volume_group_type() def setUp(self): super().setUp() - self.volume_group_types_mock.get.return_value = \ + self.volume_group_types_mock.get.return_value = ( self.fake_volume_group_type + ) self.volume_group_types_mock.delete.return_value = None self.cmd = volume_group_type.DeleteVolumeGroupType(self.app, None) def test_volume_group_type_delete(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.11') + self.set_volume_api_version('3.11') arglist = [ self.fake_volume_group_type.id, @@ -163,8 +152,7 @@ def test_volume_group_type_delete(self): self.assertIsNone(result) def test_volume_group_type_delete_pre_v311(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.10') + self.set_volume_api_version('3.10') arglist = [ self.fake_volume_group_type.id, @@ -175,16 +163,14 @@ def test_volume_group_type_delete_pre_v311(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.11 or greater is required', - str(exc)) + '--os-volume-api-version 3.11 or greater is required', str(exc) + ) class TestVolumeGroupTypeSet(TestVolumeGroupType): - fake_volume_group_type = volume_fakes.create_one_volume_group_type( methods={ 'get_keys': {'foo': 'bar'}, @@ -211,25 +197,29 @@ class TestVolumeGroupTypeSet(TestVolumeGroupType): def setUp(self): super().setUp() - self.volume_group_types_mock.get.return_value = \ + self.volume_group_types_mock.get.return_value = ( self.fake_volume_group_type - self.volume_group_types_mock.update.return_value = \ + ) + self.volume_group_types_mock.update.return_value = ( self.fake_volume_group_type + ) self.cmd = volume_group_type.SetVolumeGroupType(self.app, None) def test_volume_group_type_set(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.11') + self.set_volume_api_version('3.11') self.fake_volume_group_type.set_keys.return_value = None arglist = [ self.fake_volume_group_type.id, - '--name', 'foo', - '--description', 'hello, world', + '--name', + 'foo', + '--description', + 'hello, world', '--public', - '--property', 'fizz=buzz', + '--property', + 'fizz=buzz', ] verifylist = [ ('group_type', self.fake_volume_group_type.id), @@ -256,13 +246,13 @@ def test_volume_group_type_set(self): self.assertCountEqual(self.data, data) def test_volume_group_type_with_no_property_option(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.11') + self.set_volume_api_version('3.11') arglist = [ self.fake_volume_group_type.id, '--no-property', - '--property', 'fizz=buzz', + '--property', + 'fizz=buzz', ] 
verifylist = [ ('group_type', self.fake_volume_group_type.id), @@ -277,21 +267,24 @@ def test_volume_group_type_with_no_property_option(self): columns, data = self.cmd.take_action(parsed_args) self.volume_group_types_mock.get.assert_called_once_with( - self.fake_volume_group_type.id) + self.fake_volume_group_type.id + ) self.fake_volume_group_type.get_keys.assert_called_once_with() self.fake_volume_group_type.unset_keys.assert_called_once_with( - {'foo': 'bar'}.keys()) + {'foo': 'bar'}.keys() + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_group_type_set_pre_v311(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.10') + self.set_volume_api_version('3.10') arglist = [ self.fake_volume_group_type.id, - '--name', 'foo', - '--description', 'hello, world', + '--name', + 'foo', + '--description', + 'hello, world', ] verifylist = [ ('group_type', self.fake_volume_group_type.id), @@ -304,16 +297,14 @@ def test_volume_group_type_set_pre_v311(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.11 or greater is required', - str(exc)) + '--os-volume-api-version 3.11 or greater is required', str(exc) + ) class TestVolumeGroupTypeUnset(TestVolumeGroupType): - fake_volume_group_type = volume_fakes.create_one_volume_group_type( methods={'unset_keys': None}, ) @@ -336,18 +327,19 @@ class TestVolumeGroupTypeUnset(TestVolumeGroupType): def setUp(self): super().setUp() - self.volume_group_types_mock.get.return_value = \ + self.volume_group_types_mock.get.return_value = ( self.fake_volume_group_type + ) self.cmd = volume_group_type.UnsetVolumeGroupType(self.app, None) def test_volume_group_type_unset(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.11') + self.set_volume_api_version('3.11') arglist = [ self.fake_volume_group_type.id, - '--property', 'fizz', + '--property', + 'fizz', ] verifylist = [ ('group_type', self.fake_volume_group_type.id), @@ -357,22 +349,25 @@ def test_volume_group_type_unset(self): columns, data = self.cmd.take_action(parsed_args) - self.volume_group_types_mock.get.assert_has_calls([ - mock.call(self.fake_volume_group_type.id), - mock.call(self.fake_volume_group_type.id), - ]) + self.volume_group_types_mock.get.assert_has_calls( + [ + mock.call(self.fake_volume_group_type.id), + mock.call(self.fake_volume_group_type.id), + ] + ) self.fake_volume_group_type.unset_keys.assert_called_once_with( - ['fizz']) + ['fizz'] + ) self.assertEqual(self.columns, columns) self.assertCountEqual(self.data, data) def test_volume_group_type_unset_pre_v311(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.10') + self.set_volume_api_version('3.10') arglist = [ self.fake_volume_group_type.id, - '--property', 'fizz', + '--property', + 'fizz', ] verifylist = [ ('group_type', self.fake_volume_group_type.id), @@ -381,16 +376,14 @@ def test_volume_group_type_unset_pre_v311(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.11 or greater is required', - str(exc)) + '--os-volume-api-version 3.11 or greater is required', str(exc) + 
) class TestVolumeGroupTypeList(TestVolumeGroupType): - fake_volume_group_types = volume_fakes.create_volume_group_types() columns = ( @@ -405,25 +398,26 @@ class TestVolumeGroupTypeList(TestVolumeGroupType): fake_volume_group_type.name, fake_volume_group_type.is_public, fake_volume_group_type.group_specs, - ) for fake_volume_group_type in fake_volume_group_types + ) + for fake_volume_group_type in fake_volume_group_types ] def setUp(self): super().setUp() - self.volume_group_types_mock.list.return_value = \ + self.volume_group_types_mock.list.return_value = ( self.fake_volume_group_types - self.volume_group_types_mock.default.return_value = \ + ) + self.volume_group_types_mock.default.return_value = ( self.fake_volume_group_types[0] + ) self.cmd = volume_group_type.ListVolumeGroupType(self.app, None) def test_volume_group_type_list(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.11') + self.set_volume_api_version('3.11') - arglist = [ - ] + arglist = [] verifylist = [ ('show_default', False), ] @@ -436,8 +430,7 @@ def test_volume_group_type_list(self): self.assertCountEqual(tuple(self.data), data) def test_volume_group_type_list_with_default_option(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.11') + self.set_volume_api_version('3.11') arglist = [ '--default', @@ -454,19 +447,15 @@ def test_volume_group_type_list_with_default_option(self): self.assertCountEqual(tuple([self.data[0]]), data) def test_volume_group_type_list_pre_v311(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.10') + self.set_volume_api_version('3.10') - arglist = [ - ] - verifylist = [ - ] + arglist = [] + verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.11 or greater is required', - str(exc)) + '--os-volume-api-version 3.11 or greater is required', str(exc) + ) diff --git a/openstackclient/tests/unit/volume/v3/test_volume_message.py b/openstackclient/tests/unit/volume/v3/test_volume_message.py index 45b0747e1f..4b4f6e41df 100644 --- a/openstackclient/tests/unit/volume/v3/test_volume_message.py +++ b/openstackclient/tests/unit/volume/v3/test_volume_message.py @@ -12,7 +12,6 @@ from unittest.mock import call -from cinderclient import api_versions from osc_lib import exceptions from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes @@ -21,19 +20,17 @@ class TestVolumeMessage(volume_fakes.TestVolume): - def setUp(self): super().setUp() - self.projects_mock = self.app.client_manager.identity.projects + self.projects_mock = self.identity_client.projects self.projects_mock.reset_mock() - self.volume_messages_mock = self.app.client_manager.volume.messages + self.volume_messages_mock = self.volume_client.messages self.volume_messages_mock.reset_mock() class TestVolumeMessageDelete(TestVolumeMessage): - fake_messages = volume_fakes.create_volume_messages(count=2) def setUp(self): @@ -48,8 +45,7 @@ def setUp(self): self.cmd = volume_message.DeleteMessage(self.app, None) def test_message_delete(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.3') + self.set_volume_api_version('3.3') arglist = [ self.fake_messages[0].id, @@ -62,12 +58,12 @@ def test_message_delete(self): result = self.cmd.take_action(parsed_args) 
self.volume_messages_mock.delete.assert_called_with( - self.fake_messages[0].id) + self.fake_messages[0].id + ) self.assertIsNone(result) def test_message_delete_multiple_messages(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.3') + self.set_volume_api_version('3.3') arglist = [ self.fake_messages[0].id, @@ -87,8 +83,7 @@ def test_message_delete_multiple_messages(self): self.assertIsNone(result) def test_message_delete_multiple_messages_with_exception(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.3') + self.set_volume_api_version('3.3') arglist = [ self.fake_messages[0].id, @@ -101,22 +96,24 @@ def test_message_delete_multiple_messages_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.volume_messages_mock.delete.side_effect = [ - self.fake_messages[0], exceptions.CommandError] + self.fake_messages[0], + exceptions.CommandError, + ] exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertEqual('Failed to delete 1 of 2 messages.', str(exc)) self.volume_messages_mock.delete.assert_any_call( - self.fake_messages[0].id) + self.fake_messages[0].id + ) self.volume_messages_mock.delete.assert_any_call('invalid_message') self.assertEqual(2, self.volume_messages_mock.delete.call_count) def test_message_delete_pre_v33(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.2') + self.set_volume_api_version('3.2') arglist = [ self.fake_messages[0].id, @@ -127,16 +124,14 @@ def test_message_delete_pre_v33(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.3 or greater is required', - str(exc)) + '--os-volume-api-version 3.3 or greater is required', str(exc) + ) class TestVolumeMessageList(TestVolumeMessage): - fake_project = identity_fakes.FakeProject.create_one_project() fake_messages = volume_fakes.create_volume_messages(count=3) @@ -153,17 +148,19 @@ class TestVolumeMessageList(TestVolumeMessage): ) data = [] for fake_message in fake_messages: - data.append(( - fake_message.id, - fake_message.event_id, - fake_message.resource_type, - fake_message.resource_uuid, - fake_message.message_level, - fake_message.user_message, - fake_message.request_id, - fake_message.created_at, - fake_message.guaranteed_until, - )) + data.append( + ( + fake_message.id, + fake_message.event_id, + fake_message.resource_type, + fake_message.resource_uuid, + fake_message.message_level, + fake_message.user_message, + fake_message.request_id, + fake_message.created_at, + fake_message.guaranteed_until, + ) + ) def setUp(self): super().setUp() @@ -174,8 +171,7 @@ def setUp(self): self.cmd = volume_message.ListMessages(self.app, None) def test_message_list(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.3') + self.set_volume_api_version('3.3') arglist = [] verifylist = [ @@ -199,13 +195,15 @@ def test_message_list(self): self.assertCountEqual(self.data, list(data)) def test_message_list_with_options(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.3') + self.set_volume_api_version('3.3') arglist = [ - '--project', self.fake_project.name, - '--marker', self.fake_messages[0].id, - '--limit', '3', 
+ '--project', + self.fake_project.name, + '--marker', + self.fake_messages[0].id, + '--limit', + '3', ] verifylist = [ ('project', self.fake_project.name), @@ -228,8 +226,7 @@ def test_message_list_with_options(self): self.assertCountEqual(self.data, list(data)) def test_message_list_pre_v33(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.2') + self.set_volume_api_version('3.2') arglist = [] verifylist = [ @@ -241,16 +238,14 @@ def test_message_list_pre_v33(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.3 or greater is required', - str(exc)) + '--os-volume-api-version 3.3 or greater is required', str(exc) + ) class TestVolumeMessageShow(TestVolumeMessage): - fake_message = volume_fakes.create_one_volume_message() columns = ( @@ -284,15 +279,10 @@ def setUp(self): self.cmd = volume_message.ShowMessage(self.app, None) def test_message_show(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.3') + self.set_volume_api_version('3.3') - arglist = [ - self.fake_message.id - ] - verifylist = [ - ('message_id', self.fake_message.id) - ] + arglist = [self.fake_message.id] + verifylist = [('message_id', self.fake_message.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) @@ -302,21 +292,15 @@ def test_message_show(self): self.assertEqual(self.data, data) def test_message_show_pre_v33(self): - self.app.client_manager.volume.api_version = \ - api_versions.APIVersion('3.2') + self.set_volume_api_version('3.2') - arglist = [ - self.fake_message.id - ] - verifylist = [ - ('message_id', self.fake_message.id) - ] + arglist = [self.fake_message.id] + verifylist = [('message_id', self.fake_message.id)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) exc = self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args) + exceptions.CommandError, self.cmd.take_action, parsed_args + ) self.assertIn( - '--os-volume-api-version 3.3 or greater is required', - str(exc)) + '--os-volume-api-version 3.3 or greater is required', str(exc) + ) diff --git a/openstackclient/tests/unit/volume/v3/test_volume_snapshot.py b/openstackclient/tests/unit/volume/v3/test_volume_snapshot.py new file mode 100644 index 0000000000..85613603de --- /dev/null +++ b/openstackclient/tests/unit/volume/v3/test_volume_snapshot.py @@ -0,0 +1,795 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
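The new test_volume_snapshot.py suite introduced above builds its fixtures with openstacksdk's own test helpers rather than hand-written fakes. The following is only an illustrative sketch of that pattern, not part of the patch: it assumes openstacksdk is installed, and the plain mock.Mock() stands in for the suite's volume_sdk_client fixture.

    from unittest import mock

    from openstack.block_storage.v3 import snapshot as _snapshot
    from openstack.test import fakes as sdk_fakes

    # generate_fake_resource populates the resource's declared fields with
    # fake values; generate_fake_resources (used elsewhere in this suite)
    # yields several, which is presumably why the tests wrap it in list().
    fake_snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot)

    # Stand-in for the suite's volume_sdk_client fixture.
    volume_sdk_client = mock.Mock()
    volume_sdk_client.find_snapshot.return_value = fake_snapshot

    assert volume_sdk_client.find_snapshot('some-id') is fake_snapshot

The fake resource carries every attribute the command code reads (id, name, metadata, and so on), which is what lets the tests assert on column data without constructing fixtures by hand.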
+ +from unittest import mock + +from openstack.block_storage.v3 import snapshot as _snapshot +from openstack.block_storage.v3 import volume as _volume +from openstack import exceptions as sdk_exceptions +from openstack.test import fakes as sdk_fakes +from osc_lib.cli import format_columns +from osc_lib import exceptions + +from openstackclient.tests.unit.identity.v3 import fakes as project_fakes +from openstackclient.tests.unit import utils as test_utils +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes +from openstackclient.volume.v3 import volume_snapshot + + +class TestVolumeSnapshotCreate(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.volume_sdk_client.find_volume.return_value = self.volume + self.snapshot = sdk_fakes.generate_fake_resource( + _snapshot.Snapshot, volume_id=self.volume.id + ) + self.volume_sdk_client.create_snapshot.return_value = self.snapshot + self.volume_sdk_client.manage_snapshot.return_value = self.snapshot + + self.columns = ( + 'created_at', + 'description', + 'id', + 'name', + 'properties', + 'size', + 'status', + 'volume_id', + ) + self.data = ( + self.snapshot.created_at, + self.snapshot.description, + self.snapshot.id, + self.snapshot.name, + format_columns.DictColumn(self.snapshot.metadata), + self.snapshot.size, + self.snapshot.status, + self.snapshot.volume_id, + ) + + self.cmd = volume_snapshot.CreateVolumeSnapshot(self.app, None) + + def test_snapshot_create(self): + arglist = [ + "--volume", + self.snapshot.volume_id, + "--description", + self.snapshot.description, + "--force", + '--property', + 'Alpha=a', + '--property', + 'Beta=b', + self.snapshot.name, + ] + verifylist = [ + ("volume", self.snapshot.volume_id), + ("description", self.snapshot.description), + ("force", True), + ('properties', {'Alpha': 'a', 'Beta': 'b'}), + ("snapshot_name", self.snapshot.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.find_volume.assert_called_once_with( + self.snapshot.volume_id, ignore_missing=False + ) + self.volume_sdk_client.create_snapshot.assert_called_with( + volume_id=self.snapshot.volume_id, + force=True, + name=self.snapshot.name, + description=self.snapshot.description, + metadata={'Alpha': 'a', 'Beta': 'b'}, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + def test_snapshot_create_without_name(self): + arglist = [ + "--volume", + self.snapshot.volume_id, + ] + verifylist = [ + ("volume", self.snapshot.volume_id), + ] + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + + def test_snapshot_create_without_volume(self): + arglist = [ + "--description", + self.snapshot.description, + "--force", + self.snapshot.name, + ] + verifylist = [ + ("description", self.snapshot.description), + ("force", True), + ("snapshot_name", self.snapshot.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.find_volume.assert_called_once_with( + self.snapshot.name, ignore_missing=False + ) + self.volume_sdk_client.create_snapshot.assert_called_with( + volume_id=self.snapshot.volume_id, + force=True, + name=self.snapshot.name, + description=self.snapshot.description, + metadata=None, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) 
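The create tests above pin down the exact call shape the command sends to the SDK block storage proxy (create_snapshot with volume_id, name, description, force, and metadata keywords). For reference, roughly the same call made directly against openstacksdk might look like the sketch below; the cloud name, volume ID, and metadata values are placeholders and are not taken from the patch.

    import openstack

    # 'devstack' and the IDs/values below are assumed placeholders.
    conn = openstack.connect(cloud='devstack')
    snapshot = conn.block_storage.create_snapshot(
        volume_id='11111111-2222-3333-4444-555555555555',
        name='backup-before-upgrade',
        description='nightly snapshot',
        force=True,
        metadata={'Alpha': 'a'},
    )
    print(snapshot.status)

Keeping the assertion keyword-for-keyword in sync with this proxy signature is what the mocked volume_sdk_client checks are doing.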
+ + def test_snapshot_create_with_remote_source(self): + arglist = [ + '--remote-source', + 'source-name=test_source_name', + '--remote-source', + 'source-id=test_source_id', + '--volume', + self.snapshot.volume_id, + self.snapshot.name, + ] + ref_dict = { + 'source-name': 'test_source_name', + 'source-id': 'test_source_id', + } + verifylist = [ + ('remote_source', ref_dict), + ('volume', self.snapshot.volume_id), + ("snapshot_name", self.snapshot.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + self.volume_sdk_client.find_volume.assert_called_once_with( + self.snapshot.volume_id, ignore_missing=False + ) + self.volume_sdk_client.manage_snapshot.assert_called_with( + volume_id=self.snapshot.volume_id, + ref=ref_dict, + name=self.snapshot.name, + description=None, + metadata=None, + ) + self.volume_sdk_client.create_snapshot.assert_not_called() + + +class TestVolumeSnapshotDelete(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.snapshots = list( + sdk_fakes.generate_fake_resources(_snapshot.Snapshot) + ) + self.volume_sdk_client.find_snapshot.side_effect = self.snapshots + self.volume_sdk_client.delete_snapshot.return_value = None + self.volume_sdk_client.unmanage_snapshot.return_value = None + + self.cmd = volume_snapshot.DeleteVolumeSnapshot(self.app, None) + + def test_snapshot_delete(self): + arglist = [self.snapshots[0].id] + verifylist = [("snapshots", [self.snapshots[0].id])] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_snapshot.assert_called_once_with( + self.snapshots[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_snapshot.assert_called_once_with( + self.snapshots[0].id, force=False + ) + + def test_snapshot_delete_with_force(self): + arglist = ['--force', self.snapshots[0].id] + verifylist = [('force', True), ("snapshots", [self.snapshots[0].id])] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_snapshot.assert_called_once_with( + self.snapshots[0].id, ignore_missing=False + ) + self.volume_sdk_client.delete_snapshot.assert_called_once_with( + self.snapshots[0].id, force=True + ) + + def test_delete_multiple_snapshots(self): + arglist = [] + for s in self.snapshots: + arglist.append(s.id) + verifylist = [ + ('snapshots', arglist), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.find_snapshot.assert_has_calls( + [mock.call(x.id, ignore_missing=False) for x in self.snapshots] + ) + self.volume_sdk_client.delete_snapshot.assert_has_calls( + [mock.call(x.id, force=False) for x in self.snapshots] + ) + + def test_delete_multiple_snapshots_with_exception(self): + self.volume_sdk_client.find_snapshot.side_effect = [ + self.snapshots[0], + sdk_exceptions.NotFoundException(), + ] + + arglist = [ + self.snapshots[0].id, + 'unexist_snapshot', + ] + verifylist = [ + ('snapshots', arglist), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertEqual('1 of 2 snapshots failed to delete.', str(exc)) + + 
self.volume_sdk_client.find_snapshot.assert_has_calls( + [ + mock.call(self.snapshots[0].id, ignore_missing=False), + mock.call('unexist_snapshot', ignore_missing=False), + ] + ) + self.volume_sdk_client.delete_snapshot.assert_has_calls( + [ + mock.call(self.snapshots[0].id, force=False), + ] + ) + + def test_snapshot_delete_remote(self): + arglist = ['--remote', self.snapshots[0].id] + verifylist = [('remote', True), ("snapshots", [self.snapshots[0].id])] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.unmanage_snapshot.assert_called_with( + self.snapshots[0].id + ) + + def test_snapshot_delete_with_remote_force(self): + arglist = ['--remote', '--force', self.snapshots[0].id] + verifylist = [ + ('remote', True), + ('force', True), + ("snapshots", [self.snapshots[0].id]), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + "The --force option is not supported with the --remote parameter.", + str(exc), + ) + + def test_delete_multiple_snapshots_remote(self): + arglist = ['--remote'] + for s in self.snapshots: + arglist.append(s.id) + verifylist = [('remote', True), ('snapshots', arglist[1:])] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_sdk_client.unmanage_snapshot.assert_has_calls( + [mock.call(s.id) for s in self.snapshots] + ) + + +class TestVolumeSnapshotList(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.volume = sdk_fakes.generate_fake_resource(_volume.Volume) + self.snapshots = list( + sdk_fakes.generate_fake_resources( + _snapshot.Snapshot, attrs={'volume_id': self.volume.name} + ) + ) + self.project = project_fakes.FakeProject.create_one_project() + self.volume_sdk_client.volumes.return_value = [self.volume] + self.volume_sdk_client.find_volume.return_value = self.volume + self.volume_sdk_client.snapshots.return_value = self.snapshots + self.project_mock = self.identity_client.projects + self.project_mock.get.return_value = self.project + + self.columns = ("ID", "Name", "Description", "Status", "Size") + self.columns_long = self.columns + ( + "Created At", + "Volume", + "Properties", + ) + + self.data = [] + self.data_long = [] + for s in self.snapshots: + self.data.append( + ( + s.id, + s.name, + s.description, + s.status, + s.size, + ) + ) + self.data_long.append( + ( + s.id, + s.name, + s.description, + s.status, + s.size, + s.created_at, + volume_snapshot.VolumeIdColumn( + s.volume_id, volume_cache={self.volume.id: self.volume} + ), + format_columns.DictColumn(s.metadata), + ) + ) + + self.cmd = volume_snapshot.ListVolumeSnapshot(self.app, None) + + def test_snapshot_list_without_options(self): + arglist = [] + verifylist = [('all_projects', False), ('long', False)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=False, + name=None, + status=None, + project_id=None, + volume_id=None, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, list(data)) + + def test_snapshot_list_with_options(self): + arglist = [ + "--long", + "--limit", + "2", + "--project", + self.project.id, + "--marker", + 
self.snapshots[0].id, + ] + verifylist = [ + ("long", True), + ("limit", 2), + ("project", self.project.id), + ("marker", self.snapshots[0].id), + ('all_projects', False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=2, + marker=self.snapshots[0].id, + all_projects=True, + project_id=self.project.id, + name=None, + status=None, + volume_id=None, + ) + self.assertEqual(self.columns_long, columns) + self.assertEqual(self.data_long, list(data)) + + def test_snapshot_list_all_projects(self): + arglist = [ + '--all-projects', + ] + verifylist = [('long', False), ('all_projects', True)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=True, + name=None, + status=None, + project_id=None, + volume_id=None, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, list(data)) + + def test_snapshot_list_name_option(self): + arglist = [ + '--name', + self.snapshots[0].name, + ] + verifylist = [ + ('all_projects', False), + ('long', False), + ('name', self.snapshots[0].name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=False, + name=self.snapshots[0].name, + status=None, + project_id=None, + volume_id=None, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, list(data)) + + def test_snapshot_list_status_option(self): + arglist = [ + '--status', + 'available', + ] + verifylist = [ + ('all_projects', False), + ('long', False), + ('status', 'available'), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=False, + name=None, + status='available', + project_id=None, + volume_id=None, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, list(data)) + + def test_snapshot_list_volumeid_option(self): + arglist = [ + '--volume', + self.volume.id, + ] + verifylist = [ + ('all_projects', False), + ('long', False), + ('volume', self.volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + + self.volume_sdk_client.snapshots.assert_called_once_with( + limit=None, + marker=None, + all_projects=False, + name=None, + status=None, + project_id=None, + volume_id=self.volume.id, + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, list(data)) + + def test_snapshot_list_negative_limit(self): + arglist = [ + "--limit", + "-2", + ] + verifylist = [ + ("limit", -2), + ] + self.assertRaises( + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + + +class TestVolumeSnapshotSet(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.snapshot = sdk_fakes.generate_fake_resource( + _snapshot.Snapshot, metadata={'foo': 'bar'} + ) + self.volume_sdk_client.find_snapshot.return_value = self.snapshot + self.volume_sdk_client.set_snapshot_metadata.return_value = None + self.volume_sdk_client.update_snapshot.return_value = None + # Get the 
command object to mock + self.cmd = volume_snapshot.SetVolumeSnapshot(self.app, None) + + def test_snapshot_set_no_option(self): + arglist = [ + self.snapshot.id, + ] + verifylist = [ + ("snapshot", self.snapshot.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + self.volume_sdk_client.find_snapshot.assert_called_once_with( + parsed_args.snapshot, ignore_missing=False + ) + self.volume_sdk_client.reset_snapshot_status.assert_not_called() + self.volume_sdk_client.update_snapshot.assert_not_called() + self.volume_sdk_client.set_snapshot_metadata.assert_not_called() + + def test_snapshot_set_name_and_property(self): + arglist = [ + "--name", + "new_snapshot", + "--property", + "x=y", + "--property", + "foo=foo", + self.snapshot.id, + ] + verifylist = [ + ("name", "new_snapshot"), + ("properties", {"x": "y", "foo": "foo"}), + ("snapshot", self.snapshot.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + self.volume_sdk_client.update_snapshot.assert_called_with( + self.snapshot.id, name="new_snapshot" + ) + self.volume_sdk_client.set_snapshot_metadata.assert_called_with( + self.snapshot.id, x="y", foo="foo" + ) + + def test_snapshot_set_with_no_property(self): + arglist = [ + "--no-property", + self.snapshot.id, + ] + verifylist = [ + ("no_property", True), + ("snapshot", self.snapshot.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + self.volume_sdk_client.find_snapshot.assert_called_once_with( + parsed_args.snapshot, ignore_missing=False + ) + self.volume_sdk_client.reset_snapshot_status.assert_not_called() + self.volume_sdk_client.update_snapshot.assert_not_called() + self.volume_sdk_client.set_snapshot_metadata.assert_not_called() + self.volume_sdk_client.delete_snapshot_metadata.assert_called_with( + self.snapshot.id, keys=["foo"] + ) + + def test_snapshot_set_with_no_property_and_property(self): + arglist = [ + "--no-property", + "--property", + "foo_1=bar_1", + self.snapshot.id, + ] + verifylist = [ + ("no_property", True), + ("properties", {"foo_1": "bar_1"}), + ("snapshot", self.snapshot.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + self.volume_sdk_client.find_snapshot.assert_called_once_with( + parsed_args.snapshot, ignore_missing=False + ) + self.volume_sdk_client.reset_snapshot_status.assert_not_called() + self.volume_sdk_client.update_snapshot.assert_not_called() + self.volume_sdk_client.delete_snapshot_metadata.assert_called_with( + self.snapshot.id, keys=["foo"] + ) + self.volume_sdk_client.set_snapshot_metadata.assert_called_once_with( + self.snapshot.id, + foo_1="bar_1", + ) + + def test_snapshot_set_state_to_error(self): + arglist = ["--state", "error", self.snapshot.id] + verifylist = [("state", "error"), ("snapshot", self.snapshot.id)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + self.volume_sdk_client.reset_snapshot_status.assert_called_with( + self.snapshot.id, "error" + ) + + def test_volume_set_state_failed(self): + self.volume_sdk_client.reset_snapshot_status.side_effect = ( + exceptions.CommandError() + ) + arglist = ['--state', 'error', self.snapshot.id] + 
verifylist = [('state', 'error'), ('snapshot', self.snapshot.id)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + self.assertEqual('One or more of the set operations failed', str(exc)) + self.volume_sdk_client.reset_snapshot_status.assert_called_once_with( + self.snapshot.id, 'error' + ) + + def test_volume_set_name_and_state_failed(self): + self.volume_sdk_client.reset_snapshot_status.side_effect = ( + exceptions.CommandError() + ) + arglist = [ + '--state', + 'error', + "--name", + "new_snapshot", + self.snapshot.id, + ] + verifylist = [ + ('state', 'error'), + ("name", "new_snapshot"), + ('snapshot', self.snapshot.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + self.assertEqual('One or more of the set operations failed', str(exc)) + self.volume_sdk_client.update_snapshot.assert_called_once_with( + self.snapshot.id, name="new_snapshot" + ) + self.volume_sdk_client.reset_snapshot_status.assert_called_once_with( + self.snapshot.id, 'error' + ) + + +class TestVolumeSnapshotShow(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot) + + self.columns = ( + 'created_at', + 'description', + 'id', + 'name', + 'properties', + 'size', + 'status', + 'volume_id', + ) + self.data = ( + self.snapshot.created_at, + self.snapshot.description, + self.snapshot.id, + self.snapshot.name, + format_columns.DictColumn(self.snapshot.metadata), + self.snapshot.size, + self.snapshot.status, + self.snapshot.volume_id, + ) + + self.volume_sdk_client.find_snapshot.return_value = self.snapshot + + self.cmd = volume_snapshot.ShowVolumeSnapshot(self.app, None) + + def test_snapshot_show(self): + arglist = [self.snapshot.id] + verifylist = [("snapshot", self.snapshot.id)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_sdk_client.find_snapshot.assert_called_with( + self.snapshot.id, ignore_missing=False + ) + + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + +class TestVolumeSnapshotUnset(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.snapshot = sdk_fakes.generate_fake_resource(_snapshot.Snapshot) + self.volume_sdk_client.find_snapshot.return_value = self.snapshot + self.volume_sdk_client.delete_snapshot_metadata.return_value = None + + self.cmd = volume_snapshot.UnsetVolumeSnapshot(self.app, None) + + def test_snapshot_unset(self): + arglist = [ + "--property", + "foo", + self.snapshot.id, + ] + verifylist = [ + ("properties", ["foo"]), + ("snapshot", self.snapshot.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.assertIsNone(result) + self.volume_sdk_client.delete_snapshot_metadata.assert_called_with( + self.snapshot.id, keys=["foo"] + ) diff --git a/openstackclient/tests/unit/volume/v1/test_transfer_request.py b/openstackclient/tests/unit/volume/v3/test_volume_transfer_request.py similarity index 75% rename from openstackclient/tests/unit/volume/v1/test_transfer_request.py rename to openstackclient/tests/unit/volume/v3/test_volume_transfer_request.py index 97700fbb7f..ffe59db659 100644 --- a/openstackclient/tests/unit/volume/v1/test_transfer_request.py +++ 
b/openstackclient/tests/unit/volume/v3/test_volume_transfer_request.py @@ -18,26 +18,25 @@ from osc_lib import exceptions from osc_lib import utils -from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes -from openstackclient.volume.v1 import volume_transfer_request +from openstackclient.tests.unit import utils as test_utils +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes +from openstackclient.volume.v3 import volume_transfer_request -class TestTransfer(volume_fakes.TestVolumev1): - +class TestTransfer(volume_fakes.TestVolume): def setUp(self): super().setUp() # Get a shortcut to the TransferManager Mock - self.transfer_mock = self.app.client_manager.volume.transfers + self.transfer_mock = self.volume_client.transfers self.transfer_mock.reset_mock() # Get a shortcut to the VolumeManager Mock - self.volumes_mock = self.app.client_manager.volume.volumes + self.volumes_mock = self.volume_client.volumes self.volumes_mock.reset_mock() class TestTransferAccept(TestTransfer): - columns = ( 'id', 'name', @@ -59,11 +58,13 @@ def setUp(self): # Get the command object to test self.cmd = volume_transfer_request.AcceptTransferRequest( - self.app, None) + self.app, None + ) def test_transfer_accept(self): arglist = [ - '--auth-key', 'key_value', + '--auth-key', + 'key_value', self.volume_transfer.id, ] verifylist = [ @@ -91,17 +92,17 @@ def test_transfer_accept_no_option(self): verifylist = [ ('transfer_request', self.volume_transfer.id), ] - parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( - exceptions.CommandError, - self.cmd.take_action, - parsed_args, + test_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, ) class TestTransferCreate(TestTransfer): - volume = volume_fakes.create_one_volume() columns = ( @@ -135,7 +136,8 @@ def setUp(self): # Get the command object to test self.cmd = volume_transfer_request.CreateTransferRequest( - self.app, None) + self.app, None + ) def test_transfer_create_without_name(self): arglist = [ @@ -148,14 +150,14 @@ def test_transfer_create_without_name(self): columns, data = self.cmd.take_action(parsed_args) - self.transfer_mock.create.assert_called_once_with( - self.volume.id, None) + self.transfer_mock.create.assert_called_once_with(self.volume.id, None) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_transfer_create_with_name(self): arglist = [ - '--name', self.volume_transfer.name, + '--name', + self.volume_transfer.name, self.volume.id, ] verifylist = [ @@ -167,13 +169,57 @@ def test_transfer_create_with_name(self): columns, data = self.cmd.take_action(parsed_args) self.transfer_mock.create.assert_called_once_with( - self.volume.id, self.volume_transfer.name,) + self.volume.id, + self.volume_transfer.name, + ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) + def test_transfer_create_with_no_snapshots(self): + self.set_volume_api_version('3.55') -class TestTransferDelete(TestTransfer): + arglist = [ + '--no-snapshots', + self.volume.id, + ] + verifylist = [ + ('name', None), + ('snapshots', False), + ('volume', self.volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.transfer_mock.create.assert_called_once_with( + self.volume.id, None, no_snapshots=True + ) + self.assertEqual(self.columns, columns) + self.assertEqual(self.data, data) + + def test_transfer_create_pre_v355(self): + self.set_volume_api_version('3.54') 
+ + arglist = [ + '--no-snapshots', + self.volume.id, + ] + verifylist = [ + ('name', None), + ('snapshots', False), + ('volume', self.volume.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, self.cmd.take_action, parsed_args + ) + self.assertIn( + '--os-volume-api-version 3.55 or greater is required', str(exc) + ) + + +class TestTransferDelete(TestTransfer): volume_transfers = volume_fakes.create_transfers(count=2) def setUp(self): @@ -186,21 +232,19 @@ def setUp(self): # Get the command object to mock self.cmd = volume_transfer_request.DeleteTransferRequest( - self.app, None) + self.app, None + ) def test_transfer_delete(self): - arglist = [ - self.volume_transfers[0].id - ] - verifylist = [ - ("transfer_request", [self.volume_transfers[0].id]) - ] + arglist = [self.volume_transfers[0].id] + verifylist = [("transfer_request", [self.volume_transfers[0].id])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.transfer_mock.delete.assert_called_with( - self.volume_transfers[0].id) + self.volume_transfers[0].id + ) self.assertIsNone(result) def test_delete_multiple_transfers(self): @@ -232,17 +276,21 @@ def test_delete_multiple_transfers_with_exception(self): parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self.volume_transfers[0], exceptions.CommandError] - with mock.patch.object(utils, 'find_resource', - side_effect=find_mock_result) as find_mock: + with mock.patch.object( + utils, 'find_resource', side_effect=find_mock_result + ) as find_mock: try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: - self.assertEqual('1 of 2 volume transfer requests failed ' - 'to delete', str(e)) + self.assertEqual( + '1 of 2 volume transfer requests failed to delete', + str(e), + ) find_mock.assert_any_call( - self.transfer_mock, self.volume_transfers[0].id) + self.transfer_mock, self.volume_transfers[0].id + ) find_mock.assert_any_call(self.transfer_mock, 'unexist_transfer') self.assertEqual(2, find_mock.call_count) @@ -252,7 +300,6 @@ def test_delete_multiple_transfers_with_exception(self): class TestTransferList(TestTransfer): - # The Transfers to be listed volume_transfers = volume_fakes.create_one_transfer() @@ -283,28 +330,25 @@ def test_transfer_list_without_argument(self): # confirming if all expected columns are present in the result. self.assertEqual(expected_columns, columns) - datalist = (( - self.volume_transfers.id, - self.volume_transfers.name, - self.volume_transfers.volume_id, - ), ) + datalist = ( + ( + self.volume_transfers.id, + self.volume_transfers.name, + self.volume_transfers.volume_id, + ), + ) # confirming if all expected values are present in the result. self.assertEqual(datalist, tuple(data)) # checking if proper call was made to list volume_transfers self.transfer_mock.list.assert_called_with( - detailed=True, - search_opts={'all_tenants': 0} + detailed=True, search_opts={'all_tenants': 0} ) def test_transfer_list_with_argument(self): - arglist = [ - "--all-projects" - ] - verifylist = [ - ("all_projects", True) - ] + arglist = ["--all-projects"] + verifylist = [("all_projects", True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) @@ -322,24 +366,24 @@ def test_transfer_list_with_argument(self): # confirming if all expected columns are present in the result. 
self.assertEqual(expected_columns, columns) - datalist = (( - self.volume_transfers.id, - self.volume_transfers.name, - self.volume_transfers.volume_id, - ), ) + datalist = ( + ( + self.volume_transfers.id, + self.volume_transfers.name, + self.volume_transfers.volume_id, + ), + ) # confirming if all expected values are present in the result. self.assertEqual(datalist, tuple(data)) # checking if proper call was made to list volume_transfers self.transfer_mock.list.assert_called_with( - detailed=True, - search_opts={'all_tenants': 1} + detailed=True, search_opts={'all_tenants': 1} ) class TestTransferShow(TestTransfer): - columns = ( 'created_at', 'id', @@ -351,7 +395,7 @@ def setUp(self): super().setUp() self.volume_transfer = volume_fakes.create_one_transfer( - attrs={'created_at': 'time'} + attrs={'created_at': 'time'}, ) self.data = ( self.volume_transfer.created_at, @@ -363,8 +407,7 @@ def setUp(self): self.transfer_mock.get.return_value = self.volume_transfer # Get the command object to test - self.cmd = volume_transfer_request.ShowTransferRequest( - self.app, None) + self.cmd = volume_transfer_request.ShowTransferRequest(self.app, None) def test_transfer_show(self): arglist = [ @@ -377,7 +420,6 @@ def test_transfer_show(self): columns, data = self.cmd.take_action(parsed_args) - self.transfer_mock.get.assert_called_once_with( - self.volume_transfer.id) + self.transfer_mock.get.assert_called_once_with(self.volume_transfer.id) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) diff --git a/openstackclient/tests/unit/volume/v3/test_volume_type.py b/openstackclient/tests/unit/volume/v3/test_volume_type.py new file mode 100644 index 0000000000..828f8b0902 --- /dev/null +++ b/openstackclient/tests/unit/volume/v3/test_volume_type.py @@ -0,0 +1,1109 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
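Several delete tests in this new file (and in the transfer-request tests above) simulate a missing resource by patching osc_lib.utils.find_resource with a side_effect list. A small self-contained sketch of that pattern follows; it is not part of the patch, and the names and the mock.sentinel manager are placeholders.

    from unittest import mock

    from osc_lib import exceptions
    from osc_lib import utils

    # One side_effect entry per expected lookup; an exception entry makes
    # that particular lookup raise instead of returning a resource.
    lookups = ['first-type', exceptions.CommandError('not found')]
    with mock.patch.object(
        utils, 'find_resource', side_effect=lookups
    ) as find_mock:
        found = utils.find_resource(mock.sentinel.manager, 'first-type')
        try:
            utils.find_resource(mock.sentinel.manager, 'missing-type')
        except exceptions.CommandError:
            pass

    assert found == 'first-type'
    assert find_mock.call_count == 2

This is how the tests can verify both that the first resource was deleted and that the aggregated "1 of 2 ... failed to delete" error is raised for the second.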
+ +from unittest import mock +from unittest.mock import call + +from osc_lib.cli import format_columns +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes +from openstackclient.tests.unit import utils as tests_utils +from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes +from openstackclient.volume.v3 import volume_type + + +class TestType(volume_fakes.TestVolume): + def setUp(self): + super().setUp() + + self.volume_types_mock = self.volume_client.volume_types + self.volume_types_mock.reset_mock() + + self.volume_type_access_mock = self.volume_client.volume_type_access + self.volume_type_access_mock.reset_mock() + + self.volume_encryption_types_mock = ( + self.volume_client.volume_encryption_types + ) + self.volume_encryption_types_mock.reset_mock() + + self.projects_mock = self.identity_client.projects + self.projects_mock.reset_mock() + + +class TestTypeCreate(TestType): + def setUp(self): + super().setUp() + + self.new_volume_type = volume_fakes.create_one_volume_type( + methods={'set_keys': None}, + ) + self.project = identity_fakes.FakeProject.create_one_project() + self.columns = ( + 'description', + 'id', + 'is_public', + 'name', + ) + self.data = ( + self.new_volume_type.description, + self.new_volume_type.id, + True, + self.new_volume_type.name, + ) + + self.volume_types_mock.create.return_value = self.new_volume_type + self.projects_mock.get.return_value = self.project + # Get the command object to test + self.cmd = volume_type.CreateVolumeType(self.app, None) + + def test_type_create_public(self): + arglist = [ + "--description", + self.new_volume_type.description, + "--public", + self.new_volume_type.name, + ] + verifylist = [ + ("description", self.new_volume_type.description), + ("is_public", True), + ("name", self.new_volume_type.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.create.assert_called_with( + self.new_volume_type.name, + description=self.new_volume_type.description, + is_public=True, + ) + + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_type_create_private(self): + arglist = [ + "--description", + self.new_volume_type.description, + "--private", + "--project", + self.project.id, + self.new_volume_type.name, + ] + verifylist = [ + ("description", self.new_volume_type.description), + ("is_public", False), + ("project", self.project.id), + ("name", self.new_volume_type.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.create.assert_called_with( + self.new_volume_type.name, + description=self.new_volume_type.description, + is_public=False, + ) + + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_type_create_with_properties(self): + arglist = [ + '--property', + 'myprop=myvalue', + # this combination isn't viable server-side but is okay for testing + '--multiattach', + '--cacheable', + '--replicated', + '--availability-zone', + 'az1', + self.new_volume_type.name, + ] + verifylist = [ + ('properties', {'myprop': 'myvalue'}), + ('multiattach', True), + ('cacheable', True), + ('replicated', True), + ('availability_zones', ['az1']), + ('name', self.new_volume_type.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = 
self.cmd.take_action(parsed_args) + self.volume_types_mock.create.assert_called_with( + self.new_volume_type.name, description=None + ) + self.new_volume_type.set_keys.assert_called_once_with( + { + 'myprop': 'myvalue', + 'multiattach': ' True', + 'cacheable': ' True', + 'replication_enabled': ' True', + 'RESKEY:availability_zones': 'az1', + } + ) + + self.columns += ('properties',) + self.data += (format_columns.DictColumn(None),) + + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_public_type_create_with_project_public(self): + arglist = [ + '--project', + self.project.id, + self.new_volume_type.name, + ] + verifylist = [ + ('is_public', None), + ('project', self.project.id), + ('name', self.new_volume_type.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + + def test_type_create_with_encryption(self): + encryption_info = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': '128', + 'control_location': 'front-end', + } + encryption_type = volume_fakes.create_one_encryption_volume_type( + attrs=encryption_info, + ) + self.new_volume_type = volume_fakes.create_one_volume_type( + attrs={'encryption': encryption_info}, + ) + self.volume_types_mock.create.return_value = self.new_volume_type + self.volume_encryption_types_mock.create.return_value = encryption_type + encryption_columns = ( + 'description', + 'encryption', + 'id', + 'is_public', + 'name', + ) + encryption_data = ( + self.new_volume_type.description, + format_columns.DictColumn(encryption_info), + self.new_volume_type.id, + True, + self.new_volume_type.name, + ) + arglist = [ + '--encryption-provider', + 'LuksEncryptor', + '--encryption-cipher', + 'aes-xts-plain64', + '--encryption-key-size', + '128', + '--encryption-control-location', + 'front-end', + self.new_volume_type.name, + ] + verifylist = [ + ('encryption_provider', 'LuksEncryptor'), + ('encryption_cipher', 'aes-xts-plain64'), + ('encryption_key_size', 128), + ('encryption_control_location', 'front-end'), + ('name', self.new_volume_type.name), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.create.assert_called_with( + self.new_volume_type.name, + description=None, + ) + body = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } + self.volume_encryption_types_mock.create.assert_called_with( + self.new_volume_type, + body, + ) + self.assertEqual(encryption_columns, columns) + self.assertCountEqual(encryption_data, data) + + +class TestTypeDelete(TestType): + volume_types = volume_fakes.create_volume_types(count=2) + + def setUp(self): + super().setUp() + + self.volume_types_mock.get = volume_fakes.get_volume_types( + self.volume_types, + ) + self.volume_types_mock.delete.return_value = None + + # Get the command object to mock + self.cmd = volume_type.DeleteVolumeType(self.app, None) + + def test_type_delete(self): + arglist = [self.volume_types[0].id] + verifylist = [("volume_types", [self.volume_types[0].id])] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + self.volume_types_mock.delete.assert_called_with(self.volume_types[0]) + self.assertIsNone(result) + + def test_delete_multiple_types(self): + arglist = [] + for t in self.volume_types: + 
arglist.append(t.id) + verifylist = [ + ('volume_types', arglist), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + result = self.cmd.take_action(parsed_args) + + calls = [] + for t in self.volume_types: + calls.append(call(t)) + self.volume_types_mock.delete.assert_has_calls(calls) + self.assertIsNone(result) + + def test_delete_multiple_types_with_exception(self): + arglist = [ + self.volume_types[0].id, + 'unexist_type', + ] + verifylist = [ + ('volume_types', arglist), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + find_mock_result = [self.volume_types[0], exceptions.CommandError] + with mock.patch.object( + utils, 'find_resource', side_effect=find_mock_result + ) as find_mock: + try: + self.cmd.take_action(parsed_args) + self.fail('CommandError should be raised.') + except exceptions.CommandError as e: + self.assertEqual( + '1 of 2 volume types failed to delete.', str(e) + ) + find_mock.assert_any_call( + self.volume_types_mock, self.volume_types[0].id + ) + find_mock.assert_any_call(self.volume_types_mock, 'unexist_type') + + self.assertEqual(2, find_mock.call_count) + self.volume_types_mock.delete.assert_called_once_with( + self.volume_types[0] + ) + + +class TestTypeList(TestType): + volume_types = volume_fakes.create_volume_types() + + columns = [ + "ID", + "Name", + "Is Public", + ] + columns_long = columns + ["Description", "Properties"] + data_with_default_type = [(volume_types[0].id, volume_types[0].name, True)] + data = [] + for t in volume_types: + data.append( + ( + t.id, + t.name, + t.is_public, + ) + ) + data_long = [] + for t in volume_types: + data_long.append( + ( + t.id, + t.name, + t.is_public, + t.description, + format_columns.DictColumn(t.extra_specs), + ) + ) + + def setUp(self): + super().setUp() + + self.volume_types_mock.list.return_value = self.volume_types + self.volume_types_mock.default.return_value = self.volume_types[0] + # get the command to test + self.cmd = volume_type.ListVolumeType(self.app, None) + + def test_type_list_without_options(self): + arglist = [] + verifylist = [ + ("long", False), + ("is_public", None), + ("default", False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.list.assert_called_once_with( + search_opts={}, is_public=None + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, list(data)) + + def test_type_list_with_options(self): + arglist = [ + "--long", + "--public", + ] + verifylist = [ + ("long", True), + ("is_public", True), + ("default", False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.list.assert_called_once_with( + search_opts={}, is_public=True + ) + self.assertEqual(self.columns_long, columns) + self.assertCountEqual(self.data_long, list(data)) + + def test_type_list_with_private_option(self): + arglist = [ + "--private", + ] + verifylist = [ + ("long", False), + ("is_public", False), + ("default", False), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.list.assert_called_once_with( + search_opts={}, is_public=False + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, list(data)) + + def test_type_list_with_default_option(self): + arglist = [ + "--default", + ] + verifylist = [ + ("encryption_type", False), + 
("long", False), + ("is_public", None), + ("default", True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.default.assert_called_once_with() + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data_with_default_type, list(data)) + + def test_type_list_with_properties(self): + self.set_volume_api_version('3.52') + + arglist = [ + "--property", + "foo=bar", + "--multiattach", + "--cacheable", + "--replicated", + "--availability-zone", + "az1", + ] + verifylist = [ + ("encryption_type", False), + ("long", False), + ("is_public", None), + ("default", False), + ("properties", {"foo": "bar"}), + ("multiattach", True), + ("cacheable", True), + ("replicated", True), + ("availability_zones", ["az1"]), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.list.assert_called_once_with( + search_opts={ + "extra_specs": { + "foo": "bar", + "multiattach": " True", + "cacheable": " True", + "replication_enabled": " True", + "RESKEY:availability_zones": "az1", + } + }, + is_public=None, + ) + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, list(data)) + + def test_type_list_with_properties_pre_v352(self): + self.set_volume_api_version('3.51') + + arglist = [ + "--property", + "foo=bar", + ] + verifylist = [ + ("encryption_type", False), + ("long", False), + ("is_public", None), + ("default", False), + ("properties", {"foo": "bar"}), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertIn( + '--os-volume-api-version 3.52 or greater is required', + str(exc), + ) + + def test_type_list_with_encryption(self): + encryption_type = volume_fakes.create_one_encryption_volume_type( + attrs={'volume_type_id': self.volume_types[0].id}, + ) + encryption_info = { + 'provider': 'LuksEncryptor', + 'cipher': None, + 'key_size': None, + 'control_location': 'front-end', + } + encryption_columns = self.columns + [ + "Encryption", + ] + encryption_data = [] + encryption_data.append( + ( + self.volume_types[0].id, + self.volume_types[0].name, + self.volume_types[0].is_public, + volume_type.EncryptionInfoColumn( + self.volume_types[0].id, + {self.volume_types[0].id: encryption_info}, + ), + ) + ) + encryption_data.append( + ( + self.volume_types[1].id, + self.volume_types[1].name, + self.volume_types[1].is_public, + volume_type.EncryptionInfoColumn(self.volume_types[1].id, {}), + ) + ) + + self.volume_encryption_types_mock.list.return_value = [encryption_type] + arglist = [ + "--encryption-type", + ] + verifylist = [ + ("encryption_type", True), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_encryption_types_mock.list.assert_called_once_with() + self.volume_types_mock.list.assert_called_once_with( + search_opts={}, is_public=None + ) + self.assertEqual(encryption_columns, columns) + self.assertCountEqual(encryption_data, list(data)) + + +class TestTypeSet(TestType): + def setUp(self): + super().setUp() + + self.project = identity_fakes.FakeProject.create_one_project() + self.projects_mock.get.return_value = self.project + + self.volume_type = volume_fakes.create_one_volume_type( + methods={'set_keys': None}, + ) + self.volume_types_mock.get.return_value = self.volume_type + 
self.volume_encryption_types_mock.create.return_value = None + self.volume_encryption_types_mock.update.return_value = None + + self.cmd = volume_type.SetVolumeType(self.app, None) + + def test_type_set(self): + arglist = [ + '--name', + 'new_name', + '--description', + 'new_description', + '--private', + self.volume_type.id, + ] + verifylist = [ + ('name', 'new_name'), + ('description', 'new_description'), + ('properties', None), + ('volume_type', self.volume_type.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + + kwargs = { + 'name': 'new_name', + 'description': 'new_description', + 'is_public': False, + } + self.volume_types_mock.update.assert_called_with( + self.volume_type.id, **kwargs + ) + self.assertIsNone(result) + + self.volume_type_access_mock.add_project_access.assert_not_called() + self.volume_encryption_types_mock.update.assert_not_called() + self.volume_encryption_types_mock.create.assert_not_called() + + def test_type_set_property(self): + arglist = [ + '--property', + 'myprop=myvalue', + # this combination isn't viable server-side but is okay for testing + '--multiattach', + '--cacheable', + '--replicated', + '--availability-zone', + 'az1', + self.volume_type.id, + ] + verifylist = [ + ('name', None), + ('description', None), + ('properties', {'myprop': 'myvalue'}), + ('multiattach', True), + ('cacheable', True), + ('replicated', True), + ('availability_zones', ['az1']), + ('volume_type', self.volume_type.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_type.set_keys.assert_called_once_with( + { + 'myprop': 'myvalue', + 'multiattach': ' True', + 'cacheable': ' True', + 'replication_enabled': ' True', + 'RESKEY:availability_zones': 'az1', + } + ) + self.volume_type_access_mock.add_project_access.assert_not_called() + self.volume_encryption_types_mock.update.assert_not_called() + self.volume_encryption_types_mock.create.assert_not_called() + + def test_type_set_with_empty_project(self): + arglist = [ + '--project', + '', + self.volume_type.id, + ] + verifylist = [ + ('project', ''), + ('volume_type', self.volume_type.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_type.set_keys.assert_not_called() + self.volume_type_access_mock.add_project_access.assert_not_called() + self.volume_encryption_types_mock.update.assert_not_called() + self.volume_encryption_types_mock.create.assert_not_called() + + def test_type_set_with_project(self): + arglist = [ + '--project', + self.project.id, + self.volume_type.id, + ] + verifylist = [ + ('project', self.project.id), + ('volume_type', self.volume_type.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_type.set_keys.assert_not_called() + self.volume_type_access_mock.add_project_access.assert_called_with( + self.volume_type.id, + self.project.id, + ) + self.volume_encryption_types_mock.update.assert_not_called() + self.volume_encryption_types_mock.create.assert_not_called() + + def test_type_set_with_new_encryption(self): + self.volume_encryption_types_mock.update.side_effect = ( + exceptions.NotFound('NotFound') + ) + arglist = [ + '--encryption-provider', + 'LuksEncryptor', + '--encryption-cipher', + 'aes-xts-plain64', + 
'--encryption-key-size', + '128', + '--encryption-control-location', + 'front-end', + self.volume_type.id, + ] + verifylist = [ + ('encryption_provider', 'LuksEncryptor'), + ('encryption_cipher', 'aes-xts-plain64'), + ('encryption_key_size', 128), + ('encryption_control_location', 'front-end'), + ('volume_type', self.volume_type.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + body = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } + self.volume_encryption_types_mock.update.assert_called_with( + self.volume_type, + body, + ) + self.volume_encryption_types_mock.create.assert_called_with( + self.volume_type, + body, + ) + + @mock.patch.object(utils, 'find_resource') + def test_type_set_with_existing_encryption(self, mock_find): + mock_find.side_effect = [self.volume_type, "existing_encryption_type"] + arglist = [ + '--encryption-provider', + 'LuksEncryptor', + '--encryption-cipher', + 'aes-xts-plain64', + '--encryption-control-location', + 'front-end', + self.volume_type.id, + ] + verifylist = [ + ('encryption_provider', 'LuksEncryptor'), + ('encryption_cipher', 'aes-xts-plain64'), + ('encryption_control_location', 'front-end'), + ('volume_type', self.volume_type.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_type.set_keys.assert_not_called() + self.volume_type_access_mock.add_project_access.assert_not_called() + body = { + 'provider': 'LuksEncryptor', + 'cipher': 'aes-xts-plain64', + 'control_location': 'front-end', + } + self.volume_encryption_types_mock.update.assert_called_with( + self.volume_type, + body, + ) + self.volume_encryption_types_mock.create.assert_not_called() + + def test_type_set_new_encryption_without_provider(self): + self.volume_encryption_types_mock.update.side_effect = ( + exceptions.NotFound('NotFound') + ) + arglist = [ + '--encryption-cipher', + 'aes-xts-plain64', + '--encryption-key-size', + '128', + '--encryption-control-location', + 'front-end', + self.volume_type.id, + ] + verifylist = [ + ('encryption_cipher', 'aes-xts-plain64'), + ('encryption_key_size', 128), + ('encryption_control_location', 'front-end'), + ('volume_type', self.volume_type.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + exc = self.assertRaises( + exceptions.CommandError, + self.cmd.take_action, + parsed_args, + ) + self.assertEqual( + "Command Failed: One or more of the operations failed", + str(exc), + ) + + self.volume_type.set_keys.assert_not_called() + self.volume_type_access_mock.add_project_access.assert_not_called() + body = { + 'cipher': 'aes-xts-plain64', + 'key_size': 128, + 'control_location': 'front-end', + } + self.volume_encryption_types_mock.update.assert_called_with( + self.volume_type, + body, + ) + self.volume_encryption_types_mock.create.assert_not_called() + + +class TestTypeShow(TestType): + columns = ( + 'access_project_ids', + 'description', + 'id', + 'is_public', + 'name', + 'properties', + ) + + def setUp(self): + super().setUp() + + self.volume_type = volume_fakes.create_one_volume_type() + self.data = ( + None, + self.volume_type.description, + self.volume_type.id, + True, + self.volume_type.name, + format_columns.DictColumn(self.volume_type.extra_specs), + ) + + self.volume_types_mock.get.return_value = self.volume_type + + # Get the command 
object to test + self.cmd = volume_type.ShowVolumeType(self.app, None) + + def test_type_show(self): + arglist = [self.volume_type.id] + verifylist = [ + ("encryption_type", False), + ("volume_type", self.volume_type.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.get.assert_called_with(self.volume_type.id) + + self.assertEqual(self.columns, columns) + self.assertCountEqual(self.data, data) + + def test_type_show_with_access(self): + arglist = [self.volume_type.id] + verifylist = [("volume_type", self.volume_type.id)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + private_type = volume_fakes.create_one_volume_type( + attrs={'is_public': False}, + ) + type_access_list = volume_fakes.create_one_type_access() + with mock.patch.object( + self.volume_types_mock, + 'get', + return_value=private_type, + ): + with mock.patch.object( + self.volume_type_access_mock, + 'list', + return_value=[type_access_list], + ): + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.get.assert_called_once_with( + self.volume_type.id + ) + self.volume_type_access_mock.list.assert_called_once_with( + private_type.id + ) + + self.assertEqual(self.columns, columns) + private_type_data = ( + format_columns.ListColumn([type_access_list.project_id]), + private_type.description, + private_type.id, + private_type.is_public, + private_type.name, + format_columns.DictColumn(private_type.extra_specs), + ) + self.assertCountEqual(private_type_data, data) + + def test_type_show_with_list_access_exec(self): + arglist = [self.volume_type.id] + verifylist = [("volume_type", self.volume_type.id)] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + private_type = volume_fakes.create_one_volume_type( + attrs={'is_public': False}, + ) + with mock.patch.object( + self.volume_types_mock, 'get', return_value=private_type + ): + with mock.patch.object( + self.volume_type_access_mock, 'list', side_effect=Exception() + ): + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.get.assert_called_once_with( + self.volume_type.id + ) + self.volume_type_access_mock.list.assert_called_once_with( + private_type.id + ) + + self.assertEqual(self.columns, columns) + private_type_data = ( + None, + private_type.description, + private_type.id, + private_type.is_public, + private_type.name, + format_columns.DictColumn(private_type.extra_specs), + ) + self.assertCountEqual(private_type_data, data) + + def test_type_show_with_encryption(self): + encryption_type = volume_fakes.create_one_encryption_volume_type() + encryption_info = { + 'provider': 'LuksEncryptor', + 'cipher': None, + 'key_size': None, + 'control_location': 'front-end', + } + self.volume_type = volume_fakes.create_one_volume_type( + attrs={'encryption': encryption_info}, + ) + self.volume_types_mock.get.return_value = self.volume_type + self.volume_encryption_types_mock.get.return_value = encryption_type + encryption_columns = ( + 'access_project_ids', + 'description', + 'encryption', + 'id', + 'is_public', + 'name', + 'properties', + ) + encryption_data = ( + None, + self.volume_type.description, + format_columns.DictColumn(encryption_info), + self.volume_type.id, + True, + self.volume_type.name, + format_columns.DictColumn(self.volume_type.extra_specs), + ) + arglist = ['--encryption-type', self.volume_type.id] + verifylist = [ + ('encryption_type', True), + ("volume_type", self.volume_type.id), + ] + 
parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + columns, data = self.cmd.take_action(parsed_args) + self.volume_types_mock.get.assert_called_with(self.volume_type.id) + self.volume_encryption_types_mock.get.assert_called_with( + self.volume_type.id + ) + self.assertEqual(encryption_columns, columns) + self.assertCountEqual(encryption_data, data) + + +class TestTypeUnset(TestType): + project = identity_fakes.FakeProject.create_one_project() + volume_type = volume_fakes.create_one_volume_type( + methods={'unset_keys': None}, + ) + + def setUp(self): + super().setUp() + + self.volume_types_mock.get.return_value = self.volume_type + + # Return a project + self.projects_mock.get.return_value = self.project + + # Get the command object to test + self.cmd = volume_type.UnsetVolumeType(self.app, None) + + def test_type_unset(self): + arglist = [ + '--property', + 'property', + '--property', + 'multi_property', + self.volume_type.id, + ] + verifylist = [ + ('properties', ['property', 'multi_property']), + ('volume_type', self.volume_type.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volume_type.unset_keys.assert_called_once_with( + ['property', 'multi_property'] + ) + self.assertIsNone(result) + + def test_type_unset_project_access(self): + arglist = [ + '--project', + self.project.id, + self.volume_type.id, + ] + verifylist = [ + ('project', self.project.id), + ('volume_type', self.volume_type.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + + self.volume_type_access_mock.remove_project_access.assert_called_with( + self.volume_type.id, + self.project.id, + ) + + def test_type_unset_not_called_without_project_argument(self): + arglist = [ + '--project', + '', + self.volume_type.id, + ] + verifylist = [ + ('encryption_type', False), + ('project', ''), + ('volume_type', self.volume_type.id), + ] + + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.assertIsNone(result) + self.volume_encryption_types_mock.delete.assert_not_called() + self.assertFalse( + self.volume_type_access_mock.remove_project_access.called + ) + + def test_type_unset_failed_with_missing_volume_type_argument(self): + arglist = [ + '--project', + 'identity_fakes.project_id', + ] + verifylist = [ + ('project', 'identity_fakes.project_id'), + ] + + self.assertRaises( + tests_utils.ParserException, + self.check_parser, + self.cmd, + arglist, + verifylist, + ) + + def test_type_unset_encryption_type(self): + arglist = [ + '--encryption-type', + self.volume_type.id, + ] + verifylist = [ + ('encryption_type', True), + ('volume_type', self.volume_type.id), + ] + parsed_args = self.check_parser(self.cmd, arglist, verifylist) + + result = self.cmd.take_action(parsed_args) + self.volume_encryption_types_mock.delete.assert_called_with( + self.volume_type + ) + self.assertIsNone(result) + + +class TestColumns(TestType): + def test_encryption_info_column_with_info(self): + fake_volume_type = volume_fakes.create_one_volume_type() + type_id = fake_volume_type.id + + encryption_info = { + 'provider': 'LuksEncryptor', + 'cipher': None, + 'key_size': None, + 'control_location': 'front-end', + } + col = volume_type.EncryptionInfoColumn( + type_id, {type_id: encryption_info} + ) + self.assertEqual( + utils.format_dict(encryption_info), col.human_readable() + ) + 
self.assertEqual(encryption_info, col.machine_readable()) + + def test_encryption_info_column_without_info(self): + fake_volume_type = volume_fakes.create_one_volume_type() + type_id = fake_volume_type.id + + col = volume_type.EncryptionInfoColumn(type_id, {}) + self.assertEqual('-', col.human_readable()) + self.assertIsNone(col.machine_readable()) diff --git a/openstackclient/volume/client.py b/openstackclient/volume/client.py index 0712fa7b00..dbef055fac 100644 --- a/openstackclient/volume/client.py +++ b/openstackclient/volume/client.py @@ -20,16 +20,14 @@ from openstackclient.i18n import _ - LOG = logging.getLogger(__name__) DEFAULT_API_VERSION = '3' API_VERSION_OPTION = 'os_volume_api_version' -API_NAME = "volume" +API_NAME = 'volume' API_VERSIONS = { - "1": "cinderclient.v1.client.Client", - "2": "cinderclient.v2.client.Client", - "3": "cinderclient.v3.client.Client", + '2': 'cinderclient.v2.client.Client', + '3': 'cinderclient.v3.client.Client', } # Save the microversion if in use @@ -45,11 +43,6 @@ def make_client(instance): from cinderclient.v3 import volume_snapshots from cinderclient.v3 import volumes - # Check whether the available cinderclient supports v1 or v2 - try: - from cinderclient.v1 import services # noqa - except Exception: - del API_VERSIONS['1'] try: from cinderclient.v2 import services # noqa except Exception: @@ -60,6 +53,7 @@ def make_client(instance): else: version = instance._api_version[API_NAME] from cinderclient import api_versions + # convert to APIVersion object version = api_versions.get_api_version(version) @@ -69,9 +63,7 @@ def make_client(instance): volume_snapshots.Snapshot.NAME_ATTR = 'display_name' volume_client = utils.get_client_class( - API_NAME, - version.ver_major, - API_VERSIONS + API_NAME, version.ver_major, API_VERSIONS ) LOG.debug('Instantiating volume client: %s', volume_client) @@ -84,7 +76,8 @@ def make_client(instance): kwargs = utils.build_kwargs_dict('endpoint_type', instance.interface) endpoint_override = instance.sdk_connection.config.get_endpoint( - 'block-storage') + 'block-storage' + ) client = volume_client( session=instance.session, @@ -93,7 +86,7 @@ def make_client(instance): region_name=instance.region_name, endpoint_override=endpoint_override, api_version=version, - **kwargs + **kwargs, ) return client @@ -105,8 +98,8 @@ def build_option_parser(parser): '--os-volume-api-version', metavar='', default=utils.env('OS_VOLUME_API_VERSION'), - help=_('Volume API version, default=%s ' - '(Env: OS_VOLUME_API_VERSION)') % DEFAULT_API_VERSION + help=_('Volume API version, default=%s (Env: OS_VOLUME_API_VERSION)') + % DEFAULT_API_VERSION, ) return parser @@ -127,21 +120,18 @@ def check_api_version(check_version): global _volume_api_version - # Copy some logic from novaclient 3.3.0 for basic version detection - # NOTE(dtroyer): This is only enough to resume operations using API - # version 3.0 or any valid version supplied by the user. 
_volume_api_version = api_versions.get_api_version(check_version) # Bypass X.latest format microversion if not _volume_api_version.is_latest(): - if _volume_api_version > api_versions.APIVersion("3.0"): + if _volume_api_version > api_versions.APIVersion('3.0'): if not _volume_api_version.matches( api_versions.MIN_VERSION, api_versions.MAX_VERSION, ): - msg = _("versions supported by client: %(min)s - %(max)s") % { - "min": api_versions.MIN_VERSION, - "max": api_versions.MAX_VERSION, + msg = _('versions supported by client: %(min)s - %(max)s') % { + 'min': api_versions.MIN_VERSION, + 'max': api_versions.MAX_VERSION, } raise exceptions.CommandError(msg) diff --git a/openstackclient/volume/v1/__init__.py b/openstackclient/volume/v1/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/openstackclient/volume/v1/qos_specs.py b/openstackclient/volume/v1/qos_specs.py deleted file mode 100644 index 79dff1c677..0000000000 --- a/openstackclient/volume/v1/qos_specs.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright 2015 iWeb Technologies Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Volume v1 QoS action implementations""" - -import logging - -from osc_lib.cli import format_columns -from osc_lib.cli import parseractions -from osc_lib.command import command -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.i18n import _ - - -LOG = logging.getLogger(__name__) - - -class AssociateQos(command.Command): - _description = _("Associate a QoS specification to a volume type") - - def get_parser(self, prog_name): - parser = super(AssociateQos, self).get_parser(prog_name) - parser.add_argument( - 'qos_spec', - metavar='', - help=_('QoS specification to modify (name or ID)'), - ) - parser.add_argument( - 'volume_type', - metavar='', - help=_('Volume type to associate the QoS (name or ID)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) - volume_type = utils.find_resource(volume_client.volume_types, - parsed_args.volume_type) - - volume_client.qos_specs.associate(qos_spec.id, volume_type.id) - - -class CreateQos(command.ShowOne): - _description = _("Create new QoS specification") - - def get_parser(self, prog_name): - parser = super(CreateQos, self).get_parser(prog_name) - parser.add_argument( - 'name', - metavar='', - help=_('New QoS specification name'), - ) - consumer_choices = ['front-end', 'back-end', 'both'] - parser.add_argument( - '--consumer', - metavar='', - choices=consumer_choices, - default='both', - help=(_('Consumer of the QoS. 
Valid consumers: %s ' - "(defaults to 'both')") % - utils.format_list(consumer_choices)) - ) - parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - help=_('Set a QoS specification property ' - '(repeat option to set multiple properties)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - specs = {} - specs.update({'consumer': parsed_args.consumer}) - - if parsed_args.property: - specs.update(parsed_args.property) - - qos_spec = volume_client.qos_specs.create(parsed_args.name, specs) - qos_spec._info.update( - {'properties': - format_columns.DictColumn(qos_spec._info.pop('specs'))} - ) - return zip(*sorted(qos_spec._info.items())) - - -class DeleteQos(command.Command): - _description = _("Delete QoS specification") - - def get_parser(self, prog_name): - parser = super(DeleteQos, self).get_parser(prog_name) - parser.add_argument( - 'qos_specs', - metavar='', - nargs="+", - help=_('QoS specification(s) to delete (name or ID)'), - ) - parser.add_argument( - '--force', - action='store_true', - default=False, - help=_("Allow to delete in-use QoS specification(s)") - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - result = 0 - - for i in parsed_args.qos_specs: - try: - qos_spec = utils.find_resource(volume_client.qos_specs, i) - volume_client.qos_specs.delete(qos_spec.id, parsed_args.force) - except Exception as e: - result += 1 - LOG.error(_("Failed to delete QoS specification with " - "name or ID '%(qos)s': %(e)s"), - {'qos': i, 'e': e}) - - if result > 0: - total = len(parsed_args.qos_specs) - msg = (_("%(result)s of %(total)s QoS specifications failed" - " to delete.") % {'result': result, 'total': total}) - raise exceptions.CommandError(msg) - - -class DisassociateQos(command.Command): - _description = _("Disassociate a QoS specification from a volume type") - - def get_parser(self, prog_name): - parser = super(DisassociateQos, self).get_parser(prog_name) - parser.add_argument( - 'qos_spec', - metavar='', - help=_('QoS specification to modify (name or ID)'), - ) - volume_type_group = parser.add_mutually_exclusive_group() - volume_type_group.add_argument( - '--volume-type', - metavar='', - help=_('Volume type to disassociate the QoS from (name or ID)'), - ) - volume_type_group.add_argument( - '--all', - action='store_true', - default=False, - help=_('Disassociate the QoS from every volume type'), - ) - - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) - - if parsed_args.volume_type: - volume_type = utils.find_resource(volume_client.volume_types, - parsed_args.volume_type) - volume_client.qos_specs.disassociate(qos_spec.id, volume_type.id) - elif parsed_args.all: - volume_client.qos_specs.disassociate_all(qos_spec.id) - - -class ListQos(command.Lister): - _description = _("List QoS specifications") - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - qos_specs_list = volume_client.qos_specs.list() - - for qos in qos_specs_list: - try: - qos_associations = volume_client.qos_specs.get_associations( - qos, - ) - if qos_associations: - associations = [ - association.name for association in qos_associations - ] - qos._info.update({'associations': associations}) - except Exception as ex: - if type(ex).__name__ == 'NotFound': - qos._info.update({'associations': None}) - 
else: - raise - - display_columns = ( - 'ID', 'Name', 'Consumer', 'Associations', 'Properties') - columns = ('ID', 'Name', 'Consumer', 'Associations', 'Specs') - return (display_columns, - (utils.get_dict_properties( - s._info, columns, - formatters={ - 'Specs': format_columns.DictColumn, - 'Associations': format_columns.ListColumn - }, - ) for s in qos_specs_list)) - - -class SetQos(command.Command): - _description = _("Set QoS specification properties") - - def get_parser(self, prog_name): - parser = super(SetQos, self).get_parser(prog_name) - parser.add_argument( - 'qos_spec', - metavar='', - help=_('QoS specification to modify (name or ID)'), - ) - parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - help=_('Property to add or modify for this QoS specification ' - '(repeat option to set multiple properties)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) - - if parsed_args.property: - volume_client.qos_specs.set_keys(qos_spec.id, - parsed_args.property) - - -class ShowQos(command.ShowOne): - _description = _("Display QoS specification details") - - def get_parser(self, prog_name): - parser = super(ShowQos, self).get_parser(prog_name) - parser.add_argument( - 'qos_spec', - metavar='', - help=_('QoS specification to display (name or ID)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) - - qos_associations = volume_client.qos_specs.get_associations(qos_spec) - if qos_associations: - associations = [association.name - for association in qos_associations] - qos_spec._info.update({ - 'associations': format_columns.ListColumn(associations) - }) - qos_spec._info.update( - {'properties': - format_columns.DictColumn(qos_spec._info.pop('specs'))}) - - return zip(*sorted(qos_spec._info.items())) - - -class UnsetQos(command.Command): - _description = _("Unset QoS specification properties") - - def get_parser(self, prog_name): - parser = super(UnsetQos, self).get_parser(prog_name) - parser.add_argument( - 'qos_spec', - metavar='', - help=_('QoS specification to modify (name or ID)'), - ) - parser.add_argument( - '--property', - metavar='', - action='append', - help=_('Property to remove from the QoS specification. ' - '(repeat option to unset multiple properties)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) - - if parsed_args.property: - volume_client.qos_specs.unset_keys(qos_spec.id, - parsed_args.property) diff --git a/openstackclient/volume/v1/service.py b/openstackclient/volume/v1/service.py deleted file mode 100644 index d468c6ff1e..0000000000 --- a/openstackclient/volume/v1/service.py +++ /dev/null @@ -1,130 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Service action implementations""" - -from osc_lib.command import command -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.i18n import _ - - -class ListService(command.Lister): - _description = _("List service command") - - def get_parser(self, prog_name): - parser = super(ListService, self).get_parser(prog_name) - parser.add_argument( - "--host", - metavar="", - help=_("List services on specified host (name only)") - ) - parser.add_argument( - "--service", - metavar="", - help=_("List only specified service (name only)") - ) - parser.add_argument( - "--long", - action="store_true", - default=False, - help=_("List additional fields in output") - ) - return parser - - def take_action(self, parsed_args): - service_client = self.app.client_manager.volume - - if parsed_args.long: - columns = [ - "Binary", - "Host", - "Zone", - "Status", - "State", - "Updated At", - "Disabled Reason" - ] - else: - columns = [ - "Binary", - "Host", - "Zone", - "Status", - "State", - "Updated At" - ] - - data = service_client.services.list(parsed_args.host, - parsed_args.service) - return (columns, - (utils.get_item_properties( - s, columns, - ) for s in data)) - - -class SetService(command.Command): - _description = _("Set volume service properties") - - def get_parser(self, prog_name): - parser = super(SetService, self).get_parser(prog_name) - parser.add_argument( - "host", - metavar="", - help=_("Name of host") - ) - parser.add_argument( - "service", - metavar="", - help=_("Name of service (Binary name)") - ) - enabled_group = parser.add_mutually_exclusive_group() - enabled_group.add_argument( - "--enable", - action="store_true", - help=_("Enable volume service") - ) - enabled_group.add_argument( - "--disable", - action="store_true", - help=_("Disable volume service") - ) - parser.add_argument( - "--disable-reason", - metavar="", - help=_("Reason for disabling the service " - "(should be used with --disable option)") - ) - return parser - - def take_action(self, parsed_args): - if parsed_args.disable_reason and not parsed_args.disable: - msg = _("Cannot specify option --disable-reason without " - "--disable specified.") - raise exceptions.CommandError(msg) - - service_client = self.app.client_manager.volume - if parsed_args.enable: - service_client.services.enable( - parsed_args.host, parsed_args.service) - if parsed_args.disable: - if parsed_args.disable_reason: - service_client.services.disable_log_reason( - parsed_args.host, - parsed_args.service, - parsed_args.disable_reason) - else: - service_client.services.disable( - parsed_args.host, parsed_args.service) diff --git a/openstackclient/volume/v1/volume.py b/openstackclient/volume/v1/volume.py deleted file mode 100644 index 198b890f68..0000000000 --- a/openstackclient/volume/v1/volume.py +++ /dev/null @@ -1,686 +0,0 @@ -# Copyright 2012-2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""Volume v1 Volume action implementations""" - -import argparse -import functools -import logging - -from cliff import columns as cliff_columns -from osc_lib.cli import format_columns -from osc_lib.cli import parseractions -from osc_lib.command import command -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.i18n import _ - - -LOG = logging.getLogger(__name__) - - -class AttachmentsColumn(cliff_columns.FormattableColumn): - """Formattable column for attachments column. - - Unlike the parent FormattableColumn class, the initializer of the - class takes server_cache as the second argument. - osc_lib.utils.get_item_properties instantiate cliff FormattableColumn - object with a single parameter "column value", so you need to pass - a partially initialized class like - ``functools.partial(AttachmentsColumn, server_cache)``. - """ - - def __init__(self, value, server_cache=None): - super(AttachmentsColumn, self).__init__(value) - self._server_cache = server_cache or {} - - def human_readable(self): - """Return a formatted string of a volume's attached instances - - :rtype: a string of formatted instances - """ - - msg = '' - for attachment in self._value: - server = attachment['server_id'] - if server in self._server_cache.keys(): - server = self._server_cache[server].name - device = attachment['device'] - msg += 'Attached to %s on %s ' % (server, device) - return msg - - -def _check_size_arg(args): - """Check whether --size option is required or not. - - Require size parameter only in case when snapshot or source - volume is not specified. - """ - - if ((args.snapshot or args.source) - is None and args.size is None): - msg = _("--size is a required option if snapshot " - "or source volume is not specified.") - raise exceptions.CommandError(msg) - - -class CreateVolume(command.ShowOne): - _description = _("Create new volume") - - def get_parser(self, prog_name): - parser = super(CreateVolume, self).get_parser(prog_name) - parser.add_argument( - 'name', - metavar='', - help=_('Volume name'), - ) - parser.add_argument( - '--size', - metavar='', - type=int, - help=_("Volume size in GB (Required unless --snapshot or " - "--source is specified)"), - ) - parser.add_argument( - '--type', - metavar='', - help=_("Set the type of volume"), - ) - source_group = parser.add_mutually_exclusive_group() - source_group.add_argument( - '--image', - metavar='', - help=_('Use as source of volume (name or ID)'), - ) - source_group.add_argument( - '--snapshot', - metavar='', - help=_('Use as source of volume (name or ID)'), - ) - source_group.add_argument( - '--snapshot-id', - metavar='', - help=argparse.SUPPRESS, - ) - source_group.add_argument( - '--source', - metavar='', - help=_('Volume to clone (name or ID)'), - ) - parser.add_argument( - '--description', - metavar='', - help=_('Volume description'), - ) - parser.add_argument( - '--user', - metavar='', - help=_('Specify an alternate user (name or ID)'), - ) - parser.add_argument( - '--project', - metavar='', - help=_('Specify an alternate project (name or ID)'), - ) - parser.add_argument( - '--availability-zone', - metavar='', - help=_('Create volume in '), - ) - parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - help=_('Set a property on this volume ' - '(repeat option to set multiple properties)'), - ) - bootable_group = parser.add_mutually_exclusive_group() - bootable_group.add_argument( - "--bootable", - action="store_true", - help=_("Mark volume as bootable") - ) - 
bootable_group.add_argument( - "--non-bootable", - action="store_true", - help=_("Mark volume as non-bootable (default)") - ) - readonly_group = parser.add_mutually_exclusive_group() - readonly_group.add_argument( - "--read-only", - action="store_true", - help=_("Set volume to read-only access mode") - ) - readonly_group.add_argument( - "--read-write", - action="store_true", - help=_("Set volume to read-write access mode (default)") - ) - - return parser - - def take_action(self, parsed_args): - _check_size_arg(parsed_args) - identity_client = self.app.client_manager.identity - image_client = self.app.client_manager.image - volume_client = self.app.client_manager.volume - - source_volume = None - if parsed_args.source: - source_volume = utils.find_resource( - volume_client.volumes, - parsed_args.source, - ).id - - project = None - if parsed_args.project: - project = utils.find_resource( - identity_client.tenants, - parsed_args.project, - ).id - - user = None - if parsed_args.user: - user = utils.find_resource( - identity_client.users, - parsed_args.user, - ).id - - image = None - if parsed_args.image: - image = utils.find_resource( - image_client.images, - parsed_args.image, - ).id - - snapshot = parsed_args.snapshot or parsed_args.snapshot_id - - volume = volume_client.volumes.create( - parsed_args.size, - snapshot, - source_volume, - parsed_args.name, - parsed_args.description, - parsed_args.type, - user, - project, - parsed_args.availability_zone, - parsed_args.property, - image, - ) - - if parsed_args.bootable or parsed_args.non_bootable: - try: - if utils.wait_for_status( - volume_client.volumes.get, - volume.id, - success_status=['available'], - error_status=['error'], - sleep_time=1 - ): - volume_client.volumes.set_bootable( - volume.id, - parsed_args.bootable - ) - else: - msg = _( - "Volume status is not available for setting boot " - "state" - ) - raise exceptions.CommandError(msg) - except Exception as e: - LOG.error(_("Failed to set volume bootable property: %s"), e) - if parsed_args.read_only or parsed_args.read_write: - try: - if utils.wait_for_status( - volume_client.volumes.get, - volume.id, - success_status=['available'], - error_status=['error'], - sleep_time=1 - ): - volume_client.volumes.update_readonly_flag( - volume.id, - parsed_args.read_only - ) - else: - msg = _( - "Volume status is not available for setting it" - "read only." 
- ) - raise exceptions.CommandError(msg) - except Exception as e: - LOG.error(_("Failed to set volume read-only access " - "mode flag: %s"), e) - - # Map 'metadata' column to 'properties' - volume._info.update( - { - 'properties': - format_columns.DictColumn(volume._info.pop('metadata')), - 'type': volume._info.pop('volume_type'), - }, - ) - # Replace "display_name" by "name", keep consistent in v1 and v2 - if 'display_name' in volume._info: - volume._info.update({'name': volume._info.pop('display_name')}) - volume_info = utils.backward_compat_col_showone( - volume._info, parsed_args.columns, {'display_name': 'name'} - ) - - return zip(*sorted(volume_info.items())) - - -class DeleteVolume(command.Command): - _description = _("Delete volume(s)") - - def get_parser(self, prog_name): - parser = super(DeleteVolume, self).get_parser(prog_name) - parser.add_argument( - 'volumes', - metavar='', - nargs="+", - help=_('Volume(s) to delete (name or ID)'), - ) - parser.add_argument( - '--force', - action='store_true', - default=False, - help=_('Attempt forced removal of volume(s), regardless of state ' - '(defaults to False)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - result = 0 - - for i in parsed_args.volumes: - try: - volume_obj = utils.find_resource( - volume_client.volumes, i) - if parsed_args.force: - volume_client.volumes.force_delete(volume_obj.id) - else: - volume_client.volumes.delete(volume_obj.id) - except Exception as e: - result += 1 - LOG.error(_("Failed to delete volume with " - "name or ID '%(volume)s': %(e)s"), - {'volume': i, 'e': e}) - - if result > 0: - total = len(parsed_args.volumes) - msg = (_("%(result)s of %(total)s volumes failed " - "to delete.") % {'result': result, 'total': total}) - raise exceptions.CommandError(msg) - - -class ListVolume(command.Lister): - _description = _("List volumes") - - def get_parser(self, prog_name): - parser = super(ListVolume, self).get_parser(prog_name) - parser.add_argument( - '--name', - metavar='', - help=_('Filter results by volume name'), - ) - parser.add_argument( - '--status', - metavar='', - help=_('Filter results by status'), - ) - parser.add_argument( - '--all-projects', - action='store_true', - default=False, - help=_('Include all projects (admin only)'), - ) - parser.add_argument( - '--long', - action='store_true', - default=False, - help=_('List additional fields in output'), - ) - parser.add_argument( - '--offset', - type=int, - action=parseractions.NonNegativeAction, - metavar='', - help=_('Index from which to start listing volumes'), - ) - parser.add_argument( - '--limit', - type=int, - action=parseractions.NonNegativeAction, - metavar='', - help=_('Maximum number of volumes to display'), - ) - return parser - - def take_action(self, parsed_args): - - volume_client = self.app.client_manager.volume - compute_client = self.app.client_manager.compute - - if parsed_args.long: - columns = ( - 'ID', - 'Display Name', - 'Status', - 'Size', - 'Volume Type', - 'Bootable', - 'Attachments', - 'Metadata', - ) - column_headers = ( - 'ID', - 'Name', - 'Status', - 'Size', - 'Type', - 'Bootable', - 'Attached to', - 'Properties', - ) - else: - columns = ( - 'ID', - 'Display Name', - 'Status', - 'Size', - 'Attachments', - ) - column_headers = ( - 'ID', - 'Name', - 'Status', - 'Size', - 'Attached to', - ) - - # Cache the server list - server_cache = {} - try: - for s in compute_client.servers.list(): - server_cache[s.id] = s - except Exception: - # Just forget it if there's 
any trouble - pass - AttachmentsColumnWithCache = functools.partial( - AttachmentsColumn, server_cache=server_cache) - - search_opts = { - 'all_tenants': parsed_args.all_projects, - 'display_name': parsed_args.name, - 'status': parsed_args.status, - } - - if parsed_args.offset: - search_opts['offset'] = parsed_args.offset - - data = volume_client.volumes.list( - search_opts=search_opts, - limit=parsed_args.limit, - ) - column_headers = utils.backward_compat_col_lister( - column_headers, parsed_args.columns, {'Display Name': 'Name'}) - - return (column_headers, - (utils.get_item_properties( - s, columns, - formatters={'Metadata': format_columns.DictColumn, - 'Attachments': AttachmentsColumnWithCache}, - ) for s in data)) - - -class MigrateVolume(command.Command): - _description = _("Migrate volume to a new host") - - def get_parser(self, prog_name): - parser = super(MigrateVolume, self).get_parser(prog_name) - parser.add_argument( - 'volume', - metavar="", - help=_("Volume to migrate (name or ID)") - ) - parser.add_argument( - '--host', - metavar="", - required=True, - help=_("Destination host (takes the form: host@backend-name#pool)") - ) - parser.add_argument( - '--force-host-copy', - action="store_true", - help=_("Enable generic host-based force-migration, " - "which bypasses driver optimizations") - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume = utils.find_resource(volume_client.volumes, parsed_args.volume) - volume_client.volumes.migrate_volume(volume.id, parsed_args.host, - parsed_args.force_host_copy,) - - -class SetVolume(command.Command): - _description = _("Set volume properties") - - def get_parser(self, prog_name): - parser = super(SetVolume, self).get_parser(prog_name) - parser.add_argument( - 'volume', - metavar='', - help=_('Volume to modify (name or ID)'), - ) - parser.add_argument( - '--name', - metavar='', - help=_('New volume name'), - ) - parser.add_argument( - '--description', - metavar='', - help=_('New volume description'), - ) - parser.add_argument( - '--size', - metavar='', - type=int, - help=_('Extend volume size in GB'), - ) - parser.add_argument( - "--no-property", - dest="no_property", - action="store_true", - help=_("Remove all properties from " - "(specify both --no-property and --property to " - "remove the current properties before setting " - "new properties.)"), - ) - parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - help=_('Set a property on this volume ' - '(repeat option to set multiple properties)'), - ) - bootable_group = parser.add_mutually_exclusive_group() - bootable_group.add_argument( - "--bootable", - action="store_true", - help=_("Mark volume as bootable") - ) - bootable_group.add_argument( - "--non-bootable", - action="store_true", - help=_("Mark volume as non-bootable") - ) - readonly_group = parser.add_mutually_exclusive_group() - readonly_group.add_argument( - "--read-only", - action="store_true", - help=_("Set volume to read-only access mode") - ) - readonly_group.add_argument( - "--read-write", - action="store_true", - help=_("Set volume to read-write access mode") - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume = utils.find_resource(volume_client.volumes, parsed_args.volume) - - result = 0 - if parsed_args.size: - try: - if volume.status != 'available': - msg = (_("Volume is in %s state, it must be available " - "before size can be extended") % 
volume.status) - raise exceptions.CommandError(msg) - if parsed_args.size <= volume.size: - msg = (_("New size must be greater than %s GB") - % volume.size) - raise exceptions.CommandError(msg) - volume_client.volumes.extend(volume.id, parsed_args.size) - except Exception as e: - LOG.error(_("Failed to set volume size: %s"), e) - result += 1 - - if parsed_args.no_property: - try: - volume_client.volumes.delete_metadata( - volume.id, volume.metadata.keys()) - except Exception as e: - LOG.error(_("Failed to clean volume properties: %s"), e) - result += 1 - - if parsed_args.property: - try: - volume_client.volumes.set_metadata( - volume.id, - parsed_args.property) - except Exception as e: - LOG.error(_("Failed to set volume property: %s"), e) - result += 1 - if parsed_args.bootable or parsed_args.non_bootable: - try: - volume_client.volumes.set_bootable( - volume.id, parsed_args.bootable) - except Exception as e: - LOG.error(_("Failed to set volume bootable property: %s"), e) - result += 1 - if parsed_args.read_only or parsed_args.read_write: - try: - volume_client.volumes.update_readonly_flag( - volume.id, - parsed_args.read_only) - except Exception as e: - LOG.error(_("Failed to set volume read-only access " - "mode flag: %s"), e) - result += 1 - kwargs = {} - if parsed_args.name: - kwargs['display_name'] = parsed_args.name - if parsed_args.description: - kwargs['display_description'] = parsed_args.description - if kwargs: - try: - volume_client.volumes.update(volume.id, **kwargs) - except Exception as e: - LOG.error(_("Failed to update volume display name " - "or display description: %s"), e) - result += 1 - - if result > 0: - raise exceptions.CommandError(_("One or more of the " - "set operations failed")) - - -class ShowVolume(command.ShowOne): - _description = _("Show volume details") - - def get_parser(self, prog_name): - parser = super(ShowVolume, self).get_parser(prog_name) - parser.add_argument( - 'volume', - metavar='', - help=_('Volume to display (name or ID)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume = utils.find_resource(volume_client.volumes, parsed_args.volume) - # Map 'metadata' column to 'properties' - volume._info.update( - { - 'properties': - format_columns.DictColumn(volume._info.pop('metadata')), - 'type': volume._info.pop('volume_type'), - }, - ) - if 'os-vol-tenant-attr:tenant_id' in volume._info: - volume._info.update( - {'project_id': volume._info.pop( - 'os-vol-tenant-attr:tenant_id')} - ) - # Replace "display_name" by "name", keep consistent in v1 and v2 - if 'display_name' in volume._info: - volume._info.update({'name': volume._info.pop('display_name')}) - - volume_info = utils.backward_compat_col_showone( - volume._info, parsed_args.columns, {'display_name': 'name'} - ) - - return zip(*sorted(volume_info.items())) - - -class UnsetVolume(command.Command): - _description = _("Unset volume properties") - - def get_parser(self, prog_name): - parser = super(UnsetVolume, self).get_parser(prog_name) - parser.add_argument( - 'volume', - metavar='', - help=_('Volume to modify (name or ID)'), - ) - parser.add_argument( - '--property', - metavar='', - action='append', - help=_('Remove a property from volume ' - '(repeat option to remove multiple properties)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume = utils.find_resource( - volume_client.volumes, parsed_args.volume) - - if parsed_args.property: - 
volume_client.volumes.delete_metadata( - volume.id, - parsed_args.property, - ) diff --git a/openstackclient/volume/v1/volume_backup.py b/openstackclient/volume/v1/volume_backup.py deleted file mode 100644 index 790cb46341..0000000000 --- a/openstackclient/volume/v1/volume_backup.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2012-2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Volume v1 Backup action implementations""" - -import copy -import functools -import logging - -from cliff import columns as cliff_columns -from osc_lib.command import command -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.i18n import _ - - -LOG = logging.getLogger(__name__) - - -class VolumeIdColumn(cliff_columns.FormattableColumn): - """Formattable column for volume ID column. - - Unlike the parent FormattableColumn class, the initializer of the - class takes volume_cache as the second argument. - osc_lib.utils.get_item_properties instantiate cliff FormattableColumn - object with a single parameter "column value", so you need to pass - a partially initialized class like - ``functools.partial(VolumeIdColumn, volume_cache)``. - """ - - def __init__(self, value, volume_cache=None): - super(VolumeIdColumn, self).__init__(value) - self._volume_cache = volume_cache or {} - - def human_readable(self): - """Return a volume name if available - - :rtype: either the volume ID or name - """ - volume_id = self._value - volume = volume_id - if volume_id in self._volume_cache.keys(): - volume = self._volume_cache[volume_id].display_name - return volume - - -class CreateVolumeBackup(command.ShowOne): - _description = _("Create new volume backup") - - def get_parser(self, prog_name): - parser = super(CreateVolumeBackup, self).get_parser(prog_name) - parser.add_argument( - 'volume', - metavar='', - help=_('Volume to backup (name or ID)'), - ) - parser.add_argument( - '--container', - metavar='', - required=False, - help=_('Optional backup container name'), - ) - parser.add_argument( - '--name', - metavar='', - help=_('Name of the backup'), - ) - parser.add_argument( - '--description', - metavar='', - help=_('Description of the backup'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume_id = utils.find_resource(volume_client.volumes, - parsed_args.volume).id - backup = volume_client.backups.create( - volume_id, - parsed_args.container, - parsed_args.name, - parsed_args.description - ) - - backup._info.pop('links') - return zip(*sorted(backup._info.items())) - - -class DeleteVolumeBackup(command.Command): - _description = _("Delete volume backup(s)") - - def get_parser(self, prog_name): - parser = super(DeleteVolumeBackup, self).get_parser(prog_name) - parser.add_argument( - 'backups', - metavar='', - nargs="+", - help=_('Backup(s) to delete (name or ID)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - result = 0 - - for i in 
parsed_args.backups: - try: - backup_id = utils.find_resource( - volume_client.backups, i).id - volume_client.backups.delete(backup_id) - except Exception as e: - result += 1 - LOG.error(_("Failed to delete backup with " - "name or ID '%(backup)s': %(e)s"), - {'backup': i, 'e': e}) - - if result > 0: - total = len(parsed_args.backups) - msg = (_("%(result)s of %(total)s backups failed " - "to delete.") % {'result': result, 'total': total}) - raise exceptions.CommandError(msg) - - -class ListVolumeBackup(command.Lister): - _description = _("List volume backups") - - def get_parser(self, prog_name): - parser = super(ListVolumeBackup, self).get_parser(prog_name) - parser.add_argument( - '--long', - action='store_true', - default=False, - help=_('List additional fields in output'), - ) - parser.add_argument( - "--name", - metavar="", - help=_("Filters results by the backup name") - ) - parser.add_argument( - "--status", - metavar="", - choices=['creating', 'available', 'deleting', - 'error', 'restoring', 'error_restoring'], - help=_("Filters results by the backup status " - "('creating', 'available', 'deleting', " - "'error', 'restoring' or 'error_restoring')") - ) - parser.add_argument( - "--volume", - metavar="", - help=_("Filters results by the volume which they " - "backup (name or ID)") - ) - parser.add_argument( - '--all-projects', - action='store_true', - default=False, - help=_('Include all projects (admin only)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - - if parsed_args.long: - columns = ['ID', 'Name', 'Description', 'Status', 'Size', - 'Availability Zone', 'Volume ID', 'Container'] - column_headers = copy.deepcopy(columns) - column_headers[6] = 'Volume' - else: - columns = ['ID', 'Name', 'Description', 'Status', 'Size'] - column_headers = columns - - # Cache the volume list - volume_cache = {} - try: - for s in volume_client.volumes.list(): - volume_cache[s.id] = s - except Exception: - # Just forget it if there's any trouble - pass - VolumeIdColumnWithCache = functools.partial(VolumeIdColumn, - volume_cache=volume_cache) - - filter_volume_id = None - if parsed_args.volume: - filter_volume_id = utils.find_resource(volume_client.volumes, - parsed_args.volume).id - search_opts = { - 'name': parsed_args.name, - 'status': parsed_args.status, - 'volume_id': filter_volume_id, - 'all_tenants': parsed_args.all_projects, - } - data = volume_client.backups.list( - search_opts=search_opts, - ) - - return (column_headers, - (utils.get_item_properties( - s, columns, - formatters={'Volume ID': VolumeIdColumnWithCache}, - ) for s in data)) - - -class RestoreVolumeBackup(command.Command): - _description = _("Restore volume backup") - - def get_parser(self, prog_name): - parser = super(RestoreVolumeBackup, self).get_parser(prog_name) - parser.add_argument( - 'backup', - metavar='', - help=_('Backup to restore (name or ID)') - ) - parser.add_argument( - 'volume', - metavar='', - nargs='?', - help=_('Volume to restore to (name or ID) (default to None)') - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - backup = utils.find_resource( - volume_client.backups, parsed_args.backup, - ) - volume_id = None - if parsed_args.volume is not None: - volume_id = utils.find_resource( - volume_client.volumes, - parsed_args.volume, - ).id - return volume_client.restores.restore(backup.id, volume_id) - - -class ShowVolumeBackup(command.ShowOne): - _description = _("Display volume backup 
details") - - def get_parser(self, prog_name): - parser = super(ShowVolumeBackup, self).get_parser(prog_name) - parser.add_argument( - 'backup', - metavar='', - help=_('Backup to display (name or ID)') - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - backup = utils.find_resource(volume_client.backups, - parsed_args.backup) - backup._info.pop('links') - return zip(*sorted(backup._info.items())) diff --git a/openstackclient/volume/v1/volume_snapshot.py b/openstackclient/volume/v1/volume_snapshot.py deleted file mode 100644 index 2d1f0359fc..0000000000 --- a/openstackclient/volume/v1/volume_snapshot.py +++ /dev/null @@ -1,375 +0,0 @@ -# Copyright 2012-2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Volume v1 Snapshot action implementations""" - -import copy -import functools -import logging - -from cliff import columns as cliff_columns -from osc_lib.cli import format_columns -from osc_lib.cli import parseractions -from osc_lib.command import command -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.i18n import _ - - -LOG = logging.getLogger(__name__) - - -class VolumeIdColumn(cliff_columns.FormattableColumn): - """Formattable column for volume ID column. - - Unlike the parent FormattableColumn class, the initializer of the - class takes volume_cache as the second argument. - osc_lib.utils.get_item_properties instantiate cliff FormattableColumn - object with a single parameter "column value", so you need to pass - a partially initialized class like - ``functools.partial(VolumeIdColumn, volume_cache)``. - """ - - def __init__(self, value, volume_cache=None): - super(VolumeIdColumn, self).__init__(value) - self._volume_cache = volume_cache or {} - - def human_readable(self): - """Return a volume name if available - - :rtype: either the volume ID or name - """ - volume_id = self._value - volume = volume_id - if volume_id in self._volume_cache.keys(): - volume = self._volume_cache[volume_id].display_name - return volume - - -class CreateVolumeSnapshot(command.ShowOne): - _description = _("Create new volume snapshot") - - def get_parser(self, prog_name): - parser = super(CreateVolumeSnapshot, self).get_parser(prog_name) - parser.add_argument( - 'snapshot_name', - metavar='', - help=_('Name of the new snapshot'), - ) - parser.add_argument( - '--volume', - metavar='', - help=_('Volume to snapshot (name or ID) ' - '(default is )'), - ) - parser.add_argument( - '--description', - metavar='', - help=_('Description of the snapshot'), - ) - parser.add_argument( - '--force', - dest='force', - action='store_true', - default=False, - help=_('Create a snapshot attached to an instance. 
' - 'Default is False'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume = parsed_args.volume - if not parsed_args.volume: - volume = parsed_args.snapshot_name - volume_id = utils.find_resource(volume_client.volumes, - volume).id - snapshot = volume_client.volume_snapshots.create( - volume_id, - parsed_args.force, - parsed_args.snapshot_name, - parsed_args.description - ) - - snapshot._info.update( - {'properties': - format_columns.DictColumn(snapshot._info.pop('metadata'))} - ) - - return zip(*sorted(snapshot._info.items())) - - -class DeleteVolumeSnapshot(command.Command): - _description = _("Delete volume snapshot(s)") - - def get_parser(self, prog_name): - parser = super(DeleteVolumeSnapshot, self).get_parser(prog_name) - parser.add_argument( - 'snapshots', - metavar='', - nargs="+", - help=_('Snapshot(s) to delete (name or ID)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - result = 0 - - for i in parsed_args.snapshots: - try: - snapshot_id = utils.find_resource( - volume_client.volume_snapshots, i).id - volume_client.volume_snapshots.delete(snapshot_id) - except Exception as e: - result += 1 - LOG.error(_("Failed to delete snapshot with " - "name or ID '%(snapshot)s': %(e)s"), - {'snapshot': i, 'e': e}) - - if result > 0: - total = len(parsed_args.snapshots) - msg = (_("%(result)s of %(total)s snapshots failed " - "to delete.") % {'result': result, 'total': total}) - raise exceptions.CommandError(msg) - - -class ListVolumeSnapshot(command.Lister): - _description = _("List volume snapshots") - - def get_parser(self, prog_name): - parser = super(ListVolumeSnapshot, self).get_parser(prog_name) - parser.add_argument( - '--all-projects', - action='store_true', - default=False, - help=_('Include all projects (admin only)'), - ) - parser.add_argument( - '--long', - action='store_true', - default=False, - help=_('List additional fields in output'), - ) - parser.add_argument( - '--name', - metavar='', - default=None, - help=_('Filters results by a name.') - ) - parser.add_argument( - '--status', - metavar='', - choices=['available', 'error', 'creating', 'deleting', - 'error_deleting'], - help=_("Filters results by a status. 
" - "('available', 'error', 'creating', 'deleting'" - " or 'error_deleting')") - ) - parser.add_argument( - '--volume', - metavar='', - default=None, - help=_('Filters results by a volume (name or ID).') - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - - if parsed_args.long: - columns = ['ID', 'Display Name', 'Display Description', 'Status', - 'Size', 'Created At', 'Volume ID', 'Metadata'] - column_headers = copy.deepcopy(columns) - column_headers[6] = 'Volume' - column_headers[7] = 'Properties' - else: - columns = ['ID', 'Display Name', 'Display Description', 'Status', - 'Size'] - column_headers = copy.deepcopy(columns) - - # Always update Name and Description - column_headers[1] = 'Name' - column_headers[2] = 'Description' - - # Cache the volume list - volume_cache = {} - try: - for s in volume_client.volumes.list(): - volume_cache[s.id] = s - except Exception: - # Just forget it if there's any trouble - pass - VolumeIdColumnWithCache = functools.partial(VolumeIdColumn, - volume_cache=volume_cache) - - volume_id = None - if parsed_args.volume: - volume_id = utils.find_resource( - volume_client.volumes, parsed_args.volume).id - - search_opts = { - 'all_tenants': parsed_args.all_projects, - 'display_name': parsed_args.name, - 'status': parsed_args.status, - 'volume_id': volume_id, - } - - data = volume_client.volume_snapshots.list( - search_opts=search_opts) - return (column_headers, - (utils.get_item_properties( - s, columns, - formatters={'Metadata': format_columns.DictColumn, - 'Volume ID': VolumeIdColumnWithCache}, - ) for s in data)) - - -class SetVolumeSnapshot(command.Command): - _description = _("Set volume snapshot properties") - - def get_parser(self, prog_name): - parser = super(SetVolumeSnapshot, self).get_parser(prog_name) - parser.add_argument( - 'snapshot', - metavar='', - help=_('Snapshot to modify (name or ID)') - ) - parser.add_argument( - '--name', - metavar='', - help=_('New snapshot name') - ) - parser.add_argument( - '--description', - metavar='', - help=_('New snapshot description') - ) - parser.add_argument( - "--no-property", - dest="no_property", - action="store_true", - help=_("Remove all properties from " - "(specify both --no-property and --property to " - "remove the current properties before setting " - "new properties.)"), - ) - parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - help=_('Property to add/change for this snapshot ' - '(repeat option to set multiple properties)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - snapshot = utils.find_resource(volume_client.volume_snapshots, - parsed_args.snapshot) - - result = 0 - if parsed_args.no_property: - try: - key_list = snapshot.metadata.keys() - volume_client.volume_snapshots.delete_metadata( - snapshot.id, - list(key_list), - ) - except Exception as e: - LOG.error(_("Failed to clean snapshot properties: %s"), e) - result += 1 - - if parsed_args.property: - try: - volume_client.volume_snapshots.set_metadata( - snapshot.id, parsed_args.property) - except Exception as e: - LOG.error(_("Failed to set snapshot property: %s"), e) - result += 1 - - kwargs = {} - if parsed_args.name: - kwargs['display_name'] = parsed_args.name - if parsed_args.description: - kwargs['display_description'] = parsed_args.description - if kwargs: - try: - snapshot.update(**kwargs) - except Exception as e: - LOG.error(_("Failed to update snapshot display name " - "or 
display description: %s"), e) - result += 1 - - if result > 0: - raise exceptions.CommandError(_("One or more of the " - "set operations failed")) - - -class ShowVolumeSnapshot(command.ShowOne): - _description = _("Display volume snapshot details") - - def get_parser(self, prog_name): - parser = super(ShowVolumeSnapshot, self).get_parser(prog_name) - parser.add_argument( - 'snapshot', - metavar='', - help=_('Snapshot to display (name or ID)') - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - snapshot = utils.find_resource(volume_client.volume_snapshots, - parsed_args.snapshot) - - snapshot._info.update( - {'properties': - format_columns.DictColumn(snapshot._info.pop('metadata'))} - ) - - return zip(*sorted(snapshot._info.items())) - - -class UnsetVolumeSnapshot(command.Command): - _description = _("Unset volume snapshot properties") - - def get_parser(self, prog_name): - parser = super(UnsetVolumeSnapshot, self).get_parser(prog_name) - parser.add_argument( - 'snapshot', - metavar='', - help=_('Snapshot to modify (name or ID)'), - ) - parser.add_argument( - '--property', - metavar='', - action='append', - help=_('Property to remove from snapshot ' - '(repeat option to remove multiple properties)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - snapshot = utils.find_resource( - volume_client.volume_snapshots, parsed_args.snapshot) - - if parsed_args.property: - volume_client.volume_snapshots.delete_metadata( - snapshot.id, - parsed_args.property, - ) diff --git a/openstackclient/volume/v1/volume_type.py b/openstackclient/volume/v1/volume_type.py deleted file mode 100644 index c584943e10..0000000000 --- a/openstackclient/volume/v1/volume_type.py +++ /dev/null @@ -1,442 +0,0 @@ -# Copyright 2012-2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Volume v1 Type action implementations""" - -import functools -import logging - -from cliff import columns as cliff_columns -from osc_lib.cli import format_columns -from osc_lib.cli import parseractions -from osc_lib.command import command -from osc_lib import exceptions -from osc_lib import utils - -from openstackclient.i18n import _ - - -LOG = logging.getLogger(__name__) - - -class EncryptionInfoColumn(cliff_columns.FormattableColumn): - """Formattable column for encryption info column. - - Unlike the parent FormattableColumn class, the initializer of the - class takes encryption_data as the second argument. - osc_lib.utils.get_item_properties instantiate cliff FormattableColumn - object with a single parameter "column value", so you need to pass - a partially initialized class like - ``functools.partial(EncryptionInfoColumn encryption_data)``. 
- """ - - def __init__(self, value, encryption_data=None): - super(EncryptionInfoColumn, self).__init__(value) - self._encryption_data = encryption_data or {} - - def _get_encryption_info(self): - type_id = self._value - return self._encryption_data.get(type_id) - - def human_readable(self): - encryption_info = self._get_encryption_info() - if encryption_info: - return utils.format_dict(encryption_info) - else: - return '-' - - def machine_readable(self): - return self._get_encryption_info() - - -def _create_encryption_type(volume_client, volume_type, parsed_args): - if not parsed_args.encryption_provider: - msg = _("'--encryption-provider' should be specified while " - "creating a new encryption type") - raise exceptions.CommandError(msg) - # set the default of control location while creating - control_location = 'front-end' - if parsed_args.encryption_control_location: - control_location = parsed_args.encryption_control_location - body = { - 'provider': parsed_args.encryption_provider, - 'cipher': parsed_args.encryption_cipher, - 'key_size': parsed_args.encryption_key_size, - 'control_location': control_location - } - encryption = volume_client.volume_encryption_types.create( - volume_type, body) - return encryption - - -class CreateVolumeType(command.ShowOne): - _description = _("Create new volume type") - - def get_parser(self, prog_name): - parser = super(CreateVolumeType, self).get_parser(prog_name) - parser.add_argument( - 'name', - metavar='', - help=_('Volume type name'), - ) - parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - help=_('Set a property on this volume type ' - '(repeat option to set multiple properties)'), - ) - # TODO(Huanxuan Ao): Add choices for each "--encryption-*" option. - parser.add_argument( - '--encryption-provider', - metavar='', - help=_('Set the encryption provider format for ' - 'this volume type (e.g "luks" or "plain") (admin only) ' - '(This option is required when setting encryption type ' - 'of a volume. Consider using other encryption options ' - 'such as: "--encryption-cipher", "--encryption-key-size" ' - 'and "--encryption-control-location")'), - ) - parser.add_argument( - '--encryption-cipher', - metavar='', - help=_('Set the encryption algorithm or mode for this ' - 'volume type (e.g "aes-xts-plain64") (admin only)'), - ) - parser.add_argument( - '--encryption-key-size', - metavar='', - type=int, - help=_('Set the size of the encryption key of this ' - 'volume type (e.g "128" or "256") (admin only)'), - ) - parser.add_argument( - '--encryption-control-location', - metavar='', - choices=['front-end', 'back-end'], - help=_('Set the notional service where the encryption is ' - 'performed ("front-end" or "back-end") (admin only) ' - '(The default value for this option is "front-end" ' - 'when setting encryption type of a volume. 
Consider ' - 'using other encryption options such as: ' - '"--encryption-cipher", "--encryption-key-size" and ' - '"--encryption-provider")'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume_type = volume_client.volume_types.create(parsed_args.name) - volume_type._info.pop('extra_specs') - if parsed_args.property: - result = volume_type.set_keys(parsed_args.property) - volume_type._info.update( - {'properties': format_columns.DictColumn(result)}) - if (parsed_args.encryption_provider or - parsed_args.encryption_cipher or - parsed_args.encryption_key_size or - parsed_args.encryption_control_location): - try: - # create new encryption - encryption = _create_encryption_type( - volume_client, volume_type, parsed_args) - except Exception as e: - LOG.error(_("Failed to set encryption information for this " - "volume type: %s"), e) - # add encryption info in result - encryption._info.pop("volume_type_id", None) - volume_type._info.update( - {'encryption': format_columns.DictColumn(encryption._info)}) - volume_type._info.pop("os-volume-type-access:is_public", None) - - return zip(*sorted(volume_type._info.items())) - - -class DeleteVolumeType(command.Command): - _description = _("Delete volume type(s)") - - def get_parser(self, prog_name): - parser = super(DeleteVolumeType, self).get_parser(prog_name) - parser.add_argument( - 'volume_types', - metavar='', - nargs='+', - help=_('Volume type(s) to delete (name or ID)'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - result = 0 - - for volume_type in parsed_args.volume_types: - try: - vol_type = utils.find_resource(volume_client.volume_types, - volume_type) - - volume_client.volume_types.delete(vol_type) - except Exception as e: - result += 1 - LOG.error(_("Failed to delete volume type with " - "name or ID '%(volume_type)s': %(e)s") - % {'volume_type': volume_type, 'e': e}) - - if result > 0: - total = len(parsed_args.volume_types) - msg = (_("%(result)s of %(total)s volume types failed " - "to delete.") % {'result': result, 'total': total}) - raise exceptions.CommandError(msg) - - -class ListVolumeType(command.Lister): - _description = _("List volume types") - - def get_parser(self, prog_name): - parser = super(ListVolumeType, self).get_parser(prog_name) - parser.add_argument( - '--long', - action='store_true', - default=False, - help=_('List additional fields in output') - ) - parser.add_argument( - "--encryption-type", - action="store_true", - help=_("Display encryption information for each volume type " - "(admin only)"), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - if parsed_args.long: - columns = ['ID', 'Name', 'Is Public', 'Extra Specs'] - column_headers = ['ID', 'Name', 'Is Public', 'Properties'] - else: - columns = ['ID', 'Name', 'Is Public'] - column_headers = ['ID', 'Name', 'Is Public'] - data = volume_client.volume_types.list() - - formatters = {'Extra Specs': format_columns.DictColumn} - - if parsed_args.encryption_type: - encryption = {} - for d in volume_client.volume_encryption_types.list(): - volume_type_id = d._info['volume_type_id'] - # remove some redundant information - del_key = [ - 'deleted', - 'created_at', - 'updated_at', - 'deleted_at', - 'volume_type_id' - ] - for key in del_key: - d._info.pop(key, None) - # save the encryption information with their volume type ID - encryption[volume_type_id] = d._info - # We need to get 
volume type ID, then show encryption - # information according to the ID, so use "id" to keep - # difference to the real "ID" column. - columns += ['id'] - column_headers += ['Encryption'] - - _EncryptionInfoColumn = functools.partial( - EncryptionInfoColumn, encryption_data=encryption) - formatters['id'] = _EncryptionInfoColumn - - return (column_headers, - (utils.get_item_properties( - s, columns, - formatters=formatters, - ) for s in data)) - - -class SetVolumeType(command.Command): - _description = _("Set volume type properties") - - def get_parser(self, prog_name): - parser = super(SetVolumeType, self).get_parser(prog_name) - parser.add_argument( - 'volume_type', - metavar='', - help=_('Volume type to modify (name or ID)'), - ) - parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - help=_('Set a property on this volume type ' - '(repeat option to set multiple properties)'), - ) - # TODO(Huanxuan Ao): Add choices for each "--encryption-*" option. - parser.add_argument( - '--encryption-provider', - metavar='', - help=_('Set the encryption provider format for ' - 'this volume type (e.g "luks" or "plain") (admin only) ' - '(This option is required when setting encryption type ' - 'of a volume. Consider using other encryption options ' - 'such as: "--encryption-cipher", "--encryption-key-size" ' - 'and "--encryption-control-location")'), - ) - parser.add_argument( - '--encryption-cipher', - metavar='', - help=_('Set the encryption algorithm or mode for this ' - 'volume type (e.g "aes-xts-plain64") (admin only)'), - ) - parser.add_argument( - '--encryption-key-size', - metavar='', - type=int, - help=_('Set the size of the encryption key of this ' - 'volume type (e.g "128" or "256") (admin only)'), - ) - parser.add_argument( - '--encryption-control-location', - metavar='', - choices=['front-end', 'back-end'], - help=_('Set the notional service where the encryption is ' - 'performed ("front-end" or "back-end") (admin only) ' - '(The default value for this option is "front-end" ' - 'when setting encryption type of a volume. 
Consider ' - 'using other encryption options such as: ' - '"--encryption-cipher", "--encryption-key-size" and ' - '"--encryption-provider")'), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume_type = utils.find_resource( - volume_client.volume_types, parsed_args.volume_type) - - result = 0 - if parsed_args.property: - try: - volume_type.set_keys(parsed_args.property) - except Exception as e: - LOG.error(_("Failed to set volume type property: %s"), e) - result += 1 - - if (parsed_args.encryption_provider or - parsed_args.encryption_cipher or - parsed_args.encryption_key_size or - parsed_args.encryption_control_location): - try: - _create_encryption_type( - volume_client, volume_type, parsed_args) - except Exception as e: - LOG.error(_("Failed to set encryption information for this " - "volume type: %s"), e) - result += 1 - - if result > 0: - raise exceptions.CommandError(_("Command Failed: One or more of" - " the operations failed")) - - -class ShowVolumeType(command.ShowOne): - _description = _("Display volume type details") - - def get_parser(self, prog_name): - parser = super(ShowVolumeType, self).get_parser(prog_name) - parser.add_argument( - "volume_type", - metavar="", - help=_("Volume type to display (name or ID)") - ) - parser.add_argument( - "--encryption-type", - action="store_true", - help=_("Display encryption information of this volume type " - "(admin only)"), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume_type = utils.find_resource( - volume_client.volume_types, parsed_args.volume_type) - properties = format_columns.DictColumn( - volume_type._info.pop('extra_specs')) - volume_type._info.update({'properties': properties}) - if parsed_args.encryption_type: - # show encryption type information for this volume type - try: - encryption = volume_client.volume_encryption_types.get( - volume_type.id) - encryption._info.pop("volume_type_id", None) - volume_type._info.update( - {'encryption': - format_columns.DictColumn(encryption._info)}) - except Exception as e: - LOG.error(_("Failed to display the encryption information " - "of this volume type: %s"), e) - volume_type._info.pop("os-volume-type-access:is_public", None) - return zip(*sorted(volume_type._info.items())) - - -class UnsetVolumeType(command.Command): - _description = _("Unset volume type properties") - - def get_parser(self, prog_name): - parser = super(UnsetVolumeType, self).get_parser(prog_name) - parser.add_argument( - 'volume_type', - metavar='', - help=_('Volume type to modify (name or ID)'), - ) - parser.add_argument( - '--property', - metavar='', - action='append', - help=_('Remove a property from this volume type ' - '(repeat option to remove multiple properties)'), - ) - parser.add_argument( - "--encryption-type", - action="store_true", - help=_("Remove the encryption type for this volume type " - "(admin only)"), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume_type = utils.find_resource( - volume_client.volume_types, - parsed_args.volume_type, - ) - - result = 0 - if parsed_args.property: - try: - volume_type.unset_keys(parsed_args.property) - except Exception as e: - LOG.error(_("Failed to unset volume type property: %s"), e) - result += 1 - if parsed_args.encryption_type: - try: - volume_client.volume_encryption_types.delete(volume_type) - except Exception as e: - LOG.error(_("Failed to remove the 
encryption type for this " - "volume type: %s"), e) - result += 1 - - if result > 0: - raise exceptions.CommandError(_("Command Failed: One or more of" - " the operations failed")) diff --git a/openstackclient/volume/v2/backup_record.py b/openstackclient/volume/v2/backup_record.py index 0d3af64168..93492f87f7 100644 --- a/openstackclient/volume/v2/backup_record.py +++ b/openstackclient/volume/v2/backup_record.py @@ -16,9 +16,9 @@ import logging -from osc_lib.command import command from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -26,17 +26,19 @@ class ExportBackupRecord(command.ShowOne): - _description = _("""Export volume backup details. + _description = _( + """Export volume backup details. Backup information can be imported into a new service instance to be able to -restore.""") +restore.""" + ) def get_parser(self, prog_name): - parser = super(ExportBackupRecord, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "backup", metavar="", - help=_("Backup to export (name or ID)") + help=_("Backup to export (name or ID)"), ) return parser @@ -55,29 +57,31 @@ def take_action(self, parsed_args): class ImportBackupRecord(command.ShowOne): - _description = _("""Import volume backup details. + _description = _( + """Import volume backup details. Exported backup details contain the metadata necessary to restore to a new or -rebuilt service instance""") +rebuilt service instance""" + ) def get_parser(self, prog_name): - parser = super(ImportBackupRecord, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "backup_service", metavar="", - help=_("Backup service containing the backup.") + help=_("Backup service containing the backup."), ) parser.add_argument( "backup_metadata", metavar="", - help=_("Encoded backup metadata from export.") + help=_("Encoded backup metadata from export."), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume backup_data = volume_client.backups.import_record( - parsed_args.backup_service, - parsed_args.backup_metadata) + parsed_args.backup_service, parsed_args.backup_metadata + ) backup_data.pop('links', None) return zip(*sorted(backup_data.items())) diff --git a/openstackclient/volume/v2/consistency_group.py b/openstackclient/volume/v2/consistency_group.py index 77da6f64c7..4910bb129e 100644 --- a/openstackclient/volume/v2/consistency_group.py +++ b/openstackclient/volume/v2/consistency_group.py @@ -18,10 +18,10 @@ import logging from osc_lib.cli import format_columns -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -33,14 +33,14 @@ def _find_volumes(parsed_args_volumes, volume_client): uuid = '' for volume in parsed_args_volumes: try: - volume_id = utils.find_resource( - volume_client.volumes, volume).id + volume_id = utils.find_resource(volume_client.volumes, volume).id uuid += volume_id + ',' except Exception as e: result += 1 - LOG.error(_("Failed to find volume with " - "name or ID '%(volume)s':%(e)s") - % {'volume': volume, 'e': e}) + LOG.error( + _("Failed to find volume with name or ID '%(volume)s':%(e)s") + % {'volume': volume, 'e': e} + ) return result, uuid @@ -49,7 +49,7 @@ class AddVolumeToConsistencyGroup(command.Command): _description = _("Add volume(s) to consistency group") def get_parser(self, prog_name): - parser = super(AddVolumeToConsistencyGroup, 
self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'consistency_group', metavar="", @@ -59,8 +59,10 @@ def get_parser(self, prog_name): 'volumes', metavar='', nargs='+', - help=_('Volume(s) to add to (name or ID) ' - '(repeat option to add multiple volumes)'), + help=_( + 'Volume(s) to add to (name or ID) ' + '(repeat option to add multiple volumes)' + ), ) return parser @@ -70,23 +72,26 @@ def take_action(self, parsed_args): if result > 0: total = len(parsed_args.volumes) - LOG.error(_("%(result)s of %(total)s volumes failed " - "to add.") % {'result': result, 'total': total}) + LOG.error( + _("%(result)s of %(total)s volumes failed to add.") + % {'result': result, 'total': total} + ) if add_uuid: add_uuid = add_uuid.rstrip(',') consistency_group_id = utils.find_resource( - volume_client.consistencygroups, - parsed_args.consistency_group).id + volume_client.consistencygroups, parsed_args.consistency_group + ).id volume_client.consistencygroups.update( - consistency_group_id, add_volumes=add_uuid) + consistency_group_id, add_volumes=add_uuid + ) class CreateConsistencyGroup(command.ShowOne): _description = _("Create new consistency group.") def get_parser(self, prog_name): - parser = super(CreateConsistencyGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "name", metavar="", @@ -143,18 +148,20 @@ def take_action(self, parsed_args): volume_client = self.app.client_manager.volume if parsed_args.volume_type: volume_type_id = utils.find_resource( - volume_client.volume_types, - parsed_args.volume_type).id + volume_client.volume_types, parsed_args.volume_type + ).id consistency_group = volume_client.consistencygroups.create( volume_type_id, name=parsed_args.name, description=parsed_args.description, - availability_zone=parsed_args.availability_zone + availability_zone=parsed_args.availability_zone, ) else: if parsed_args.availability_zone: - msg = _("'--availability-zone' option will not work " - "if creating consistency group from source") + msg = _( + "'--availability-zone' option will not work " + "if creating consistency group from source" + ) LOG.warning(msg) consistency_group_id = None @@ -186,7 +193,7 @@ class DeleteConsistencyGroup(command.Command): _description = _("Delete consistency group(s).") def get_parser(self, prog_name): - parser = super(DeleteConsistencyGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'consistency_groups', metavar='', @@ -208,19 +215,26 @@ def take_action(self, parsed_args): for i in parsed_args.consistency_groups: try: consistency_group_id = utils.find_resource( - volume_client.consistencygroups, i).id + volume_client.consistencygroups, i + ).id volume_client.consistencygroups.delete( - consistency_group_id, parsed_args.force) + consistency_group_id, parsed_args.force + ) except Exception as e: result += 1 - LOG.error(_("Failed to delete consistency group with " - "name or ID '%(consistency_group)s':%(e)s") - % {'consistency_group': i, 'e': e}) + LOG.error( + _( + "Failed to delete consistency group with " + "name or ID '%(consistency_group)s':%(e)s" + ) + % {'consistency_group': i, 'e': e} + ) if result > 0: total = len(parsed_args.consistency_groups) - msg = (_("%(result)s of %(total)s consistency groups failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s consistency groups failed to delete." 
+ ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -228,45 +242,58 @@ class ListConsistencyGroup(command.Lister): _description = _("List consistency groups.") def get_parser(self, prog_name): - parser = super(ListConsistencyGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--all-projects', action="store_true", - help=_('Show details for all projects. Admin only. ' - '(defaults to False)') + help=_( + 'Show details for all projects. Admin only. ' + '(defaults to False)' + ), ) parser.add_argument( '--long', action="store_true", - help=_('List additional fields in output') + help=_('List additional fields in output'), ) return parser def take_action(self, parsed_args): if parsed_args.long: - columns = ['ID', 'Status', 'Availability Zone', - 'Name', 'Description', 'Volume Types'] + columns = [ + 'ID', + 'Status', + 'Availability Zone', + 'Name', + 'Description', + 'Volume Types', + ] else: columns = ['ID', 'Status', 'Name'] volume_client = self.app.client_manager.volume consistency_groups = volume_client.consistencygroups.list( detailed=True, - search_opts={'all_tenants': parsed_args.all_projects} + search_opts={'all_tenants': parsed_args.all_projects}, ) - return (columns, ( - utils.get_item_properties( - s, columns, - formatters={'Volume Types': format_columns.ListColumn}) - for s in consistency_groups)) + return ( + columns, + ( + utils.get_item_properties( + s, + columns, + formatters={'Volume Types': format_columns.ListColumn}, + ) + for s in consistency_groups + ), + ) class RemoveVolumeFromConsistencyGroup(command.Command): _description = _("Remove volume(s) from consistency group") def get_parser(self, prog_name): - parser = \ - super(RemoveVolumeFromConsistencyGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'consistency_group', metavar="", @@ -276,8 +303,10 @@ def get_parser(self, prog_name): 'volumes', metavar='', nargs='+', - help=_('Volume(s) to remove from (name or ID) ' - '(repeat option to remove multiple volumes)'), + help=_( + 'Volume(s) to remove from (name or ID) ' + '(repeat option to remove multiple volumes)' + ), ) return parser @@ -287,27 +316,30 @@ def take_action(self, parsed_args): if result > 0: total = len(parsed_args.volumes) - LOG.error(_("%(result)s of %(total)s volumes failed " - "to remove.") % {'result': result, 'total': total}) + LOG.error( + _("%(result)s of %(total)s volumes failed to remove.") + % {'result': result, 'total': total} + ) if remove_uuid: remove_uuid = remove_uuid.rstrip(',') consistency_group_id = utils.find_resource( - volume_client.consistencygroups, - parsed_args.consistency_group).id + volume_client.consistencygroups, parsed_args.consistency_group + ).id volume_client.consistencygroups.update( - consistency_group_id, remove_volumes=remove_uuid) + consistency_group_id, remove_volumes=remove_uuid + ) class SetConsistencyGroup(command.Command): _description = _("Set consistency group properties") def get_parser(self, prog_name): - parser = super(SetConsistencyGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'consistency_group', metavar='', - help=_('Consistency group to modify (name or ID)') + help=_('Consistency group to modify (name or ID)'), ) parser.add_argument( '--name', @@ -330,27 +362,28 @@ def take_action(self, parsed_args): kwargs['description'] = parsed_args.description if kwargs: consistency_group_id = utils.find_resource( - volume_client.consistencygroups, - 
parsed_args.consistency_group).id + volume_client.consistencygroups, parsed_args.consistency_group + ).id volume_client.consistencygroups.update( - consistency_group_id, **kwargs) + consistency_group_id, **kwargs + ) class ShowConsistencyGroup(command.ShowOne): _description = _("Display consistency group details.") def get_parser(self, prog_name): - parser = super(ShowConsistencyGroup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "consistency_group", metavar="", - help=_("Consistency group to display (name or ID)") + help=_("Consistency group to display (name or ID)"), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume consistency_group = utils.find_resource( - volume_client.consistencygroups, - parsed_args.consistency_group) + volume_client.consistencygroups, parsed_args.consistency_group + ) return zip(*sorted(consistency_group._info.items())) diff --git a/openstackclient/volume/v2/consistency_group_snapshot.py b/openstackclient/volume/v2/consistency_group_snapshot.py index 7d5ba82fb2..23c3f1034d 100644 --- a/openstackclient/volume/v2/consistency_group_snapshot.py +++ b/openstackclient/volume/v2/consistency_group_snapshot.py @@ -16,10 +16,10 @@ import logging -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -30,24 +30,25 @@ class CreateConsistencyGroupSnapshot(command.ShowOne): _description = _("Create new consistency group snapshot.") def get_parser(self, prog_name): - parser = super( - CreateConsistencyGroupSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "snapshot_name", metavar="", nargs="?", - help=_("Name of new consistency group snapshot (default to None)") + help=_("Name of new consistency group snapshot (default to None)"), ) parser.add_argument( "--consistency-group", metavar="", - help=_("Consistency group to snapshot (name or ID) " - "(default to be the same as )") + help=_( + "Consistency group to snapshot (name or ID) " + "(default to be the same as )" + ), ) parser.add_argument( "--description", metavar="", - help=_("Description of this consistency group snapshot") + help=_("Description of this consistency group snapshot"), ) return parser @@ -59,8 +60,8 @@ def take_action(self, parsed_args): # will be the same as the new consistency group snapshot name consistency_group = parsed_args.snapshot_name consistency_group_id = utils.find_resource( - volume_client.consistencygroups, - consistency_group).id + volume_client.consistencygroups, consistency_group + ).id consistency_group_snapshot = volume_client.cgsnapshots.create( consistency_group_id, name=parsed_args.snapshot_name, @@ -74,13 +75,12 @@ class DeleteConsistencyGroupSnapshot(command.Command): _description = _("Delete consistency group snapshot(s).") def get_parser(self, prog_name): - parser = super( - DeleteConsistencyGroupSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "consistency_group_snapshot", metavar="", nargs="+", - help=_("Consistency group snapshot(s) to delete (name or ID)") + help=_("Consistency group snapshot(s) to delete (name or ID)"), ) return parser @@ -90,20 +90,27 @@ def take_action(self, parsed_args): for snapshot in parsed_args.consistency_group_snapshot: try: - snapshot_id = utils.find_resource(volume_client.cgsnapshots, - snapshot).id + snapshot_id = utils.find_resource( + 
volume_client.cgsnapshots, snapshot + ).id volume_client.cgsnapshots.delete(snapshot_id) except Exception as e: result += 1 - LOG.error(_("Failed to delete consistency group snapshot " - "with name or ID '%(snapshot)s': %(e)s") - % {'snapshot': snapshot, 'e': e}) + LOG.error( + _( + "Failed to delete consistency group snapshot " + "with name or ID '%(snapshot)s': %(e)s" + ) + % {'snapshot': snapshot, 'e': e} + ) if result > 0: total = len(parsed_args.consistency_group_snapshot) - msg = (_("%(result)s of %(total)s consistency group snapshots " - "failed to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s consistency group snapshots " + "failed to delete." + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -111,38 +118,51 @@ class ListConsistencyGroupSnapshot(command.Lister): _description = _("List consistency group snapshots.") def get_parser(self, prog_name): - parser = super( - ListConsistencyGroupSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--all-projects', action="store_true", - help=_('Show detail for all projects (admin only) ' - '(defaults to False)') + help=_( + 'Show detail for all projects (admin only) (defaults to False)' + ), ) parser.add_argument( '--long', action="store_true", - help=_('List additional fields in output') + help=_('List additional fields in output'), ) parser.add_argument( '--status', metavar="", - choices=['available', 'error', 'creating', 'deleting', - 'error_deleting'], - help=_('Filters results by a status ("available", "error", ' - '"creating", "deleting" or "error_deleting")') + choices=[ + 'available', + 'error', + 'creating', + 'deleting', + 'error_deleting', + ], + help=_( + 'Filters results by a status ("available", "error", ' + '"creating", "deleting" or "error_deleting")' + ), ) parser.add_argument( '--consistency-group', metavar="", - help=_('Filters results by a consistency group (name or ID)') + help=_('Filters results by a consistency group (name or ID)'), ) return parser def take_action(self, parsed_args): if parsed_args.long: - columns = ['ID', 'Status', 'ConsistencyGroup ID', - 'Name', 'Description', 'Created At'] + columns = [ + 'ID', + 'Status', + 'ConsistencyGroup ID', + 'Name', + 'Description', + 'Created At', + ] else: columns = ['ID', 'Status', 'Name'] volume_client = self.app.client_manager.volume @@ -162,28 +182,30 @@ def take_action(self, parsed_args): search_opts=search_opts, ) - return (columns, ( - utils.get_item_properties( - s, columns) - for s in consistency_group_snapshots)) + return ( + columns, + ( + utils.get_item_properties(s, columns) + for s in consistency_group_snapshots + ), + ) class ShowConsistencyGroupSnapshot(command.ShowOne): _description = _("Display consistency group snapshot details") def get_parser(self, prog_name): - parser = super( - ShowConsistencyGroupSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "consistency_group_snapshot", metavar="", - help=_("Consistency group snapshot to display (name or ID)") + help=_("Consistency group snapshot to display (name or ID)"), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume consistency_group_snapshot = utils.find_resource( - volume_client.cgsnapshots, - parsed_args.consistency_group_snapshot) + volume_client.cgsnapshots, parsed_args.consistency_group_snapshot + ) return zip(*sorted(consistency_group_snapshot._info.items())) diff --git 
a/openstackclient/volume/v2/qos_specs.py b/openstackclient/volume/v2/qos_specs.py index e6e6b9f8b7..39aa99eb42 100644 --- a/openstackclient/volume/v2/qos_specs.py +++ b/openstackclient/volume/v2/qos_specs.py @@ -19,10 +19,10 @@ from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -33,7 +33,7 @@ class AssociateQos(command.Command): _description = _("Associate a QoS specification to a volume type") def get_parser(self, prog_name): - parser = super(AssociateQos, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_spec', metavar='', @@ -48,10 +48,12 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) - volume_type = utils.find_resource(volume_client.volume_types, - parsed_args.volume_type) + qos_spec = utils.find_resource( + volume_client.qos_specs, parsed_args.qos_spec + ) + volume_type = utils.find_resource( + volume_client.volume_types, parsed_args.volume_type + ) volume_client.qos_specs.associate(qos_spec.id, volume_type.id) @@ -60,7 +62,7 @@ class CreateQos(command.ShowOne): _description = _("Create new QoS specification") def get_parser(self, prog_name): - parser = super(CreateQos, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'name', metavar='', @@ -72,16 +74,22 @@ def get_parser(self, prog_name): metavar='', choices=consumer_choices, default='both', - help=(_('Consumer of the QoS. Valid consumers: %s ' - "(defaults to 'both')") % - utils.format_list(consumer_choices)) + help=( + _( + 'Consumer of the QoS. 
Valid consumers: %s ' + "(defaults to 'both')" + ) + % utils.format_list(consumer_choices) + ), ) parser.add_argument( '--property', metavar='', action=parseractions.KeyValueAction, - help=_('Set a QoS specification property ' - '(repeat option to set multiple properties)'), + help=_( + 'Set a QoS specification property ' + '(repeat option to set multiple properties)' + ), ) return parser @@ -96,8 +104,11 @@ def take_action(self, parsed_args): qos_spec = volume_client.qos_specs.create(parsed_args.name, specs) qos_spec._info.update( - {'properties': - format_columns.DictColumn(qos_spec._info.pop('specs'))} + { + 'properties': format_columns.DictColumn( + qos_spec._info.pop('specs') + ) + } ) return zip(*sorted(qos_spec._info.items())) @@ -106,7 +117,7 @@ class DeleteQos(command.Command): _description = _("Delete QoS specification") def get_parser(self, prog_name): - parser = super(DeleteQos, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_specs', metavar='', @@ -117,7 +128,7 @@ def get_parser(self, prog_name): '--force', action='store_true', default=False, - help=_("Allow to delete in-use QoS specification(s)") + help=_("Allow to delete in-use QoS specification(s)"), ) return parser @@ -131,14 +142,19 @@ def take_action(self, parsed_args): volume_client.qos_specs.delete(qos_spec.id, parsed_args.force) except Exception as e: result += 1 - LOG.error(_("Failed to delete QoS specification with " - "name or ID '%(qos)s': %(e)s") - % {'qos': i, 'e': e}) + LOG.error( + _( + "Failed to delete QoS specification with " + "name or ID '%(qos)s': %(e)s" + ) + % {'qos': i, 'e': e} + ) if result > 0: total = len(parsed_args.qos_specs) - msg = (_("%(result)s of %(total)s QoS specifications failed" - " to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s QoS specifications failed to delete." 
+ ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -146,7 +162,7 @@ class DisassociateQos(command.Command): _description = _("Disassociate a QoS specification from a volume type") def get_parser(self, prog_name): - parser = super(DisassociateQos, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_spec', metavar='', @@ -169,12 +185,14 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) + qos_spec = utils.find_resource( + volume_client.qos_specs, parsed_args.qos_spec + ) if parsed_args.volume_type: - volume_type = utils.find_resource(volume_client.volume_types, - parsed_args.volume_type) + volume_type = utils.find_resource( + volume_client.volume_types, parsed_args.volume_type + ) volume_client.qos_specs.disassociate(qos_spec.id, volume_type.id) elif parsed_args.all: volume_client.qos_specs.disassociate_all(qos_spec.id) @@ -204,53 +222,97 @@ def take_action(self, parsed_args): raise display_columns = ( - 'ID', 'Name', 'Consumer', 'Associations', 'Properties') + 'ID', + 'Name', + 'Consumer', + 'Associations', + 'Properties', + ) columns = ('ID', 'Name', 'Consumer', 'Associations', 'Specs') - return (display_columns, - (utils.get_dict_properties( - s._info, columns, + return ( + display_columns, + ( + utils.get_dict_properties( + s._info, + columns, formatters={ 'Specs': format_columns.DictColumn, - 'Associations': format_columns.ListColumn + 'Associations': format_columns.ListColumn, }, - ) for s in qos_specs_list)) + ) + for s in qos_specs_list + ), + ) class SetQos(command.Command): _description = _("Set QoS specification properties") def get_parser(self, prog_name): - parser = super(SetQos, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_spec', metavar='', help=_('QoS specification to modify (name or ID)'), ) + parser.add_argument( + '--no-property', + dest='no_property', + action='store_true', + help=_( + 'Remove all properties from ' + '(specify both --no-property and --property to remove the ' + 'current properties before setting new properties)' + ), + ) parser.add_argument( '--property', metavar='', action=parseractions.KeyValueAction, - help=_('Property to add or modify for this QoS specification ' - '(repeat option to set multiple properties)'), + help=_( + 'Property to add or modify for this QoS specification ' + '(repeat option to set multiple properties)' + ), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) + qos_spec = utils.find_resource( + volume_client.qos_specs, parsed_args.qos_spec + ) + + result = 0 + if parsed_args.no_property: + try: + key_list = list(qos_spec._info['specs'].keys()) + volume_client.qos_specs.unset_keys(qos_spec.id, key_list) + except Exception as e: + LOG.error(_("Failed to clean qos properties: %s"), e) + result += 1 if parsed_args.property: - volume_client.qos_specs.set_keys(qos_spec.id, - parsed_args.property) + try: + volume_client.qos_specs.set_keys( + qos_spec.id, + parsed_args.property, + ) + except Exception as e: + LOG.error(_("Failed to set qos property: %s"), e) + result += 1 + + if result > 0: + raise exceptions.CommandError( + _("One or more of the set operations failed") + ) class ShowQos(command.ShowOne): _description = _("Display QoS 
specification details") def get_parser(self, prog_name): - parser = super(ShowQos, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_spec', metavar='', @@ -260,19 +322,25 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) + qos_spec = utils.find_resource( + volume_client.qos_specs, parsed_args.qos_spec + ) qos_associations = volume_client.qos_specs.get_associations(qos_spec) if qos_associations: - associations = [association.name - for association in qos_associations] - qos_spec._info.update({ - 'associations': format_columns.ListColumn(associations) - }) + associations = [ + association.name for association in qos_associations + ] + qos_spec._info.update( + {'associations': format_columns.ListColumn(associations)} + ) qos_spec._info.update( - {'properties': - format_columns.DictColumn(qos_spec._info.pop('specs'))}) + { + 'properties': format_columns.DictColumn( + qos_spec._info.pop('specs') + ) + } + ) return zip(*sorted(qos_spec._info.items())) @@ -281,7 +349,7 @@ class UnsetQos(command.Command): _description = _("Unset QoS specification properties") def get_parser(self, prog_name): - parser = super(UnsetQos, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'qos_spec', metavar='', @@ -292,16 +360,20 @@ def get_parser(self, prog_name): metavar='', action='append', default=[], - help=_('Property to remove from the QoS specification. ' - '(repeat option to unset multiple properties)'), + help=_( + 'Property to remove from the QoS specification. ' + '(repeat option to unset multiple properties)' + ), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume - qos_spec = utils.find_resource(volume_client.qos_specs, - parsed_args.qos_spec) + qos_spec = utils.find_resource( + volume_client.qos_specs, parsed_args.qos_spec + ) if parsed_args.property: - volume_client.qos_specs.unset_keys(qos_spec.id, - parsed_args.property) + volume_client.qos_specs.unset_keys( + qos_spec.id, parsed_args.property + ) diff --git a/openstackclient/volume/v2/service.py b/openstackclient/volume/v2/service.py index d468c6ff1e..7777e7e638 100644 --- a/openstackclient/volume/v2/service.py +++ b/openstackclient/volume/v2/service.py @@ -14,10 +14,10 @@ """Service action implementations""" -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -25,106 +25,115 @@ class ListService(command.Lister): _description = _("List service command") def get_parser(self, prog_name): - parser = super(ListService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--host", metavar="", - help=_("List services on specified host (name only)") + help=_("List services on specified host (name only)"), ) parser.add_argument( "--service", metavar="", - help=_("List only specified service (name only)") + help=_("List only specified service (name only)"), ) parser.add_argument( "--long", action="store_true", default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) return parser def take_action(self, parsed_args): - service_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume + + columns: tuple[str, ...] 
= ( + "binary", + "host", + "availability_zone", + "status", + "state", + "updated_at", + ) + column_names: tuple[str, ...] = ( + "Binary", + "Host", + "Zone", + "Status", + "State", + "Updated At", + ) if parsed_args.long: - columns = [ - "Binary", - "Host", - "Zone", - "Status", - "State", - "Updated At", - "Disabled Reason" - ] - else: - columns = [ - "Binary", - "Host", - "Zone", - "Status", - "State", - "Updated At" - ] - - data = service_client.services.list(parsed_args.host, - parsed_args.service) - return (columns, - (utils.get_item_properties( - s, columns, - ) for s in data)) + columns += ("disabled_reason",) + column_names += ("Disabled Reason",) + + data = volume_client.services( + host=parsed_args.host, binary=parsed_args.service + ) + return ( + column_names, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) class SetService(command.Command): _description = _("Set volume service properties") def get_parser(self, prog_name): - parser = super(SetService, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "host", metavar="", - help=_("Name of host") + help=_("Name of host"), ) parser.add_argument( "service", metavar="", - help=_("Name of service (Binary name)") + help=_("Name of service (Binary name)"), ) enabled_group = parser.add_mutually_exclusive_group() enabled_group.add_argument( - "--enable", - action="store_true", - help=_("Enable volume service") + "--enable", action="store_true", help=_("Enable volume service") ) enabled_group.add_argument( - "--disable", - action="store_true", - help=_("Disable volume service") + "--disable", action="store_true", help=_("Disable volume service") ) parser.add_argument( "--disable-reason", metavar="", - help=_("Reason for disabling the service " - "(should be used with --disable option)") + help=_( + "Reason for disabling the service " + "(should be used with --disable option)" + ), ) return parser def take_action(self, parsed_args): if parsed_args.disable_reason and not parsed_args.disable: - msg = _("Cannot specify option --disable-reason without " - "--disable specified.") + msg = _( + "Cannot specify option --disable-reason without " + "--disable specified." 
+ ) raise exceptions.CommandError(msg) - service_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume + + service = volume_client.find_service( + parsed_args.service, ignore_missing=False, host=parsed_args.host + ) + if parsed_args.enable: - service_client.services.enable( - parsed_args.host, parsed_args.service) + service.enable(volume_client) + if parsed_args.disable: - if parsed_args.disable_reason: - service_client.services.disable_log_reason( - parsed_args.host, - parsed_args.service, - parsed_args.disable_reason) - else: - service_client.services.disable( - parsed_args.host, parsed_args.service) + service.disable( + volume_client, + reason=parsed_args.disable_reason, + ) diff --git a/openstackclient/volume/v2/volume.py b/openstackclient/volume/v2/volume.py index a5e5a6703b..61cce04f7b 100644 --- a/openstackclient/volume/v2/volume.py +++ b/openstackclient/volume/v2/volume.py @@ -18,14 +18,19 @@ import copy import functools import logging +import typing as ty from cliff import columns as cliff_columns +from openstack.block_storage.v2 import volume as _volume +from openstack import exceptions as sdk_exceptions from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient.api import volume_v2 +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -33,7 +38,30 @@ LOG = logging.getLogger(__name__) -class AttachmentsColumn(cliff_columns.FormattableColumn): +class KeyValueHintAction(argparse.Action): + """Uses KeyValueAction or KeyValueAppendAction based on the given key""" + + APPEND_KEYS = ('same_host', 'different_host') + + def __init__(self, *args, **kwargs): + self._key_value_action = parseractions.KeyValueAction(*args, **kwargs) + self._key_value_append_action = parseractions.KeyValueAppendAction( + *args, **kwargs + ) + super().__init__(*args, **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + if values.startswith(self.APPEND_KEYS): + self._key_value_append_action( + parser, namespace, values, option_string=option_string + ) + else: + self._key_value_action( + parser, namespace, values, option_string=option_string + ) + + +class AttachmentsColumn(cliff_columns.FormattableColumn[list[ty.Any]]): """Formattable column for attachments column. Unlike the parent FormattableColumn class, the initializer of the @@ -45,7 +73,7 @@ class takes server_cache as the second argument. """ def __init__(self, value, server_cache=None): - super(AttachmentsColumn, self).__init__(value) + super().__init__(value) self._server_cache = server_cache or {} def human_readable(self): @@ -60,27 +88,69 @@ def human_readable(self): if server in self._server_cache.keys(): server = self._server_cache[server].name device = attachment['device'] - msg += 'Attached to %s on %s ' % (server, device) + msg += f'Attached to {server} on {device} ' return msg -def _check_size_arg(args): - """Check whether --size option is required or not. - - Require size parameter only in case when snapshot or source - volume is not specified. 
- """ +def _format_volume(volume: _volume.Volume) -> dict[str, ty.Any]: + # Some columns returned by openstacksdk should not be shown because they're + # either irrelevant or duplicates + ignored_columns = { + # computed columns + 'location', + # create-only columns + 'OS-SCH-HNT:scheduler_hints', + 'imageRef', + # unnecessary columns + 'links', + } + optional_columns = { + # only present if part of a consistency group + 'consistencygroup_id', + # only present if there are image properties associated + 'volume_image_metadata', + } + + info = volume.to_dict(original_names=True) + data = {} + for key, value in info.items(): + if key in ignored_columns: + continue + + if key in optional_columns: + if info[key] is None: + continue + + data[key] = value + + data.update( + { + 'properties': format_columns.DictColumn(data.pop('metadata')), + 'type': data.pop('volume_type'), + } + ) - if ((args.snapshot or args.source or args.backup) - is None and args.size is None): - msg = _("--size is a required option if snapshot, backup " - "or source volume are not specified.") - raise exceptions.CommandError(msg) + return data class CreateVolume(command.ShowOne): _description = _("Create new volume") + @staticmethod + def _check_size_arg(args): + """Check whether --size option is required or not. + + Require size parameter only in case when snapshot or source + volume is not specified. + """ + + if (args.snapshot or args.source) is None and args.size is None: + msg = _( + "--size is a required option if --snapshot or --source are " + "not specified" + ) + raise exceptions.CommandError(msg) + def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( @@ -93,8 +163,10 @@ def get_parser(self, prog_name): "--size", metavar="", type=int, - help=_("Volume size in GB (required unless --snapshot, " - "--source or --backup is specified)"), + help=_( + "Volume size in GB (required unless --snapshot or " + "--source specified)" + ), ) parser.add_argument( "--type", @@ -117,12 +189,6 @@ def get_parser(self, prog_name): metavar="", help=_("Volume to clone (name or ID)"), ) - source_group.add_argument( - "--backup", - metavar="", - help=_("Restore backup to a volume (name or ID) " - "(supported by --os-volume-api-version 3.47 or later)"), - ) source_group.add_argument( "--source-replicated", metavar="", @@ -147,81 +213,91 @@ def get_parser(self, prog_name): "--property", metavar="", action=parseractions.KeyValueAction, - help=_("Set a property to this volume " - "(repeat option to set multiple properties)"), + dest="properties", + help=_( + "Set a property to this volume " + "(repeat option to set multiple properties)" + ), ) parser.add_argument( "--hint", metavar="", - action=parseractions.KeyValueAction, - help=_("Arbitrary scheduler hint key-value pairs to help boot " - "an instance (repeat option to set multiple hints)"), + action=KeyValueHintAction, + help=_( + "Arbitrary scheduler hint key-value pairs to help creating " + "a volume. Repeat the option to set multiple hints. 
" + "'same_host' and 'different_host' get values appended when " + "repeated, all other keys take the last given value" + ), ) bootable_group = parser.add_mutually_exclusive_group() bootable_group.add_argument( "--bootable", action="store_true", - help=_("Mark volume as bootable") + dest="bootable", + default=None, + help=_("Mark volume as bootable"), ) bootable_group.add_argument( "--non-bootable", - action="store_true", - help=_("Mark volume as non-bootable (default)") + action="store_false", + dest="bootable", + default=None, + help=_("Mark volume as non-bootable (default)"), ) readonly_group = parser.add_mutually_exclusive_group() readonly_group.add_argument( "--read-only", action="store_true", - help=_("Set volume to read-only access mode") + dest="read_only", + default=None, + help=_("Set volume to read-only access mode"), ) readonly_group.add_argument( "--read-write", - action="store_true", - help=_("Set volume to read-write access mode (default)") + action="store_false", + dest="read_only", + default=None, + help=_("Set volume to read-write access mode (default)"), ) return parser def take_action(self, parsed_args): - _check_size_arg(parsed_args) + self._check_size_arg(parsed_args) # size is validated in the above call to # _check_size_arg where we check that size # should be passed if we are not creating a - # volume from snapshot, backup or source volume + # volume from snapshot or source volume size = parsed_args.size - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume image_client = self.app.client_manager.image - if parsed_args.backup and not ( - volume_client.api_version.matches('3.47')): - msg = _("--os-volume-api-version 3.47 or greater is required " - "to create a volume from backup.") - raise exceptions.CommandError(msg) - source_volume = None if parsed_args.source: - source_volume_obj = utils.find_resource( - volume_client.volumes, - parsed_args.source) + source_volume_obj = volume_client.find_volume( + parsed_args.source, ignore_missing=False + ) source_volume = source_volume_obj.id size = max(size or 0, source_volume_obj.size) consistency_group = None if parsed_args.consistency_group: - consistency_group = utils.find_resource( - volume_client.consistencygroups, - parsed_args.consistency_group).id + consistency_group = volume_v2.find_consistency_group( + volume_client, parsed_args.consistency_group + )['id'] image = None if parsed_args.image: - image = image_client.find_image(parsed_args.image, - ignore_missing=False).id + image = image_client.find_image( + parsed_args.image, ignore_missing=False + ).id snapshot = None if parsed_args.snapshot: - snapshot_obj = utils.find_resource( - volume_client.volume_snapshots, - parsed_args.snapshot) + snapshot_obj = volume_client.find_snapshot( + parsed_args.snapshot, ignore_missing=False + ) snapshot = snapshot_obj.id # Cinder requires a value for size when creating a volume # even if creating from a snapshot. Cinder will create the @@ -231,63 +307,51 @@ def take_action(self, parsed_args): # snapshot size. 
size = max(size or 0, snapshot_obj.size) - backup = None - if parsed_args.backup: - backup_obj = utils.find_resource( - volume_client.backups, - parsed_args.backup) - backup = backup_obj.id - # As above - size = max(size or 0, backup_obj.size) - - volume = volume_client.volumes.create( + volume = volume_client.create_volume( size=size, snapshot_id=snapshot, name=parsed_args.name, description=parsed_args.description, volume_type=parsed_args.type, availability_zone=parsed_args.availability_zone, - metadata=parsed_args.property, - imageRef=image, - source_volid=source_volume, - consistencygroup_id=consistency_group, + metadata=parsed_args.properties, + image_id=image, + source_volume_id=source_volume, + consistency_group_id=consistency_group, scheduler_hints=parsed_args.hint, - backup_id=backup, ) - if parsed_args.bootable or parsed_args.non_bootable: + if parsed_args.bootable is not None: try: if utils.wait_for_status( - volume_client.volumes.get, + volume_client.get_volume, volume.id, success_status=['available'], error_status=['error'], - sleep_time=1 + sleep_time=1, ): - volume_client.volumes.set_bootable( - volume.id, - parsed_args.bootable + volume_client.set_volume_bootable_status( + volume, parsed_args.bootable ) else: msg = _( - "Volume status is not available for setting boot " - "state" + "Volume status is not available for setting boot state" ) raise exceptions.CommandError(msg) except Exception as e: LOG.error(_("Failed to set volume bootable property: %s"), e) - if parsed_args.read_only or parsed_args.read_write: + + if parsed_args.read_only is not None: try: if utils.wait_for_status( - volume_client.volumes.get, + volume_client.get_volume, volume.id, success_status=['available'], error_status=['error'], - sleep_time=1 + sleep_time=1, ): - volume_client.volumes.update_readonly_flag( - volume.id, - parsed_args.read_only + volume_client.set_volume_readonly( + volume, parsed_args.read_only ) else: msg = _( @@ -296,70 +360,74 @@ def take_action(self, parsed_args): ) raise exceptions.CommandError(msg) except Exception as e: - LOG.error(_("Failed to set volume read-only access " - "mode flag: %s"), e) - - # Remove key links from being displayed - volume._info.update( - { - 'properties': - format_columns.DictColumn(volume._info.pop('metadata')), - 'type': volume._info.pop('volume_type') - } - ) - volume._info.pop("links", None) - return zip(*sorted(volume._info.items())) + LOG.error( + _("Failed to set volume read-only access mode flag: %s"), + e, + ) + + data = _format_volume(volume) + return zip(*sorted(data.items())) class DeleteVolume(command.Command): _description = _("Delete volume(s)") def get_parser(self, prog_name): - parser = super(DeleteVolume, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "volumes", metavar="", nargs="+", - help=_("Volume(s) to delete (name or ID)") + help=_("Volume(s) to delete (name or ID)"), ) group = parser.add_mutually_exclusive_group() group.add_argument( "--force", action="store_true", - help=_("Attempt forced removal of volume(s), regardless of state " - "(defaults to False)") + help=_( + "Attempt forced removal of volume(s), regardless of state " + "(defaults to False)" + ), ) group.add_argument( "--purge", action="store_true", - help=_("Remove any snapshots along with volume(s) " - "(defaults to False)") + help=_( + "Remove any snapshots along with volume(s) (defaults to False)" + ), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = 
self.app.client_manager.sdk_connection.volume result = 0 - for i in parsed_args.volumes: + for volume in parsed_args.volumes: try: - volume_obj = utils.find_resource( - volume_client.volumes, i) - if parsed_args.force: - volume_client.volumes.force_delete(volume_obj.id) - else: - volume_client.volumes.delete(volume_obj.id, - cascade=parsed_args.purge) + volume_obj = volume_client.find_volume( + volume, ignore_missing=False + ) + volume_client.delete_volume( + volume_obj.id, + force=parsed_args.force, + cascade=parsed_args.purge, + ) except Exception as e: result += 1 - LOG.error(_("Failed to delete volume with " - "name or ID '%(volume)s': %(e)s"), - {'volume': i, 'e': e}) + LOG.error( + _( + "Failed to delete volume with " + "name or ID '%(volume)s': %(e)s" + ), + {'volume': volume, 'e': e}, + ) if result > 0: total = len(parsed_args.volumes) - msg = (_("%(result)s of %(total)s volumes failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s volumes failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -367,17 +435,17 @@ class ListVolume(command.Lister): _description = _("List volumes") def get_parser(self, prog_name): - parser = super(ListVolume, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--project', metavar='', - help=_('Filter results by project (name or ID) (admin only)') + help=_('Filter results by project (name or ID) (admin only)'), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( '--user', metavar='', - help=_('Filter results by user (name or ID) (admin only)') + help=_('Filter results by user (name or ID) (admin only)'), ) identity_common.add_user_domain_option_to_parser(parser) parser.add_argument( @@ -402,22 +470,10 @@ def get_parser(self, prog_name): default=False, help=_('List additional fields in output'), ) - parser.add_argument( - '--marker', - metavar='', - help=_('The last volume ID of the previous page'), - ) - parser.add_argument( - '--limit', - type=int, - action=parseractions.NonNegativeAction, - metavar='', - help=_('Maximum number of volumes to display'), - ) + pagination.add_marker_pagination_option_to_parser(parser) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume identity_client = self.app.client_manager.identity @@ -447,30 +503,19 @@ def take_action(self, parsed_args): column_headers = copy.deepcopy(columns) column_headers[4] = 'Attached to' - # Cache the server list - server_cache = {} - try: - compute_client = self.app.client_manager.compute - for s in compute_client.servers.list(): - server_cache[s.id] = s - except Exception: - # Just forget it if there's any trouble - pass - AttachmentsColumnWithCache = functools.partial( - AttachmentsColumn, server_cache=server_cache) - project_id = None if parsed_args.project: project_id = identity_common.find_project( identity_client, parsed_args.project, - parsed_args.project_domain).id + parsed_args.project_domain, + ).id user_id = None if parsed_args.user: - user_id = identity_common.find_user(identity_client, - parsed_args.user, - parsed_args.user_domain).id + user_id = identity_common.find_user( + identity_client, parsed_args.user, parsed_args.user_domain + ).id # set value of 'all_tenants' when using project option all_projects = bool(parsed_args.project) or parsed_args.all_projects @@ -488,61 +533,103 @@ def take_action(self, parsed_args): marker=parsed_args.marker, limit=parsed_args.limit, ) + 
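For illustration only, the find-then-delete pattern DeleteVolume uses above can be exercised against a standalone openstacksdk connection. This is a minimal sketch, not part of the patch; the cloud name and volume name are placeholder assumptions:

import openstack

# Assumed clouds.yaml entry; substitute any configured cloud name.
conn = openstack.connect(cloud='devstack')

# Resolve the name or ID first, as the command does; ignore_missing=False
# makes a bad name raise instead of returning None.
volume = conn.block_storage.find_volume('scratch-volume', ignore_missing=False)

# force= mirrors --force (attempt deletion regardless of state) and
# cascade= mirrors --purge (remove dependent snapshots with the volume),
# matching the keyword arguments passed in the hunk above.
conn.block_storage.delete_volume(volume.id, force=False, cascade=True)

Resolving the name up front keeps any failure tied to the user-supplied identifier, which is why the command logs the original name or ID when a delete fails.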
+ do_server_list = False + + for vol in data: + if vol.status == 'in-use': + do_server_list = True + break + + # Cache the server list + server_cache = {} + if do_server_list: + try: + compute_client = self.app.client_manager.compute + for s in compute_client.servers(): + server_cache[s.id] = s + except sdk_exceptions.SDKException: # noqa: S110 + # Just forget it if there's any trouble + pass + AttachmentsColumnWithCache = functools.partial( + AttachmentsColumn, server_cache=server_cache + ) + column_headers = utils.backward_compat_col_lister( - column_headers, parsed_args.columns, {'Display Name': 'Name'}) + column_headers, parsed_args.columns, {'Display Name': 'Name'} + ) - return (column_headers, - (utils.get_item_properties( - s, columns, - formatters={'Metadata': format_columns.DictColumn, - 'Attachments': AttachmentsColumnWithCache}, - ) for s in data)) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={ + 'Metadata': format_columns.DictColumn, + 'Attachments': AttachmentsColumnWithCache, + }, + ) + for s in data + ), + ) class MigrateVolume(command.Command): _description = _("Migrate volume to a new host") def get_parser(self, prog_name): - parser = super(MigrateVolume, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'volume', metavar="", - help=_("Volume to migrate (name or ID)") + help=_("Volume to migrate (name or ID)"), ) parser.add_argument( '--host', metavar="", required=True, - help=_("Destination host (takes the form: host@backend-name#pool)") + help=_( + "Destination host (takes the form: host@backend-name#pool)" + ), ) parser.add_argument( '--force-host-copy', action="store_true", - help=_("Enable generic host-based force-migration, " - "which bypasses driver optimizations") + help=_( + "Enable generic host-based force-migration, " + "which bypasses driver optimizations" + ), ) parser.add_argument( '--lock-volume', action="store_true", - help=_("If specified, the volume state will be locked " - "and will not allow a migration to be aborted " - "(possibly by another operation)") + help=_( + "If specified, the volume state will be locked " + "and will not allow a migration to be aborted " + "(possibly by another operation)" + ), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume = utils.find_resource(volume_client.volumes, parsed_args.volume) - volume_client.volumes.migrate_volume(volume.id, parsed_args.host, - parsed_args.force_host_copy, - parsed_args.lock_volume,) + volume_client = self.app.client_manager.sdk_connection.volume + volume = volume_client.find_volume( + parsed_args.volume, ignore_missing=False + ) + volume_client.migrate_volume( + volume.id, + host=parsed_args.host, + force_host_copy=parsed_args.force_host_copy, + lock_volume=parsed_args.lock_volume, + ) class SetVolume(command.Command): _description = _("Set volume properties") def get_parser(self, prog_name): - parser = super(SetVolume, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'volume', metavar='', @@ -568,56 +655,78 @@ def get_parser(self, prog_name): "--no-property", dest="no_property", action="store_true", - help=_("Remove all properties from " - "(specify both --no-property and --property to " - "remove the current properties before setting " - "new properties.)"), + help=_( + "Remove all properties from " + "(specify both --no-property and --property to " + "remove the current properties before setting " + "new 
properties.)" + ), ) parser.add_argument( '--property', metavar='', action=parseractions.KeyValueAction, - help=_('Set a property on this volume ' - '(repeat option to set multiple properties)'), + dest="properties", + help=_( + 'Set a property on this volume ' + '(repeat option to set multiple properties)' + ), ) parser.add_argument( '--image-property', metavar='', action=parseractions.KeyValueAction, - help=_('Set an image property on this volume ' - '(repeat option to set multiple image properties)'), + dest="image_properties", + help=_( + 'Set an image property on this volume ' + '(repeat option to set multiple image properties)' + ), ) parser.add_argument( "--state", metavar="", - choices=['available', 'error', 'creating', 'deleting', - 'in-use', 'attaching', 'detaching', 'error_deleting', - 'maintenance'], - help=_('New volume state ("available", "error", "creating", ' - '"deleting", "in-use", "attaching", "detaching", ' - '"error_deleting" or "maintenance") (admin only) ' - '(This option simply changes the state of the volume ' - 'in the database with no regard to actual status, ' - 'exercise caution when using)'), + choices=[ + 'available', + 'error', + 'creating', + 'deleting', + 'in-use', + 'attaching', + 'detaching', + 'error_deleting', + 'maintenance', + ], + help=_( + 'New volume state ("available", "error", "creating", ' + '"deleting", "in-use", "attaching", "detaching", ' + '"error_deleting" or "maintenance") (admin only) ' + '(This option simply changes the state of the volume ' + 'in the database with no regard to actual status, ' + 'exercise caution when using)' + ), ) attached_group = parser.add_mutually_exclusive_group() attached_group.add_argument( "--attached", action="store_true", - help=_('Set volume attachment status to "attached" ' - '(admin only) ' - '(This option simply changes the state of the volume ' - 'in the database with no regard to actual status, ' - 'exercise caution when using)'), + help=_( + 'Set volume attachment status to "attached" ' + '(admin only) ' + '(This option simply changes the state of the volume ' + 'in the database with no regard to actual status, ' + 'exercise caution when using)' + ), ) attached_group.add_argument( "--detached", action="store_true", - help=_('Set volume attachment status to "detached" ' - '(admin only) ' - '(This option simply changes the state of the volume ' - 'in the database with no regard to actual status, ' - 'exercise caution when using)'), + help=_( + 'Set volume attachment status to "detached" ' + '(admin only) ' + '(This option simply changes the state of the volume ' + 'in the database with no regard to actual status, ' + 'exercise caution when using)' + ), ) parser.add_argument( '--type', @@ -628,31 +737,47 @@ def get_parser(self, prog_name): '--retype-policy', metavar='', choices=['never', 'on-demand'], - help=_('Migration policy while re-typing volume ' - '("never" or "on-demand", default is "never" ) ' - '(available only when --type option is specified)'), + help=argparse.SUPPRESS, + ) + parser.add_argument( + '--migration-policy', + metavar='', + choices=['never', 'on-demand'], + help=_( + 'Migration policy while re-typing volume ' + '("never" or "on-demand", default is "never" ) ' + '(available only when --type option is specified)' + ), ) bootable_group = parser.add_mutually_exclusive_group() bootable_group.add_argument( "--bootable", action="store_true", - help=_("Mark volume as bootable") + dest="bootable", + default=None, + help=_("Mark volume as bootable"), ) bootable_group.add_argument( 
"--non-bootable", - action="store_true", - help=_("Mark volume as non-bootable") + action="store_false", + dest="bootable", + default=None, + help=_("Mark volume as non-bootable"), ) readonly_group = parser.add_mutually_exclusive_group() readonly_group.add_argument( "--read-only", action="store_true", - help=_("Set volume to read-only access mode") + dest="read_only", + default=None, + help=_("Set volume to read-only access mode"), ) readonly_group.add_argument( "--read-write", - action="store_true", - help=_("Set volume to read-write access mode") + action="store_false", + dest="read_only", + default=None, + help=_("Set volume to read-write access mode"), ) return parser @@ -661,17 +786,30 @@ def take_action(self, parsed_args): volume = utils.find_resource(volume_client.volumes, parsed_args.volume) result = 0 + if parsed_args.retype_policy: + msg = _( + "The '--retype-policy' option has been deprecated in favor " + "of '--migration-policy' option. The '--retype-policy' option " + "will be removed in a future release. Please use " + "'--migration-policy' instead." + ) + self.log.warning(msg) + if parsed_args.size: try: if parsed_args.size <= volume.size: - msg = (_("New size must be greater than %s GB") - % volume.size) + msg = ( + _("New size must be greater than %s GB") % volume.size + ) raise exceptions.CommandError(msg) - if volume.status != 'available' and \ - not volume_client.api_version.matches('3.42'): - - msg = (_("Volume is in %s state, it must be available " - "before size can be extended") % volume.status) + if volume.status != 'available': + msg = ( + _( + "Volume is in %s state, it must be available " + "before size can be extended" + ) + % volume.status + ) raise exceptions.CommandError(msg) volume_client.volumes.extend(volume.id, parsed_args.size) except Exception as e: @@ -681,86 +819,104 @@ def take_action(self, parsed_args): if parsed_args.no_property: try: volume_client.volumes.delete_metadata( - volume.id, volume.metadata.keys()) + volume.id, volume.metadata.keys() + ) except Exception as e: LOG.error(_("Failed to clean volume properties: %s"), e) result += 1 - if parsed_args.property: + if parsed_args.properties: try: volume_client.volumes.set_metadata( - volume.id, parsed_args.property) + volume.id, parsed_args.properties + ) except Exception as e: - LOG.error(_("Failed to set volume property: %s"), e) + LOG.error(_("Failed to set volume properties: %s"), e) result += 1 - if parsed_args.image_property: + + if parsed_args.image_properties: try: volume_client.volumes.set_image_metadata( - volume.id, parsed_args.image_property) + volume.id, parsed_args.image_properties + ) except Exception as e: - LOG.error(_("Failed to set image property: %s"), e) + LOG.error(_("Failed to set image properties: %s"), e) result += 1 + if parsed_args.state: try: - volume_client.volumes.reset_state( - volume.id, parsed_args.state) + volume_client.volumes.reset_state(volume.id, parsed_args.state) except Exception as e: LOG.error(_("Failed to set volume state: %s"), e) result += 1 + if parsed_args.attached: try: volume_client.volumes.reset_state( - volume.id, state=None, - attach_status="attached") + volume.id, state=None, attach_status="attached" + ) except Exception as e: LOG.error(_("Failed to set volume attach-status: %s"), e) result += 1 + if parsed_args.detached: try: volume_client.volumes.reset_state( - volume.id, state=None, - attach_status="detached") + volume.id, state=None, attach_status="detached" + ) except Exception as e: LOG.error(_("Failed to set volume attach-status: %s"), 
e) result += 1 - if parsed_args.bootable or parsed_args.non_bootable: + + if parsed_args.bootable is not None: try: volume_client.volumes.set_bootable( - volume.id, parsed_args.bootable) + volume.id, parsed_args.bootable + ) except Exception as e: LOG.error(_("Failed to set volume bootable property: %s"), e) result += 1 - if parsed_args.read_only or parsed_args.read_write: + + if parsed_args.read_only is not None: try: volume_client.volumes.update_readonly_flag( - volume.id, - parsed_args.read_only) + volume.id, parsed_args.read_only + ) except Exception as e: - LOG.error(_("Failed to set volume read-only access " - "mode flag: %s"), e) + LOG.error( + _("Failed to set volume read-only access mode flag: %s"), + e, + ) result += 1 + + policy = parsed_args.migration_policy or parsed_args.retype_policy if parsed_args.type: # get the migration policy migration_policy = 'never' - if parsed_args.retype_policy: - migration_policy = parsed_args.retype_policy + if policy: + migration_policy = policy try: # find the volume type volume_type = utils.find_resource( - volume_client.volume_types, - parsed_args.type) + volume_client.volume_types, parsed_args.type + ) # reset to the new volume type volume_client.volumes.retype( - volume.id, - volume_type.id, - migration_policy) + volume.id, volume_type.id, migration_policy + ) except Exception as e: LOG.error(_("Failed to set volume type: %s"), e) result += 1 - elif parsed_args.retype_policy: - # If the "--retype-policy" is specified without "--type" - LOG.warning(_("'--retype-policy' option will not work " - "without '--type' option")) + elif policy: + # If the "--migration-policy" is specified without "--type" + LOG.warning( + _("'%s' option will not work without '--type' option") + % ( + '--migration-policy' + if parsed_args.migration_policy + else '--retype-policy' + ) + ) kwargs = {} if parsed_args.name: @@ -771,52 +927,48 @@ def take_action(self, parsed_args): try: volume_client.volumes.update(volume.id, **kwargs) except Exception as e: - LOG.error(_("Failed to update volume display name " - "or display description: %s"), e) + LOG.error( + _( + "Failed to update volume display name " + "or display description: %s" + ), + e, + ) result += 1 if result > 0: - raise exceptions.CommandError(_("One or more of the " - "set operations failed")) + raise exceptions.CommandError( + _("One or more of the set operations failed") + ) class ShowVolume(command.ShowOne): _description = _("Display volume details") def get_parser(self, prog_name): - parser = super(ShowVolume, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'volume', metavar="", - help=_("Volume to display (name or ID)") + help=_("Volume to display (name or ID)"), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - volume = utils.find_resource(volume_client.volumes, parsed_args.volume) - - # Special mapping for columns to make the output easier to read: - # 'metadata' --> 'properties' - # 'volume_type' --> 'type' - volume._info.update( - { - 'properties': - format_columns.DictColumn(volume._info.pop('metadata')), - 'type': volume._info.pop('volume_type'), - }, + volume_client = self.app.client_manager.sdk_connection.volume + volume = volume_client.find_volume( + parsed_args.volume, ignore_missing=False ) - # Remove key links from being displayed - volume._info.pop("links", None) - return zip(*sorted(volume._info.items())) + data = _format_volume(volume) + return zip(*sorted(data.items())) class 
UnsetVolume(command.Command): _description = _("Unset volume properties") def get_parser(self, prog_name): - parser = super(UnsetVolume, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'volume', metavar='', @@ -826,40 +978,48 @@ def get_parser(self, prog_name): '--property', metavar='', action='append', - help=_('Remove a property from volume ' - '(repeat option to remove multiple properties)'), + dest='properties', + help=_( + 'Remove a property from volume ' + '(repeat option to remove multiple properties)' + ), ) parser.add_argument( '--image-property', metavar='', action='append', - help=_('Remove an image property from volume ' - '(repeat option to remove multiple image properties)'), + dest='image_properties', + help=_( + 'Remove an image property from volume ' + '(repeat option to remove multiple image properties)' + ), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume - volume = utils.find_resource( - volume_client.volumes, parsed_args.volume) + volume = utils.find_resource(volume_client.volumes, parsed_args.volume) result = 0 - if parsed_args.property: + if parsed_args.properties: try: volume_client.volumes.delete_metadata( - volume.id, parsed_args.property) + volume.id, parsed_args.properties + ) except Exception as e: - LOG.error(_("Failed to unset volume property: %s"), e) + LOG.error(_("Failed to unset volume properties: %s"), e) result += 1 - if parsed_args.image_property: + if parsed_args.image_properties: try: volume_client.volumes.delete_image_metadata( - volume.id, parsed_args.image_property) + volume.id, parsed_args.image_properties + ) except Exception as e: - LOG.error(_("Failed to unset image property: %s"), e) + LOG.error(_("Failed to unset image properties: %s"), e) result += 1 if result > 0: - raise exceptions.CommandError(_("One or more of the " - "unset operations failed")) + raise exceptions.CommandError( + _("One or more of the unset operations failed") + ) diff --git a/openstackclient/volume/v2/volume_backend.py b/openstackclient/volume/v2/volume_backend.py index c5194d3509..e51e37bb9f 100644 --- a/openstackclient/volume/v2/volume_backend.py +++ b/openstackclient/volume/v2/volume_backend.py @@ -14,9 +14,10 @@ """Storage backend action implementations""" -from osc_lib.command import command +from osc_lib.cli import format_columns from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -24,16 +25,16 @@ class ShowCapability(command.Lister): _description = _("Show capability command") def get_parser(self, prog_name): - parser = super(ShowCapability, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "host", metavar="", - help=_("List capabilities of specified host (host@backend-name)") + help=_("List capabilities of specified host (host@backend-name)"), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume columns = [ 'Title', @@ -42,7 +43,7 @@ def take_action(self, parsed_args): 'Description', ] - data = volume_client.capabilities.get(parsed_args.host) + data = volume_client.get_capabilities(parsed_args.host) # The get capabilities API is... interesting. 
We only want the names of # the capabilities that can set for a backend through extra specs, so @@ -55,50 +56,45 @@ def take_action(self, parsed_args): capability_data['key'] = key print_data.append(capability_data) - return (columns, - (utils.get_dict_properties( - s, columns, - ) for s in print_data)) + return ( + columns, + ( + utils.get_dict_properties( + s, + columns, + ) + for s in print_data + ), + ) class ListPool(command.Lister): _description = _("List pool command") def get_parser(self, prog_name): - parser = super(ListPool, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--long", action="store_true", default=False, - help=_("Show detailed information about pools.") + help=_("Show detailed information about pools."), ) # TODO(smcginnis): Starting with Cinder microversion 3.33, user is also # able to pass in --filters with a = pair to filter on. return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume if parsed_args.long: columns = [ 'name', - 'storage_protocol', - 'thick_provisioning_support', - 'thin_provisioning_support', - 'total_volumes', - 'total_capacity_gb', - 'allocated_capacity_gb', - 'max_over_subscription_ratio', + 'capabilities', ] + headers = [ 'Name', - 'Protocol', - 'Thick', - 'Thin', - 'Volumes', - 'Capacity', - 'Allocated', - 'Max Over Ratio' + 'Capabilities', ] else: columns = [ @@ -106,8 +102,16 @@ def take_action(self, parsed_args): ] headers = columns - data = volume_client.pools.list(detailed=parsed_args.long) - return (headers, - (utils.get_item_properties( - s, columns, - ) for s in data)) + data = volume_client.backend_pools(detailed=parsed_args.long) + formatters = {'capabilities': format_columns.DictColumn} + return ( + headers, + ( + utils.get_item_properties( + s, + columns, + formatters=formatters, + ) + for s in data + ), + ) diff --git a/openstackclient/volume/v2/volume_backup.py b/openstackclient/volume/v2/volume_backup.py index d96b28e958..7dbe92c962 100644 --- a/openstackclient/volume/v2/volume_backup.py +++ b/openstackclient/volume/v2/volume_backup.py @@ -14,24 +14,21 @@ """Volume v2 Backup action implementations""" -import copy import functools import logging -from cinderclient import api_versions from cliff import columns as cliff_columns -from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ - LOG = logging.getLogger(__name__) -class VolumeIdColumn(cliff_columns.FormattableColumn): +class VolumeIdColumn(cliff_columns.FormattableColumn[str]): """Formattable column for volume ID column. Unlike the parent FormattableColumn class, the initializer of the @@ -43,7 +40,7 @@ class takes volume_cache as the second argument. 
""" def __init__(self, value, volume_cache=None): - super(VolumeIdColumn, self).__init__(value) + super().__init__(value) self._volume_cache = volume_cache or {} def human_readable(self): @@ -66,153 +63,127 @@ def get_parser(self, prog_name): parser.add_argument( "volume", metavar="", - help=_("Volume to backup (name or ID)") + help=_("Volume to backup (name or ID)"), ) parser.add_argument( - "--name", - metavar="", - help=_("Name of the backup") + "--name", metavar="", help=_("Name of the backup") ) parser.add_argument( "--description", metavar="", - help=_("Description of the backup") + help=_("Description of the backup"), ) parser.add_argument( "--container", metavar="", - help=_("Optional backup container name") + help=_("Optional backup container name"), ) parser.add_argument( "--snapshot", metavar="", - help=_("Snapshot to backup (name or ID)") + help=_("Snapshot to backup (name or ID)"), ) parser.add_argument( '--force', action='store_true', default=False, - help=_("Allow to back up an in-use volume") + help=_("Allow to back up an in-use volume"), ) parser.add_argument( '--incremental', action='store_true', default=False, - help=_("Perform an incremental backup") + help=_("Perform an incremental backup"), ) parser.add_argument( '--no-incremental', action='store_false', - help=_("Do not perform an incremental backup") - ) - parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - dest='properties', - help=_( - 'Set a property on this backup ' - '(repeat option to remove multiple values) ' - '(supported by --os-volume-api-version 3.43 or above)' - ), - ) - parser.add_argument( - '--availability-zone', - metavar='', - help=_( - 'AZ where the backup should be stored; by default it will be ' - 'the same as the source ' - '(supported by --os-volume-api-version 3.51 or above)' - ), + help=_("Do not perform an incremental backup"), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - volume_id = utils.find_resource( - volume_client.volumes, parsed_args.volume, + volume_id = volume_client.find_volume( + parsed_args.volume, + ignore_missing=False, ).id kwargs = {} if parsed_args.snapshot: - kwargs['snapshot_id'] = utils.find_resource( - volume_client.volume_snapshots, parsed_args.snapshot, + kwargs['snapshot_id'] = volume_client.find_snapshot( + parsed_args.snapshot, + ignore_missing=False, ).id - if parsed_args.properties: - if volume_client.api_version < api_versions.APIVersion('3.43'): - msg = _( - '--os-volume-api-version 3.43 or greater is required to ' - 'support the --property option' - ) - raise exceptions.CommandError(msg) - - kwargs['metadata'] = parsed_args.properties - - if parsed_args.availability_zone: - if volume_client.api_version < api_versions.APIVersion('3.51'): - msg = _( - '--os-volume-api-version 3.51 or greater is required to ' - 'support the --availability-zone option' - ) - raise exceptions.CommandError(msg) - - kwargs['availability_zone'] = parsed_args.availability_zone - - backup = volume_client.backups.create( - volume_id, + columns: tuple[str, ...] 
= ( + "id", + "name", + "volume_id", + ) + backup = volume_client.create_backup( + volume_id=volume_id, container=parsed_args.container, name=parsed_args.name, description=parsed_args.description, force=parsed_args.force, - incremental=parsed_args.incremental, + is_incremental=parsed_args.incremental, **kwargs, ) - backup._info.pop("links", None) - return zip(*sorted(backup._info.items())) + data = utils.get_dict_properties(backup, columns) + return (columns, data) class DeleteVolumeBackup(command.Command): _description = _("Delete volume backup(s)") def get_parser(self, prog_name): - parser = super(DeleteVolumeBackup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "backups", metavar="", nargs="+", - help=_("Backup(s) to delete (name or ID)") + help=_("Backup(s) to delete (name or ID)"), ) parser.add_argument( '--force', action='store_true', default=False, - help=_("Allow delete in state other than error or available") + help=_("Allow delete in state other than error or available"), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume result = 0 - for i in parsed_args.backups: + for backup in parsed_args.backups: try: - backup_id = utils.find_resource( - volume_client.backups, i, + backup_id = volume_client.find_backup( + backup, ignore_missing=False ).id - volume_client.backups.delete(backup_id, parsed_args.force) + volume_client.delete_backup( + backup_id, + ignore_missing=False, + force=parsed_args.force, + ) except Exception as e: result += 1 - LOG.error(_("Failed to delete backup with " - "name or ID '%(backup)s': %(e)s") - % {'backup': i, 'e': e}) + LOG.error( + _( + "Failed to delete backup with " + "name or ID '%(backup)s': %(e)s" + ) + % {'backup': backup, 'e': e} + ) if result > 0: total = len(parsed_args.backups) msg = _("%(result)s of %(total)s backups failed to delete.") % { - 'result': result, 'total': total, + 'result': result, + 'total': total, } raise exceptions.CommandError(msg) @@ -221,24 +192,28 @@ class ListVolumeBackup(command.Lister): _description = _("List volume backups") def get_parser(self, prog_name): - parser = super(ListVolumeBackup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "--long", action="store_true", default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) parser.add_argument( "--name", metavar="", - help=_("Filters results by the backup name") + help=_("Filters results by the backup name"), ) parser.add_argument( "--status", metavar="", choices=[ - 'creating', 'available', 'deleting', - 'error', 'restoring', 'error_restoring', + 'creating', + 'available', + 'deleting', + 'error', + 'restoring', + 'error_restoring', ], help=_( "Filters results by the backup status, one of: " @@ -253,45 +228,36 @@ def get_parser(self, prog_name): "Filters results by the volume which they backup (name or ID)" ), ) - parser.add_argument( - '--marker', - metavar='', - help=_('The last backup of the previous page (name or ID)'), - ) - parser.add_argument( - '--limit', - type=int, - action=parseractions.NonNegativeAction, - metavar='', - help=_('Maximum number of backups to display'), - ) + pagination.add_marker_pagination_option_to_parser(parser) parser.add_argument( '--all-projects', action='store_true', default=False, help=_('Include all projects (admin only)'), ) - # TODO(stephenfin): Add once we have an equivalent command 
for - # 'cinder list-filters' - # parser.add_argument( - # '--filter', - # metavar='', - # action=parseractions.KeyValueAction, - # dest='filters', - # help=_( - # "Filter key and value pairs. Use 'foo' to " - # "check enabled filters from server. Use 'key~=value' for " - # "inexact filtering if the key supports " - # "(supported by --os-volume-api-version 3.33 or above)" - # ), - # ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - - columns = ('id', 'name', 'description', 'status', 'size') - column_headers = ('ID', 'Name', 'Description', 'Status', 'Size') + volume_client = self.app.client_manager.sdk_connection.volume + + columns: tuple[str, ...] = ( + 'id', + 'name', + 'description', + 'status', + 'size', + 'is_incremental', + 'created_at', + ) + column_headers: tuple[str, ...] = ( + 'ID', + 'Name', + 'Description', + 'Status', + 'Size', + 'Incremental', + 'Created At', + ) if parsed_args.long: columns += ('availability_zone', 'volume_id', 'container') column_headers += ('Availability Zone', 'Volume', 'Container') @@ -299,43 +265,45 @@ def take_action(self, parsed_args): # Cache the volume list volume_cache = {} try: - for s in volume_client.volumes.list(): + for s in volume_client.volumes(): volume_cache[s.id] = s - except Exception: + except Exception: # noqa: S110 # Just forget it if there's any trouble pass _VolumeIdColumn = functools.partial( - VolumeIdColumn, volume_cache=volume_cache) + VolumeIdColumn, volume_cache=volume_cache + ) filter_volume_id = None if parsed_args.volume: try: - filter_volume_id = utils.find_resource( - volume_client.volumes, parsed_args.volume, + filter_volume_id = volume_client.find_volume( + parsed_args.volume, + ignore_missing=False, ).id except exceptions.CommandError: # Volume with that ID does not exist, but search for backups # for that volume nevertheless - LOG.debug("No volume with ID %s existing, continuing to " - "search for backups for that volume ID", - parsed_args.volume) + LOG.debug( + "No volume with ID %s existing, continuing to " + "search for backups for that volume ID", + parsed_args.volume, + ) filter_volume_id = parsed_args.volume marker_backup_id = None if parsed_args.marker: - marker_backup_id = utils.find_resource( - volume_client.backups, parsed_args.marker, + marker_backup_id = volume_client.find_backup( + parsed_args.marker, + ignore_missing=False, ).id - search_opts = { - 'name': parsed_args.name, - 'status': parsed_args.status, - 'volume_id': filter_volume_id, - 'all_tenants': parsed_args.all_projects, - } - data = volume_client.backups.list( - search_opts=search_opts, + data = volume_client.backups( + name=parsed_args.name, + status=parsed_args.status, + volume_id=filter_volume_id, + all_tenants=parsed_args.all_projects, marker=marker_backup_id, limit=parsed_args.limit, ) @@ -344,8 +312,11 @@ def take_action(self, parsed_args): column_headers, ( utils.get_item_properties( - s, columns, formatters={'volume_id': _VolumeIdColumn}, - ) for s in data + s, + columns, + formatters={'volume_id': _VolumeIdColumn}, + ) + for s in data ), ) @@ -354,11 +325,11 @@ class RestoreVolumeBackup(command.ShowOne): _description = _("Restore volume backup") def get_parser(self, prog_name): - parser = super(RestoreVolumeBackup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "backup", metavar="", - help=_("Backup to restore (name or ID)") + help=_("Backup to restore (name or ID)"), ) parser.add_argument( "volume", @@ -368,29 +339,37 @@ def get_parser(self, 
prog_name): "Volume to restore to " "(name or ID for existing volume, name only for new volume) " "(default to None)" - ) + ), ) parser.add_argument( "--force", action="store_true", help=_( - "Restore the backup to an existing volume " - "(default to False)" - ) + "Restore the backup to an existing volume (default to False)" + ), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - backup = utils.find_resource(volume_client.backups, parsed_args.backup) + backup = volume_client.find_backup( + parsed_args.backup, + ignore_missing=False, + ) + + columns: tuple[str, ...] = ( + 'id', + 'volume_id', + 'volume_name', + ) volume_name = None volume_id = None try: - volume_id = utils.find_resource( - volume_client.volumes, + volume_id = volume_client.find_volume( parsed_args.volume, + ignore_missing=False, ).id except Exception: volume_name = parsed_args.volume @@ -401,13 +380,18 @@ def take_action(self, parsed_args): msg = _( "Volume '%s' already exists; if you want to restore the " "backup to it you need to specify the '--force' option" - ) % parsed_args.volume - raise exceptions.CommandError(msg) + ) + raise exceptions.CommandError(msg % parsed_args.volume) - return volume_client.restores.restore( - backup.id, volume_id, volume_name, + restore = volume_client.restore_backup( + backup.id, + volume_id=volume_id, + name=volume_name, ) + data = utils.get_dict_properties(restore, columns) + return (columns, data) + class SetVolumeBackup(command.Command): _description = _("Set volume backup properties") @@ -417,23 +401,7 @@ def get_parser(self, prog_name): parser.add_argument( "backup", metavar="", - help=_("Backup to modify (name or ID)") - ) - parser.add_argument( - '--name', - metavar='', - help=_( - 'New backup name' - '(supported by --os-volume-api-version 3.9 or above)' - ), - ) - parser.add_argument( - '--description', - metavar='', - help=_( - 'New backup description ' - '(supported by --os-volume-api-version 3.9 or above)' - ), + help=_("Backup to modify (name or ID)"), ) parser.add_argument( '--state', @@ -446,95 +414,24 @@ def get_parser(self, prog_name): 'exercise caution when using)' ), ) - parser.add_argument( - '--no-property', - action='store_true', - help=_( - 'Remove all properties from this backup ' - '(specify both --no-property and --property to remove the ' - 'current properties before setting new properties)' - ), - ) - parser.add_argument( - '--property', - metavar='', - action=parseractions.KeyValueAction, - dest='properties', - default={}, - help=_( - 'Set a property on this backup ' - '(repeat option to set multiple values) ' - '(supported by --os-volume-api-version 3.43 or above)' - ), - ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - backup = utils.find_resource( - volume_client.backups, parsed_args.backup) + volume_client = self.app.client_manager.sdk_connection.volume + + backup = volume_client.find_backup( + parsed_args.backup, + ignore_missing=False, + ) result = 0 if parsed_args.state: try: - volume_client.backups.reset_state( - backup.id, parsed_args.state) - except Exception as e: - LOG.error(_("Failed to set backup state: %s"), e) - result += 1 - - kwargs = {} - - if parsed_args.name: - if volume_client.api_version < api_versions.APIVersion('3.9'): - msg = _( - '--os-volume-api-version 3.9 or greater is required to ' - 'support the --name option' - ) - raise exceptions.CommandError(msg) - - 
kwargs['name'] = parsed_args.name - - if parsed_args.description: - if volume_client.api_version < api_versions.APIVersion('3.9'): - msg = _( - '--os-volume-api-version 3.9 or greater is required to ' - 'support the --description option' - ) - raise exceptions.CommandError(msg) - - kwargs['description'] = parsed_args.description - - if parsed_args.no_property: - if volume_client.api_version < api_versions.APIVersion('3.43'): - msg = _( - '--os-volume-api-version 3.43 or greater is required to ' - 'support the --no-property option' + volume_client.reset_backup_status( + backup, status=parsed_args.state ) - raise exceptions.CommandError(msg) - - if parsed_args.properties: - if volume_client.api_version < api_versions.APIVersion('3.43'): - msg = _( - '--os-volume-api-version 3.43 or greater is required to ' - 'support the --property option' - ) - raise exceptions.CommandError(msg) - - if volume_client.api_version >= api_versions.APIVersion('3.43'): - metadata = copy.deepcopy(backup.metadata) - - if parsed_args.no_property: - metadata = {} - - metadata.update(parsed_args.properties) - kwargs['metadata'] = metadata - - if kwargs: - try: - volume_client.backups.update(backup.id, **kwargs) except Exception as e: - LOG.error("Failed to update backup: %s", e) + LOG.error(_("Failed to set backup state: %s"), e) result += 1 if result > 0: @@ -542,78 +439,40 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) -class UnsetVolumeBackup(command.Command): - """Unset volume backup properties. - - This command requires ``--os-volume-api-version`` 3.43 or greater. - """ - - def get_parser(self, prog_name): - parser = super().get_parser(prog_name) - parser.add_argument( - 'backup', - metavar='', - help=_('Backup to modify (name or ID)') - ) - parser.add_argument( - '--property', - metavar='', - action='append', - dest='properties', - help=_( - 'Property to remove from this backup ' - '(repeat option to unset multiple values) ' - ), - ) - return parser - - def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - - if volume_client.api_version < api_versions.APIVersion('3.43'): - msg = _( - '--os-volume-api-version 3.43 or greater is required to ' - 'support the --property option' - ) - raise exceptions.CommandError(msg) - - backup = utils.find_resource( - volume_client.backups, parsed_args.backup) - metadata = copy.deepcopy(backup.metadata) - - for key in parsed_args.properties: - if key not in metadata: - # ignore invalid properties but continue - LOG.warning( - "'%s' is not a valid property for backup '%s'", - key, parsed_args.backup, - ) - continue - - del metadata[key] - - kwargs = { - 'metadata': metadata, - } - - volume_client.backups.update(backup.id, **kwargs) - - class ShowVolumeBackup(command.ShowOne): _description = _("Display volume backup details") def get_parser(self, prog_name): - parser = super(ShowVolumeBackup, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "backup", metavar="", - help=_("Backup to display (name or ID)") + help=_("Backup to display (name or ID)"), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - backup = utils.find_resource(volume_client.backups, - parsed_args.backup) - backup._info.pop("links", None) - return zip(*sorted(backup._info.items())) + volume_client = self.app.client_manager.sdk_connection.volume + backup = volume_client.find_backup( + parsed_args.backup, ignore_missing=False + ) + columns: tuple[str, ...] 
= ( + "availability_zone", + "container", + "created_at", + "data_timestamp", + "description", + "fail_reason", + "has_dependent_backups", + "id", + "is_incremental", + "name", + "object_count", + "size", + "snapshot_id", + "status", + "updated_at", + "volume_id", + ) + data = utils.get_dict_properties(backup, columns) + return (columns, data) diff --git a/openstackclient/volume/v2/volume_host.py b/openstackclient/volume/v2/volume_host.py index 2fdeb9684b..44fd58a5cd 100644 --- a/openstackclient/volume/v2/volume_host.py +++ b/openstackclient/volume/v2/volume_host.py @@ -14,8 +14,7 @@ """Volume v2 host action implementations""" -from osc_lib.command import command - +from openstackclient import command from openstackclient.i18n import _ @@ -23,47 +22,46 @@ class FailoverVolumeHost(command.Command): _description = _("Failover volume host to different backend") def get_parser(self, prog_name): - parser = super(FailoverVolumeHost, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - "host", - metavar="", - help=_("Name of volume host") + "host", metavar="", help=_("Name of volume host") ) parser.add_argument( "--volume-backend", metavar="", required=True, - help=_("The ID of the volume backend replication " - "target where the host will failover to (required)") + help=_( + "The ID of the volume backend replication " + "target where the host will failover to (required)" + ), ) return parser def take_action(self, parsed_args): service_client = self.app.client_manager.volume - service_client.services.failover_host(parsed_args.host, - parsed_args.volume_backend) + service_client.services.failover_host( + parsed_args.host, parsed_args.volume_backend + ) class SetVolumeHost(command.Command): _description = _("Set volume host properties") def get_parser(self, prog_name): - parser = super(SetVolumeHost, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( - "host", - metavar="", - help=_("Name of volume host") + "host", metavar="", help=_("Name of volume host") ) enabled_group = parser.add_mutually_exclusive_group() enabled_group.add_argument( "--disable", action="store_true", - help=_("Freeze and disable the specified volume host") + help=_("Freeze and disable the specified volume host"), ) enabled_group.add_argument( "--enable", action="store_true", - help=_("Thaw and enable the specified volume host") + help=_("Thaw and enable the specified volume host"), ) return parser diff --git a/openstackclient/volume/v2/volume_snapshot.py b/openstackclient/volume/v2/volume_snapshot.py index 53d8d27fed..3b1dbbabf2 100644 --- a/openstackclient/volume/v2/volume_snapshot.py +++ b/openstackclient/volume/v2/volume_snapshot.py @@ -14,17 +14,19 @@ """Volume v2 snapshot action implementations""" -import copy import functools import logging +import typing as ty from cliff import columns as cliff_columns +from openstack.block_storage.v2 import snapshot as _snapshot from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -32,7 +34,7 @@ LOG = logging.getLogger(__name__) -class VolumeIdColumn(cliff_columns.FormattableColumn): +class VolumeIdColumn(cliff_columns.FormattableColumn[str]): """Formattable column for volume ID column. 
Unlike the parent FormattableColumn class, the initializer of the @@ -44,7 +46,7 @@ class takes volume_cache as the second argument. """ def __init__(self, value, volume_cache=None): - super(VolumeIdColumn, self).__init__(value) + super().__init__(value) self._volume_cache = volume_cache or {} def human_readable(self): @@ -59,11 +61,47 @@ def human_readable(self): return volume +def _format_snapshot(snapshot: _snapshot.Snapshot) -> dict[str, ty.Any]: + # Some columns returned by openstacksdk should not be shown because they're + # either irrelevant or duplicates + ignored_columns = { + # computed columns + 'location', + # create-only columns + 'consumes_quota', + 'force', + 'group_snapshot_id', + # ignored columns + 'os-extended-snapshot-attributes:progress', + 'os-extended-snapshot-attributes:project_id', + 'updated_at', + 'user_id', + # unnecessary columns + 'links', + } + + info = snapshot.to_dict(original_names=True) + data = {} + for key, value in info.items(): + if key in ignored_columns: + continue + + data[key] = value + + data.update( + { + 'properties': format_columns.DictColumn(data.pop('metadata')), + } + ) + + return data + + class CreateVolumeSnapshot(command.ShowOne): _description = _("Create new volume snapshot") def get_parser(self, prog_name): - parser = super(CreateVolumeSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "snapshot_name", metavar="", @@ -72,115 +110,134 @@ def get_parser(self, prog_name): parser.add_argument( "--volume", metavar="", - help=_("Volume to snapshot (name or ID) " - "(default is )") + help=_( + "Volume to snapshot (name or ID) (default is )" + ), ) parser.add_argument( "--description", metavar="", - help=_("Description of the snapshot") + help=_("Description of the snapshot"), ) parser.add_argument( "--force", action="store_true", default=False, - help=_("Create a snapshot attached to an instance. " - "Default is False") + help=_( + "Create a snapshot attached to an instance. 
Default is False" + ), ) parser.add_argument( "--property", metavar="", action=parseractions.KeyValueAction, - help=_("Set a property to this snapshot " - "(repeat option to set multiple properties)"), + dest="properties", + help=_( + "Set a property to this snapshot " + "(repeat option to set multiple properties)" + ), ) parser.add_argument( "--remote-source", metavar="", action=parseractions.KeyValueAction, - help=_("The attribute(s) of the existing remote volume snapshot " - "(admin required) (repeat option to specify multiple " - "attributes) e.g.: '--remote-source source-name=test_name " - "--remote-source source-id=test_id'"), + help=_( + "The attribute(s) of the existing remote volume snapshot " + "(admin required) (repeat option to specify multiple " + "attributes) e.g.: '--remote-source source-name=test_name " + "--remote-source source-id=test_id'" + ), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume + volume = parsed_args.volume if not parsed_args.volume: volume = parsed_args.snapshot_name - volume_id = utils.find_resource( - volume_client.volumes, volume).id + volume_id = volume_client.find_volume(volume, ignore_missing=False).id + if parsed_args.remote_source: # Create a new snapshot from an existing remote snapshot source if parsed_args.force: - msg = (_("'--force' option will not work when you create " - "new volume snapshot from an existing remote " - "volume snapshot")) + msg = _( + "'--force' option will not work when you create " + "new volume snapshot from an existing remote " + "volume snapshot" + ) LOG.warning(msg) - snapshot = volume_client.volume_snapshots.manage( + + snapshot = volume_client.manage_snapshot( volume_id=volume_id, ref=parsed_args.remote_source, name=parsed_args.snapshot_name, description=parsed_args.description, - metadata=parsed_args.property, + metadata=parsed_args.properties, ) else: # create a new snapshot from scratch - snapshot = volume_client.volume_snapshots.create( - volume_id, + snapshot = volume_client.create_snapshot( + volume_id=volume_id, force=parsed_args.force, name=parsed_args.snapshot_name, description=parsed_args.description, - metadata=parsed_args.property, + metadata=parsed_args.properties, ) - snapshot._info.update( - {'properties': - format_columns.DictColumn(snapshot._info.pop('metadata'))} - ) - return zip(*sorted(snapshot._info.items())) + + data = _format_snapshot(snapshot) + return zip(*sorted(data.items())) class DeleteVolumeSnapshot(command.Command): _description = _("Delete volume snapshot(s)") def get_parser(self, prog_name): - parser = super(DeleteVolumeSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "snapshots", metavar="", nargs="+", - help=_("Snapshot(s) to delete (name or ID)") + help=_("Snapshot(s) to delete (name or ID)"), ) parser.add_argument( '--force', action='store_true', - help=_("Attempt forced removal of snapshot(s), " - "regardless of state (defaults to False)") + help=_( + "Attempt forced removal of snapshot(s), " + "regardless of state (defaults to False)" + ), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume result = 0 - for i in parsed_args.snapshots: + for snapshot in parsed_args.snapshots: try: - snapshot_id = utils.find_resource( - volume_client.volume_snapshots, i).id - volume_client.volume_snapshots.delete( - 
snapshot_id, parsed_args.force) + snapshot_id = volume_client.find_snapshot( + snapshot, ignore_missing=False + ).id + volume_client.delete_snapshot( + snapshot_id, force=parsed_args.force + ) except Exception as e: result += 1 - LOG.error(_("Failed to delete snapshot with " - "name or ID '%(snapshot)s': %(e)s") - % {'snapshot': i, 'e': e}) + LOG.error( + _( + "Failed to delete snapshot with " + "name or ID '%(snapshot)s': %(e)s" + ) + % {'snapshot': snapshot, 'e': e} + ) if result > 0: total = len(parsed_args.snapshots) - msg = (_("%(result)s of %(total)s snapshots failed " - "to delete.") % {'result': result, 'total': total}) + msg = _("%(result)s of %(total)s snapshots failed to delete.") % { + 'result': result, + 'total': total, + } raise exceptions.CommandError(msg) @@ -188,7 +245,7 @@ class ListVolumeSnapshot(command.Lister): _description = _("List volume snapshots") def get_parser(self, prog_name): - parser = super(ListVolumeSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--all-projects', action='store_true', @@ -198,7 +255,7 @@ def get_parser(self, prog_name): parser.add_argument( '--project', metavar='', - help=_('Filter results by project (name or ID) (admin only)') + help=_('Filter results by project (name or ID) (admin only)'), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( @@ -207,181 +264,213 @@ def get_parser(self, prog_name): default=False, help=_('List additional fields in output'), ) - parser.add_argument( - '--marker', - metavar='', - help=_('The last snapshot ID of the previous page'), - ) - parser.add_argument( - '--limit', - type=int, - action=parseractions.NonNegativeAction, - metavar='', - help=_('Maximum number of snapshots to display'), - ) parser.add_argument( '--name', metavar='', default=None, - help=_('Filters results by a name.') + help=_('Filters results by a name.'), ) parser.add_argument( '--status', metavar='', - choices=['available', 'error', 'creating', 'deleting', - 'error_deleting'], - help=_("Filters results by a status. " - "('available', 'error', 'creating', 'deleting'" - " or 'error_deleting')") + choices=[ + 'available', + 'error', + 'creating', + 'deleting', + 'error_deleting', + ], + help=_( + "Filters results by a status. " + "('available', 'error', 'creating', 'deleting'" + " or 'error_deleting')" + ), ) parser.add_argument( '--volume', metavar='', default=None, - help=_('Filters results by a volume (name or ID).') + help=_('Filters results by a volume (name or ID).'), ) + pagination.add_marker_pagination_option_to_parser(parser) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume identity_client = self.app.client_manager.identity + columns: tuple[str, ...] = ( + 'id', + 'name', + 'description', + 'status', + 'size', + ) + column_headers: tuple[str, ...] 
= ( + 'ID', + 'Name', + 'Description', + 'Status', + 'Size', + ) if parsed_args.long: - columns = ['ID', 'Name', 'Description', 'Status', - 'Size', 'Created At', 'Volume ID', 'Metadata'] - column_headers = copy.deepcopy(columns) - column_headers[6] = 'Volume' - column_headers[7] = 'Properties' - else: - columns = ['ID', 'Name', 'Description', 'Status', 'Size'] - column_headers = copy.deepcopy(columns) + columns += ( + 'created_at', + 'volume_id', + 'metadata', + ) + column_headers += ( + 'Created At', + 'Volume', + 'Properties', + ) # Cache the volume list volume_cache = {} try: - for s in volume_client.volumes.list(): + for s in volume_client.volumes(): volume_cache[s.id] = s - except Exception: + except Exception: # noqa: S110 # Just forget it if there's any trouble pass - _VolumeIdColumn = functools.partial(VolumeIdColumn, - volume_cache=volume_cache) + _VolumeIdColumn = functools.partial( + VolumeIdColumn, volume_cache=volume_cache + ) volume_id = None if parsed_args.volume: - volume_id = utils.find_resource( - volume_client.volumes, parsed_args.volume).id + volume_id = volume_client.find_volume( + parsed_args.volume, ignore_missing=False + ).id project_id = None if parsed_args.project: project_id = identity_common.find_project( identity_client, parsed_args.project, - parsed_args.project_domain).id + parsed_args.project_domain, + ).id # set value of 'all_tenants' when using project option - all_projects = True if parsed_args.project else \ - parsed_args.all_projects - - search_opts = { - 'all_tenants': all_projects, - 'project_id': project_id, - 'name': parsed_args.name, - 'status': parsed_args.status, - 'volume_id': volume_id, - } + all_projects = ( + True if parsed_args.project else parsed_args.all_projects + ) - data = volume_client.volume_snapshots.list( - search_opts=search_opts, + data = volume_client.snapshots( marker=parsed_args.marker, limit=parsed_args.limit, + all_projects=all_projects, + project_id=project_id, + name=parsed_args.name, + status=parsed_args.status, + volume_id=volume_id, + ) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={ + 'metadata': format_columns.DictColumn, + 'volume_id': _VolumeIdColumn, + }, + ) + for s in data + ), ) - return (column_headers, - (utils.get_item_properties( - s, columns, - formatters={'Metadata': format_columns.DictColumn, - 'Volume ID': _VolumeIdColumn}, - ) for s in data)) class SetVolumeSnapshot(command.Command): _description = _("Set volume snapshot properties") def get_parser(self, prog_name): - parser = super(SetVolumeSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'snapshot', metavar='', - help=_('Snapshot to modify (name or ID)') + help=_('Snapshot to modify (name or ID)'), ) parser.add_argument( - '--name', - metavar='', - help=_('New snapshot name') + '--name', metavar='', help=_('New snapshot name') ) parser.add_argument( '--description', metavar='', - help=_('New snapshot description') + help=_('New snapshot description'), ) parser.add_argument( "--no-property", dest="no_property", action="store_true", - help=_("Remove all properties from " - "(specify both --no-property and --property to " - "remove the current properties before setting " - "new properties.)"), + help=_( + "Remove all properties from " + "(specify both --no-property and --property to " + "remove the current properties before setting " + "new properties.)" + ), ) parser.add_argument( '--property', metavar='', action=parseractions.KeyValueAction, - 
help=_('Property to add/change for this snapshot ' - '(repeat option to set multiple properties)'), + dest='properties', + help=_( + 'Property to add/change for this snapshot ' + '(repeat option to set multiple properties)' + ), ) parser.add_argument( '--state', metavar='', - choices=['available', 'error', 'creating', 'deleting', - 'error_deleting'], - help=_('New snapshot state. ("available", "error", "creating", ' - '"deleting", or "error_deleting") (admin only) ' - '(This option simply changes the state of the snapshot ' - 'in the database with no regard to actual status, ' - 'exercise caution when using)'), + choices=[ + 'available', + 'error', + 'creating', + 'deleting', + 'error_deleting', + ], + help=_( + 'New snapshot state. ("available", "error", "creating", ' + '"deleting", or "error_deleting") (admin only) ' + '(This option simply changes the state of the snapshot ' + 'in the database with no regard to actual status, ' + 'exercise caution when using)' + ), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - snapshot = utils.find_resource(volume_client.volume_snapshots, - parsed_args.snapshot) + volume_client = self.app.client_manager.sdk_connection.volume + + snapshot = volume_client.find_snapshot( + parsed_args.snapshot, ignore_missing=False + ) result = 0 if parsed_args.no_property: try: - key_list = snapshot.metadata.keys() - volume_client.volume_snapshots.delete_metadata( - snapshot.id, - list(key_list), + volume_client.delete_snapshot_metadata( + snapshot.id, keys=list(snapshot.metadata) ) except Exception as e: LOG.error(_("Failed to clean snapshot properties: %s"), e) result += 1 - if parsed_args.property: + if parsed_args.properties: try: - volume_client.volume_snapshots.set_metadata( - snapshot.id, parsed_args.property) + volume_client.set_snapshot_metadata( + snapshot.id, **parsed_args.properties + ) except Exception as e: LOG.error(_("Failed to set snapshot property: %s"), e) result += 1 if parsed_args.state: try: - volume_client.volume_snapshots.reset_state( - snapshot.id, parsed_args.state) + volume_client.reset_snapshot_status( + snapshot.id, parsed_args.state + ) except Exception as e: LOG.error(_("Failed to set snapshot state: %s"), e) result += 1 @@ -393,46 +482,48 @@ def take_action(self, parsed_args): kwargs['description'] = parsed_args.description if kwargs: try: - volume_client.volume_snapshots.update( - snapshot.id, **kwargs) + volume_client.update_snapshot(snapshot.id, **kwargs) except Exception as e: - LOG.error(_("Failed to update snapshot name " - "or description: %s"), e) + LOG.error( + _("Failed to update snapshot name or description: %s"), + e, + ) result += 1 if result > 0: - raise exceptions.CommandError(_("One or more of the " - "set operations failed")) + raise exceptions.CommandError( + _("One or more of the set operations failed") + ) class ShowVolumeSnapshot(command.ShowOne): _description = _("Display volume snapshot details") def get_parser(self, prog_name): - parser = super(ShowVolumeSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "snapshot", metavar="", - help=_("Snapshot to display (name or ID)") + help=_("Snapshot to display (name or ID)"), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - snapshot = utils.find_resource( - volume_client.volume_snapshots, parsed_args.snapshot) - snapshot._info.update( - {'properties': - format_columns.DictColumn(snapshot._info.pop('metadata'))} + 
volume_client = self.app.client_manager.sdk_connection.volume + + snapshot = volume_client.find_snapshot( + parsed_args.snapshot, ignore_missing=False ) - return zip(*sorted(snapshot._info.items())) + + data = _format_snapshot(snapshot) + return zip(*sorted(data.items())) class UnsetVolumeSnapshot(command.Command): _description = _("Unset volume snapshot properties") def get_parser(self, prog_name): - parser = super(UnsetVolumeSnapshot, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'snapshot', metavar='', @@ -443,18 +534,22 @@ def get_parser(self, prog_name): metavar='', action='append', default=[], - help=_('Property to remove from snapshot ' - '(repeat option to remove multiple properties)'), + dest='properties', + help=_( + 'Property to remove from snapshot ' + '(repeat option to remove multiple properties)' + ), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume - snapshot = utils.find_resource( - volume_client.volume_snapshots, parsed_args.snapshot) - - if parsed_args.property: - volume_client.volume_snapshots.delete_metadata( - snapshot.id, - parsed_args.property, + volume_client = self.app.client_manager.sdk_connection.volume + + snapshot = volume_client.find_snapshot( + parsed_args.snapshot, ignore_missing=False + ) + + if parsed_args.properties: + volume_client.delete_snapshot_metadata( + snapshot.id, keys=parsed_args.properties ) diff --git a/openstackclient/volume/v2/volume_transfer_request.py b/openstackclient/volume/v2/volume_transfer_request.py index 8919933609..dcdc527625 100644 --- a/openstackclient/volume/v2/volume_transfer_request.py +++ b/openstackclient/volume/v2/volume_transfer_request.py @@ -16,11 +16,10 @@ import logging -from cinderclient import api_versions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -31,7 +30,7 @@ class AcceptTransferRequest(command.ShowOne): _description = _("Accept volume transfer request.") def get_parser(self, prog_name): - parser = super(AcceptTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'transfer_request', metavar="", @@ -50,8 +49,7 @@ def take_action(self, parsed_args): try: transfer_request_id = utils.find_resource( - volume_client.transfers, - parsed_args.transfer_request + volume_client.transfers, parsed_args.transfer_request ).id except exceptions.CommandError: # Non-admin users will fail to lookup name -> ID so we just @@ -71,31 +69,12 @@ class CreateTransferRequest(command.ShowOne): _description = _("Create volume transfer request.") def get_parser(self, prog_name): - parser = super(CreateTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--name', metavar="", help=_('New transfer request name (default to None)'), ) - parser.add_argument( - '--snapshots', - action='store_true', - dest='snapshots', - help=_( - 'Allow transfer volumes without snapshots (default) ' - '(supported by --os-volume-api-version 3.55 or later)' - ), - default=None, - ) - parser.add_argument( - '--no-snapshots', - action='store_false', - dest='snapshots', - help=_( - 'Disallow transfer volumes without snapshots ' - '(supported by --os-volume-api-version 3.55 or later)' - ), - ) parser.add_argument( 'volume', metavar="", @@ -106,20 +85,6 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): volume_client = 
self.app.client_manager.volume - kwargs = {} - - if parsed_args.snapshots is not None: - if volume_client.api_version < api_versions.APIVersion('3.55'): - msg = _( - "--os-volume-api-version 3.55 or greater is required to " - "support the '--(no-)snapshots' option" - ) - raise exceptions.CommandError(msg) - - # unfortunately this option is negative so we have to reverse - # things - kwargs['no_snapshots'] = not parsed_args.snapshots - volume_id = utils.find_resource( volume_client.volumes, parsed_args.volume, @@ -127,7 +92,6 @@ def take_action(self, parsed_args): volume_transfer_request = volume_client.transfers.create( volume_id, parsed_args.name, - **kwargs, ) volume_transfer_request._info.pop("links", None) @@ -138,7 +102,7 @@ class DeleteTransferRequest(command.Command): _description = _("Delete volume transfer request(s).") def get_parser(self, prog_name): - parser = super(DeleteTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'transfer_request', metavar="", @@ -160,14 +124,20 @@ def take_action(self, parsed_args): volume_client.transfers.delete(transfer_request_id) except Exception as e: result += 1 - LOG.error(_("Failed to delete volume transfer request " - "with name or ID '%(transfer)s': %(e)s") - % {'transfer': t, 'e': e}) + LOG.error( + _( + "Failed to delete volume transfer request " + "with name or ID '%(transfer)s': %(e)s" + ) + % {'transfer': t, 'e': e} + ) if result > 0: total = len(parsed_args.transfer_request) - msg = (_("%(result)s of %(total)s volume transfer requests failed" - " to delete") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s volume transfer requests failed" + " to delete" + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -175,7 +145,7 @@ class ListTransferRequest(command.Lister): _description = _("Lists all volume transfer requests.") def get_parser(self, prog_name): - parser = super(ListTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--all-projects', dest='all_projects', @@ -196,16 +166,20 @@ def take_action(self, parsed_args): search_opts={'all_tenants': parsed_args.all_projects}, ) - return (column_headers, ( - utils.get_item_properties(s, columns) - for s in volume_transfer_result)) + return ( + column_headers, + ( + utils.get_item_properties(s, columns) + for s in volume_transfer_result + ), + ) class ShowTransferRequest(command.ShowOne): _description = _("Show volume transfer request details.") def get_parser(self, prog_name): - parser = super(ShowTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'transfer_request', metavar="", diff --git a/openstackclient/volume/v2/volume_type.py b/openstackclient/volume/v2/volume_type.py index 483e6dd3b5..e7b90af95a 100644 --- a/openstackclient/volume/v2/volume_type.py +++ b/openstackclient/volume/v2/volume_type.py @@ -16,14 +16,15 @@ import functools import logging +import typing as ty from cliff import columns as cliff_columns from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -31,7 +32,7 @@ LOG = logging.getLogger(__name__) -class EncryptionInfoColumn(cliff_columns.FormattableColumn): +class 
EncryptionInfoColumn(cliff_columns.FormattableColumn[ty.Any]): """Formattable column for encryption info column. Unlike the parent FormattableColumn class, the initializer of the @@ -43,7 +44,7 @@ class takes encryption_data as the second argument. """ def __init__(self, value, encryption_data=None): - super(EncryptionInfoColumn, self).__init__(value) + super().__init__(value) self._encryption_data = encryption_data or {} def _get_encryption_info(self): @@ -63,8 +64,10 @@ def machine_readable(self): def _create_encryption_type(volume_client, volume_type, parsed_args): if not parsed_args.encryption_provider: - msg = _("'--encryption-provider' should be specified while " - "creating a new encryption type") + msg = _( + "'--encryption-provider' should be specified while " + "creating a new encryption type" + ) raise exceptions.CommandError(msg) # set the default of control location while creating control_location = 'front-end' @@ -74,10 +77,11 @@ def _create_encryption_type(volume_client, volume_type, parsed_args): 'provider': parsed_args.encryption_provider, 'cipher': parsed_args.encryption_cipher, 'key_size': parsed_args.encryption_key_size, - 'control_location': control_location + 'control_location': control_location, } encryption = volume_client.volume_encryption_types.create( - volume_type, body) + volume_type, body + ) return encryption @@ -93,17 +97,20 @@ def _set_encryption_type(volume_client, volume_type, parsed_args): except Exception as e: if type(e).__name__ == 'NotFound': # create new encryption type - LOG.warning(_("No existing encryption type found, creating " - "new encryption type for this volume type ...")) - _create_encryption_type( - volume_client, volume_type, parsed_args) + LOG.warning( + _( + "No existing encryption type found, creating " + "new encryption type for this volume type ..." 
+                )
+            )
+            _create_encryption_type(volume_client, volume_type, parsed_args)
 
 
 class CreateVolumeType(command.ShowOne):
     _description = _("Create new volume type")
 
     def get_parser(self, prog_name):
-        parser = super(CreateVolumeType, self).get_parser(prog_name)
+        parser = super().get_parser(prog_name)
         parser.add_argument(
             "name",
             metavar="<name>",
@@ -118,85 +125,141 @@ def get_parser(self, prog_name):
         public_group.add_argument(
             "--public",
             action="store_true",
-            default=False,
+            dest="is_public",
+            default=None,
             help=_("Volume type is accessible to the public"),
         )
         public_group.add_argument(
             "--private",
-            action="store_true",
-            default=False,
+            action="store_false",
+            dest="is_public",
+            default=None,
             help=_("Volume type is not accessible to the public"),
         )
         parser.add_argument(
             '--property',
             metavar='<key=value>',
             action=parseractions.KeyValueAction,
-            help=_('Set a property on this volume type '
-                   '(repeat option to set multiple properties)'),
+            dest='properties',
+            help=_(
+                'Set a property on this volume type '
+                '(repeat option to set multiple properties)'
+            ),
+        )
+        parser.add_argument(
+            '--multiattach',
+            action='store_true',
+            default=False,
+            help=_(
+                "Enable multi-attach for this volume type "
+                "(this is an alias for '--property multiattach=<is> True') "
+                "(requires driver support)"
+            ),
+        )
+        parser.add_argument(
+            '--cacheable',
+            action='store_true',
+            default=False,
+            help=_(
+                "Enable caching for this volume type "
+                "(this is an alias for '--property cacheable=<is> True') "
+                "(requires driver support)"
+            ),
+        )
+        parser.add_argument(
+            '--replicated',
+            action='store_true',
+            default=False,
+            help=_(
+                "Enabled replication for this volume type "
+                "(this is an alias for "
+                "'--property replication_enabled=<is> True') "
+                "(requires driver support)"
+            ),
+        )
+        parser.add_argument(
+            '--availability-zone',
+            action='append',
+            dest='availability_zones',
+            help=_(
+                "Set an availability zone for this volume type "
+                "(this is an alias for "
+                "'--property RESKEY:availability_zones:<az-name>') "
+                "(repeat option to set multiple availability zones)"
+            ),
         )
         parser.add_argument(
             '--project',
             metavar='<project>',
-            help=_("Allow <project> to access private type (name or ID) "
-                   "(Must be used with --private option)"),
+            help=_(
+                "Allow <project> to access private type (name or ID) "
+                "(must be used with --private option)"
+            ),
         )
+        identity_common.add_project_domain_option_to_parser(parser)
         # TODO(Huanxuan Ao): Add choices for each "--encryption-*" option.
         parser.add_argument(
             '--encryption-provider',
             metavar='<provider>',
-            help=_('Set the encryption provider format for '
-                   'this volume type (e.g "luks" or "plain") (admin only) '
-                   '(This option is required when setting encryption type '
-                   'of a volume. Consider using other encryption options '
-                   'such as: "--encryption-cipher", "--encryption-key-size" '
-                   'and "--encryption-control-location")'),
+            help=_(
+                'Set the encryption provider format for '
+                'this volume type (e.g "luks" or "plain") (admin only) '
+                '(this option is required when setting encryption type '
+                'of a volume; consider using other encryption options '
+                'such as: "--encryption-cipher", "--encryption-key-size" '
+                'and "--encryption-control-location")'
+            ),
         )
         parser.add_argument(
             '--encryption-cipher',
             metavar='<cipher>',
-            help=_('Set the encryption algorithm or mode for this '
-                   'volume type (e.g "aes-xts-plain64") (admin only)'),
+            help=_(
+                'Set the encryption algorithm or mode for this '
+                'volume type (e.g "aes-xts-plain64") (admin only)'
+            ),
         )
         parser.add_argument(
             '--encryption-key-size',
             metavar='<key-size>',
             type=int,
-            help=_('Set the size of the encryption key of this '
-                   'volume type (e.g "128" or "256") (admin only)'),
+            help=_(
+                'Set the size of the encryption key of this '
+                'volume type (e.g "128" or "256") (admin only)'
+            ),
         )
         parser.add_argument(
             '--encryption-control-location',
             metavar='<control-location>',
             choices=['front-end', 'back-end'],
-            help=_('Set the notional service where the encryption is '
-                   'performed ("front-end" or "back-end") (admin only) '
-                   '(The default value for this option is "front-end" '
-                   'when setting encryption type of a volume. Consider '
-                   'using other encryption options such as: '
-                   '"--encryption-cipher", "--encryption-key-size" and '
-                   '"--encryption-provider")'),
+            help=_(
+                'Set the notional service where the encryption is '
+                'performed ("front-end" or "back-end") (admin only) '
+                '(The default value for this option is "front-end" '
+                'when setting encryption type of a volume. Consider '
+                'using other encryption options such as: '
+                '"--encryption-cipher", "--encryption-key-size" and '
+                '"--encryption-provider")'
+            ),
         )
-        identity_common.add_project_domain_option_to_parser(parser)
         return parser
 
     def take_action(self, parsed_args):
         identity_client = self.app.client_manager.identity
         volume_client = self.app.client_manager.volume
-        if parsed_args.project and not parsed_args.private:
+        if parsed_args.project and parsed_args.is_public is not False:
             msg = _("--project is only allowed with --private")
             raise exceptions.CommandError(msg)
 
         kwargs = {}
-        if parsed_args.public:
-            kwargs['is_public'] = True
-        if parsed_args.private:
-            kwargs['is_public'] = False
+
+        if parsed_args.is_public is not None:
+            kwargs['is_public'] = parsed_args.is_public
 
         volume_type = volume_client.volume_types.create(
             parsed_args.name,
             description=parsed_args.description,
-            **kwargs
+            **kwargs,
         )
         volume_type._info.pop('extra_specs')
@@ -208,30 +271,58 @@ def take_action(self, parsed_args):
                     parsed_args.project_domain,
                 ).id
                 volume_client.volume_type_access.add_project_access(
-                    volume_type.id, project_id)
+                    volume_type.id, project_id
+                )
             except Exception as e:
-                msg = _("Failed to add project %(project)s access to "
-                        "type: %(e)s")
+                msg = _(
+                    "Failed to add project %(project)s access to type: %(e)s"
+                )
                 LOG.error(msg % {'project': parsed_args.project, 'e': e})
-        if parsed_args.property:
-            result = volume_type.set_keys(parsed_args.property)
+
+        properties = {}
+        if parsed_args.properties:
+            properties.update(parsed_args.properties)
+        if parsed_args.multiattach:
+            properties['multiattach'] = '<is> True'
+        if parsed_args.cacheable:
+            properties['cacheable'] = '<is> True'
+        if parsed_args.replicated:
+            properties['replication_enabled'] = '<is> True'
+        if parsed_args.availability_zones:
+
properties['RESKEY:availability_zones'] = ','.join( + parsed_args.availability_zones + ) + if properties: + result = volume_type.set_keys(properties) volume_type._info.update( - {'properties': format_columns.DictColumn(result)}) - if (parsed_args.encryption_provider or - parsed_args.encryption_cipher or - parsed_args.encryption_key_size or - parsed_args.encryption_control_location): + {'properties': format_columns.DictColumn(result)} + ) + + if ( + parsed_args.encryption_provider + or parsed_args.encryption_cipher + or parsed_args.encryption_key_size + or parsed_args.encryption_control_location + ): try: # create new encryption encryption = _create_encryption_type( - volume_client, volume_type, parsed_args) + volume_client, volume_type, parsed_args + ) except Exception as e: - LOG.error(_("Failed to set encryption information for this " - "volume type: %s"), e) + LOG.error( + _( + "Failed to set encryption information for this " + "volume type: %s" + ), + e, + ) # add encryption info in result encryption._info.pop("volume_type_id", None) volume_type._info.update( - {'encryption': format_columns.DictColumn(encryption._info)}) + {'encryption': format_columns.DictColumn(encryption._info)} + ) + volume_type._info.pop("os-volume-type-access:is_public", None) return zip(*sorted(volume_type._info.items())) @@ -241,12 +332,12 @@ class DeleteVolumeType(command.Command): _description = _("Delete volume type(s)") def get_parser(self, prog_name): - parser = super(DeleteVolumeType, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "volume_types", metavar="", nargs="+", - help=_("Volume type(s) to delete (name or ID)") + help=_("Volume type(s) to delete (name or ID)"), ) return parser @@ -256,20 +347,26 @@ def take_action(self, parsed_args): for volume_type in parsed_args.volume_types: try: - vol_type = utils.find_resource(volume_client.volume_types, - volume_type) + vol_type = utils.find_resource( + volume_client.volume_types, volume_type + ) volume_client.volume_types.delete(vol_type) except Exception as e: result += 1 - LOG.error(_("Failed to delete volume type with " - "name or ID '%(volume_type)s': %(e)s") - % {'volume_type': volume_type, 'e': e}) + LOG.error( + _( + "Failed to delete volume type with " + "name or ID '%(volume_type)s': %(e)s" + ) + % {'volume_type': volume_type, 'e': e} + ) if result > 0: total = len(parsed_args.volume_types) - msg = (_("%(result)s of %(total)s volume types failed " - "to delete.") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s volume types failed to delete." 
+ ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -277,57 +374,70 @@ class ListVolumeType(command.Lister): _description = _("List volume types") def get_parser(self, prog_name): - parser = super(ListVolumeType, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--long', action='store_true', default=False, - help=_('List additional fields in output') + help=_('List additional fields in output'), ) public_group = parser.add_mutually_exclusive_group() public_group.add_argument( "--default", action='store_true', default=False, - help=_('List the default volume type') + help=_('List the default volume type'), ) public_group.add_argument( "--public", action="store_true", - help=_("List only public types") + dest="is_public", + default=None, + help=_("List only public types"), ) public_group.add_argument( "--private", - action="store_true", - help=_("List only private types (admin only)") + action="store_false", + dest="is_public", + default=None, + help=_("List only private types (admin only)"), ) parser.add_argument( "--encryption-type", action="store_true", - help=_("Display encryption information for each volume type " - "(admin only)"), + help=_( + "Display encryption information for each volume type " + "(admin only)" + ), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume + if parsed_args.long: - columns = ['ID', 'Name', 'Is Public', 'Description', 'Extra Specs'] + columns = [ + 'ID', + 'Name', + 'Is Public', + 'Description', + ] column_headers = [ - 'ID', 'Name', 'Is Public', 'Description', 'Properties'] + 'ID', + 'Name', + 'Is Public', + 'Description', + ] else: columns = ['ID', 'Name', 'Is Public'] column_headers = ['ID', 'Name', 'Is Public'] + if parsed_args.default: data = [volume_client.volume_types.default()] else: - is_public = None - if parsed_args.public: - is_public = True - if parsed_args.private: - is_public = False data = volume_client.volume_types.list( - is_public=is_public) + is_public=parsed_args.is_public, + ) formatters = {'Extra Specs': format_columns.DictColumn} @@ -341,7 +451,7 @@ def take_action(self, parsed_args): 'created_at', 'updated_at', 'deleted_at', - 'volume_type_id' + 'volume_type_id', ] for key in del_key: d._info.pop(key, None) @@ -354,21 +464,28 @@ def take_action(self, parsed_args): column_headers += ['Encryption'] _EncryptionInfoColumn = functools.partial( - EncryptionInfoColumn, encryption_data=encryption) + EncryptionInfoColumn, encryption_data=encryption + ) formatters['id'] = _EncryptionInfoColumn - return (column_headers, - (utils.get_item_properties( - s, columns, + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, formatters=formatters, - ) for s in data)) + ) + for s in data + ), + ) class SetVolumeType(command.Command): _description = _("Set volume type properties") def get_parser(self, prog_name): - parser = super(SetVolumeType, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'volume_type', metavar='', @@ -388,52 +505,121 @@ def get_parser(self, prog_name): '--property', metavar='', action=parseractions.KeyValueAction, - help=_('Set a property on this volume type ' - '(repeat option to set multiple properties)'), + dest='properties', + help=_( + 'Set a property on this volume type ' + '(repeat option to set multiple properties)' + ), + ) + parser.add_argument( + '--multiattach', + action='store_true', + default=False, + help=_( + "Enable multi-attach for this 
volume type " + "(this is an alias for '--property multiattach= True') " + "(requires driver support)" + ), + ) + parser.add_argument( + '--cacheable', + action='store_true', + default=False, + help=_( + "Enable caching for this volume type " + "(this is an alias for '--property cacheable= True') " + "(requires driver support)" + ), + ) + parser.add_argument( + '--replicated', + action='store_true', + default=False, + help=_( + "Enabled replication for this volume type " + "(this is an alias for " + "'--property replication_enabled= True') " + "(requires driver support)" + ), + ) + parser.add_argument( + '--availability-zone', + action='append', + dest='availability_zones', + help=_( + "Set an availability zone for this volume type " + "(this is an alias for " + "'--property RESKEY:availability_zones:') " + "(repeat option to set multiple availability zones)" + ), ) parser.add_argument( '--project', metavar='', - help=_('Set volume type access to project (name or ID) ' - '(admin only)'), + help=_( + 'Set volume type access to project (name or ID) (admin only)' + ), + ) + public_group = parser.add_mutually_exclusive_group() + public_group.add_argument( + '--public', + action='store_true', + dest='is_public', + default=None, + help=_('Volume type is accessible to the public'), + ) + public_group.add_argument( + '--private', + action='store_false', + dest='is_public', + default=None, + help=_("Volume type is not accessible to the public"), ) identity_common.add_project_domain_option_to_parser(parser) # TODO(Huanxuan Ao): Add choices for each "--encryption-*" option. parser.add_argument( '--encryption-provider', metavar='', - help=_('Set the encryption provider format for ' - 'this volume type (e.g "luks" or "plain") (admin only) ' - '(This option is required when setting encryption type ' - 'of a volume for the first time. Consider using other ' - 'encryption options such as: "--encryption-cipher", ' - '"--encryption-key-size" and ' - '"--encryption-control-location")'), + help=_( + 'Set the encryption provider format for ' + 'this volume type (e.g "luks" or "plain") (admin only) ' + '(This option is required when setting encryption type ' + 'of a volume for the first time. Consider using other ' + 'encryption options such as: "--encryption-cipher", ' + '"--encryption-key-size" and ' + '"--encryption-control-location")' + ), ) parser.add_argument( '--encryption-cipher', metavar='', - help=_('Set the encryption algorithm or mode for this ' - 'volume type (e.g "aes-xts-plain64") (admin only)'), + help=_( + 'Set the encryption algorithm or mode for this ' + 'volume type (e.g "aes-xts-plain64") (admin only)' + ), ) parser.add_argument( '--encryption-key-size', metavar='', type=int, - help=_('Set the size of the encryption key of this ' - 'volume type (e.g "128" or "256") (admin only)'), + help=_( + 'Set the size of the encryption key of this ' + 'volume type (e.g "128" or "256") (admin only)' + ), ) parser.add_argument( '--encryption-control-location', metavar='', choices=['front-end', 'back-end'], - help=_('Set the notional service where the encryption is ' - 'performed ("front-end" or "back-end") (admin only) ' - '(The default value for this option is "front-end" ' - 'when setting encryption type of a volume for the ' - 'first time. 
Consider using other encryption options ' - 'such as: "--encryption-cipher", "--encryption-key-size" ' - 'and "--encryption-provider")'), + help=_( + 'Set the notional service where the encryption is ' + 'performed ("front-end" or "back-end") (admin only) ' + '(The default value for this option is "front-end" ' + 'when setting encryption type of a volume for the ' + 'first time. Consider using other encryption options ' + 'such as: "--encryption-cipher", "--encryption-key-size" ' + 'and "--encryption-provider")' + ), ) return parser @@ -442,30 +628,50 @@ def take_action(self, parsed_args): identity_client = self.app.client_manager.identity volume_type = utils.find_resource( - volume_client.volume_types, parsed_args.volume_type) + volume_client.volume_types, + parsed_args.volume_type, + ) + result = 0 kwargs = {} + if parsed_args.name: kwargs['name'] = parsed_args.name + if parsed_args.description: kwargs['description'] = parsed_args.description + if parsed_args.is_public is not None: + kwargs['is_public'] = parsed_args.is_public + if kwargs: try: - volume_client.volume_types.update( - volume_type.id, - **kwargs - ) + volume_client.volume_types.update(volume_type.id, **kwargs) except Exception as e: - LOG.error(_("Failed to update volume type name or" - " description: %s"), e) + LOG.error( + _("Failed to update volume type name or description: %s"), + e, + ) result += 1 - if parsed_args.property: + properties: dict[str, str] = {} + if parsed_args.properties: + properties.update(parsed_args.properties) + if parsed_args.multiattach: + properties['multiattach'] = ' True' + if parsed_args.cacheable: + properties['cacheable'] = ' True' + if parsed_args.replicated: + properties['replication_enabled'] = ' True' + if parsed_args.availability_zones: + properties['RESKEY:availability_zones'] = ','.join( + parsed_args.availability_zones + ) + if properties: try: - volume_type.set_keys(parsed_args.property) + volume_type.set_keys(properties) except Exception as e: - LOG.error(_("Failed to set volume type property: %s"), e) + LOG.error(_("Failed to set volume type properties: %s"), e) result += 1 if parsed_args.project: @@ -474,83 +680,109 @@ def take_action(self, parsed_args): project_info = identity_common.find_project( identity_client, parsed_args.project, - parsed_args.project_domain) + parsed_args.project_domain, + ) volume_client.volume_type_access.add_project_access( - volume_type.id, project_info.id) + volume_type.id, project_info.id + ) except Exception as e: - LOG.error(_("Failed to set volume type access to " - "project: %s"), e) + LOG.error( + _("Failed to set volume type access to project: %s"), e + ) result += 1 - if (parsed_args.encryption_provider or - parsed_args.encryption_cipher or - parsed_args.encryption_key_size or - parsed_args.encryption_control_location): + if ( + parsed_args.encryption_provider + or parsed_args.encryption_cipher + or parsed_args.encryption_key_size + or parsed_args.encryption_control_location + ): try: _set_encryption_type(volume_client, volume_type, parsed_args) except Exception as e: - LOG.error(_("Failed to set encryption information for this " - "volume type: %s"), e) + LOG.error( + _( + "Failed to set encryption information for this " + "volume type: %s" + ), + e, + ) result += 1 if result > 0: - raise exceptions.CommandError(_("Command Failed: One or more of" - " the operations failed")) + raise exceptions.CommandError( + _("Command Failed: One or more of the operations failed") + ) class ShowVolumeType(command.ShowOne): _description = _("Display volume 
type details") def get_parser(self, prog_name): - parser = super(ShowVolumeType, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( "volume_type", metavar="", - help=_("Volume type to display (name or ID)") + help=_("Volume type to display (name or ID)"), ) parser.add_argument( "--encryption-type", action="store_true", - help=_("Display encryption information of this volume type " - "(admin only)"), + help=_( + "Display encryption information of this volume type " + "(admin only)" + ), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume volume_type = utils.find_resource( - volume_client.volume_types, parsed_args.volume_type) + volume_client.volume_types, parsed_args.volume_type + ) properties = format_columns.DictColumn( - volume_type._info.pop('extra_specs', {})) + volume_type._info.pop('extra_specs', {}) + ) volume_type._info.update({'properties': properties}) access_project_ids = None if not volume_type.is_public: try: volume_type_access = volume_client.volume_type_access.list( - volume_type.id) - project_ids = [utils.get_field(item, 'project_id') - for item in volume_type_access] + volume_type.id + ) + project_ids = [ + utils.get_field(item, 'project_id') + for item in volume_type_access + ] # TODO(Rui Chen): This format list case can be removed after # patch https://review.opendev.org/#/c/330223/ merged. access_project_ids = format_columns.ListColumn(project_ids) except Exception as e: - msg = _('Failed to get access project list for volume type ' - '%(type)s: %(e)s') + msg = _( + 'Failed to get access project list for volume type ' + '%(type)s: %(e)s' + ) LOG.error(msg % {'type': volume_type.id, 'e': e}) volume_type._info.update({'access_project_ids': access_project_ids}) if parsed_args.encryption_type: # show encryption type information for this volume type try: encryption = volume_client.volume_encryption_types.get( - volume_type.id) + volume_type.id + ) encryption._info.pop("volume_type_id", None) volume_type._info.update( - {'encryption': - format_columns.DictColumn(encryption._info)}) + {'encryption': format_columns.DictColumn(encryption._info)} + ) except Exception as e: - LOG.error(_("Failed to display the encryption information " - "of this volume type: %s"), e) + LOG.error( + _( + "Failed to display the encryption information " + "of this volume type: %s" + ), + e, + ) volume_type._info.pop("os-volume-type-access:is_public", None) return zip(*sorted(volume_type._info.items())) @@ -559,7 +791,7 @@ class UnsetVolumeType(command.Command): _description = _("Unset volume type properties") def get_parser(self, prog_name): - parser = super(UnsetVolumeType, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'volume_type', metavar='', @@ -569,21 +801,27 @@ def get_parser(self, prog_name): '--property', metavar='', action='append', - help=_('Remove a property from this volume type ' - '(repeat option to remove multiple properties)'), + dest='properties', + help=_( + 'Remove a property from this volume type ' + '(repeat option to remove multiple properties)' + ), ) parser.add_argument( '--project', metavar='', - help=_('Removes volume type access to project (name or ID) ' - '(admin only)'), + help=_( + 'Removes volume type access to project (name or ID) ' + '(admin only)' + ), ) identity_common.add_project_domain_option_to_parser(parser) parser.add_argument( "--encryption-type", action="store_true", - help=_("Remove the encryption type for this volume type " - 
"(admin only)"), + help=_( + "Remove the encryption type for this volume type (admin only)" + ), ) return parser @@ -597,11 +835,11 @@ def take_action(self, parsed_args): ) result = 0 - if parsed_args.property: + if parsed_args.properties: try: - volume_type.unset_keys(parsed_args.property) + volume_type.unset_keys(parsed_args.properties) except Exception as e: - LOG.error(_("Failed to unset volume type property: %s"), e) + LOG.error(_("Failed to unset volume type properties: %s"), e) result += 1 if parsed_args.project: @@ -610,22 +848,32 @@ def take_action(self, parsed_args): project_info = identity_common.find_project( identity_client, parsed_args.project, - parsed_args.project_domain) + parsed_args.project_domain, + ) volume_client.volume_type_access.remove_project_access( - volume_type.id, project_info.id) + volume_type.id, project_info.id + ) except Exception as e: - LOG.error(_("Failed to remove volume type access from " - "project: %s"), e) + LOG.error( + _("Failed to remove volume type access from project: %s"), + e, + ) result += 1 if parsed_args.encryption_type: try: volume_client.volume_encryption_types.delete(volume_type) except Exception as e: - LOG.error(_("Failed to remove the encryption type for this " - "volume type: %s"), e) + LOG.error( + _( + "Failed to remove the encryption type for this " + "volume type: %s" + ), + e, + ) result += 1 if result > 0: - raise exceptions.CommandError(_("Command Failed: One or more of" - " the operations failed")) + raise exceptions.CommandError( + _("Command Failed: One or more of the operations failed") + ) diff --git a/openstackclient/volume/v3/block_storage_cleanup.py b/openstackclient/volume/v3/block_storage_cleanup.py index f99b821777..5208504a35 100644 --- a/openstackclient/volume/v3/block_storage_cleanup.py +++ b/openstackclient/volume/v3/block_storage_cleanup.py @@ -11,9 +11,9 @@ # under the License. from cinderclient import api_versions -from osc_lib.command import command from osc_lib import exceptions +from openstackclient import command from openstackclient.i18n import _ @@ -31,8 +31,13 @@ def _format_cleanup_response(cleaning, unavailable): combined_data.append(details) for obj in unavailable: - details = (obj.id, obj.cluster_name, obj.host, obj.binary, - 'Unavailable') + details = ( + obj.id, + obj.cluster_name, + obj.host, + obj.binary, + 'Unavailable', + ) combined_data.append(details) return (column_headers, combined_data) @@ -49,20 +54,22 @@ def get_parser(self, prog_name): parser.add_argument( '--cluster', metavar='', - help=_('Name of block storage cluster in which cleanup needs ' - 'to be performed (name only)') + help=_( + 'Name of block storage cluster in which cleanup needs ' + 'to be performed (name only)' + ), ) parser.add_argument( "--host", metavar="", default=None, - help=_("Host where the service resides. (name only)") + help=_("Host where the service resides. (name only)"), ) parser.add_argument( '--binary', metavar='', default=None, - help=_("Name of the service binary.") + help=_("Name of the service binary."), ) service_up_parser = parser.add_mutually_exclusive_group() service_up_parser.add_argument( @@ -72,7 +79,7 @@ def get_parser(self, prog_name): default=None, help=_( 'Filter by up status. If this is set, services need to be up.' - ) + ), ) service_up_parser.add_argument( '--down', @@ -81,7 +88,7 @@ def get_parser(self, prog_name): help=_( 'Filter by down status. If this is set, services need to be ' 'down.' 
- ) + ), ) service_disabled_parser = parser.add_mutually_exclusive_group() service_disabled_parser.add_argument( @@ -89,25 +96,25 @@ def get_parser(self, prog_name): dest='disabled', action='store_true', default=None, - help=_('Filter by disabled status.') + help=_('Filter by disabled status.'), ) service_disabled_parser.add_argument( '--enabled', dest='disabled', action='store_false', - help=_('Filter by enabled status.') + help=_('Filter by enabled status.'), ) parser.add_argument( '--resource-id', metavar='', default=None, - help=_('UUID of a resource to cleanup.') + help=_('UUID of a resource to cleanup.'), ) parser.add_argument( '--resource-type', metavar='', choices=('Volume', 'Snapshot'), - help=_('Type of resource to cleanup.') + help=_('Type of resource to cleanup.'), ) parser.add_argument( '--service-id', @@ -116,7 +123,7 @@ def get_parser(self, prog_name): help=_( 'The service ID field from the DB, not the UUID of the ' 'service.' - ) + ), ) return parser @@ -138,7 +145,7 @@ def take_action(self, parsed_args): 'disabled': parsed_args.disabled, 'resource_id': parsed_args.resource_id, 'resource_type': parsed_args.resource_type, - 'service_id': parsed_args.service_id + 'service_id': parsed_args.service_id, } filters = {k: v for k, v in filters.items() if v is not None} diff --git a/openstackclient/volume/v3/block_storage_cluster.py b/openstackclient/volume/v3/block_storage_cluster.py index 34b25efcfd..d99ec52b0a 100644 --- a/openstackclient/volume/v3/block_storage_cluster.py +++ b/openstackclient/volume/v3/block_storage_cluster.py @@ -11,21 +11,21 @@ # under the License. from cinderclient import api_versions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ def _format_cluster(cluster, detailed=False): - columns = ( + columns: tuple[str, ...] = ( 'name', 'binary', 'state', 'status', ) - column_headers = ( + column_headers: tuple[str, ...] = ( 'Name', 'Binary', 'State', @@ -76,7 +76,9 @@ class ListBlockStorageCluster(command.Lister): def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( - '--cluster', metavar='', default=None, + '--cluster', + metavar='', + default=None, help=_( 'Filter by cluster name, without backend will list ' 'all clustered services from the same cluster.' @@ -131,7 +133,7 @@ def get_parser(self, prog_name): '--long', action='store_true', default=False, - help=_("List additional fields in output") + help=_("List additional fields in output"), ) return parser @@ -145,7 +147,7 @@ def take_action(self, parsed_args): ) raise exceptions.CommandError(msg) - columns = ('Name', 'Binary', 'State', 'Status') + columns: tuple[str, ...] 
= ('Name', 'Binary', 'State', 'Status') if parsed_args.long: columns += ( 'Num Hosts', @@ -183,7 +185,7 @@ def get_parser(self, prog_name): parser.add_argument( 'cluster', metavar='', - help=_('Name of block storage cluster to update (name only)') + help=_('Name of block storage cluster to update (name only)'), ) parser.add_argument( '--binary', @@ -192,7 +194,7 @@ def get_parser(self, prog_name): help=_( "Name of binary to filter by; defaults to 'cinder-volume' " "(optional)" - ) + ), ) enabled_group = parser.add_mutually_exclusive_group() enabled_group.add_argument( @@ -200,13 +202,13 @@ def get_parser(self, prog_name): action='store_false', dest='disabled', default=None, - help=_('Enable cluster') + help=_('Enable cluster'), ) enabled_group.add_argument( '--disable', action='store_true', dest='disabled', - help=_('Disable cluster') + help=_('Disable cluster'), ) parser.add_argument( '--disable-reason', @@ -215,7 +217,7 @@ def get_parser(self, prog_name): help=_( 'Reason for disabling the cluster ' '(should be used with --disable option)' - ) + ), ) return parser diff --git a/openstackclient/volume/v3/block_storage_log_level.py b/openstackclient/volume/v3/block_storage_log_level.py index d5286cddde..2e2fdc5138 100644 --- a/openstackclient/volume/v3/block_storage_log_level.py +++ b/openstackclient/volume/v3/block_storage_log_level.py @@ -14,11 +14,10 @@ """Block Storage Service action implementations""" -from cinderclient import api_versions -from osc_lib.command import command +from openstack import utils as sdk_utils from osc_lib import exceptions -from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -33,34 +32,39 @@ def get_parser(self, prog_name): parser.add_argument( "--host", metavar="", - default="", - help=_("List block storage service log level of specified host " - "(name only)") + default=None, + help=_( + "List block storage service log level of specified host " + "(name only)" + ), ) parser.add_argument( "--service", metavar="", - default="", + default=None, choices=( - '', + None, '*', 'cinder-api', 'cinder-volume', 'cinder-scheduler', - 'cinder-backup'), - help=_("List block storage service log level of the specified " - "service (name only)") + 'cinder-backup', + ), + help=_( + "List block storage service log level of the specified " + "service (name only)" + ), ) parser.add_argument( "--log-prefix", metavar="", - default="", - help="Prefix for the log, e.g. 'sqlalchemy'" + default=None, + help="Prefix for the log, e.g. 
'sqlalchemy'", ) return parser def take_action(self, parsed_args): - service_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume columns = [ "Binary", "Host", @@ -68,22 +72,24 @@ def take_action(self, parsed_args): "Level", ] - if service_client.api_version < api_versions.APIVersion('3.32'): + if not sdk_utils.supports_microversion(volume_client, '3.32'): msg = _( "--os-volume-api-version 3.32 or greater is required to " "support the 'block storage log level list' command" ) raise exceptions.CommandError(msg) - data = service_client.services.get_log_levels( + data = [] + for entry in volume_client.get_service_log_levels( binary=parsed_args.service, server=parsed_args.host, - prefix=parsed_args.log_prefix) + prefix=parsed_args.log_prefix, + ): + entry_levels = sorted(entry.levels.items(), key=lambda x: x[0]) + for prefix, level in entry_levels: + data.append((entry.binary, entry.host, prefix, level)) - return (columns, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return (columns, data) class BlockStorageLogLevelSet(command.Command): @@ -99,49 +105,55 @@ def get_parser(self, prog_name): metavar="<log-level>", choices=('INFO', 'WARNING', 'ERROR', 'DEBUG'), type=str.upper, - help=_("Desired log level.") + help=_("Desired log level"), ) parser.add_argument( "--host", metavar="<host>", - default="", - help=_("Set block storage service log level of specified host " - "(name only)") + default=None, + help=_( + "Set block storage service log level of specified host " + "(name only)" + ), ) parser.add_argument( "--service", metavar="<service>", - default="", + default=None, choices=( - '', + None, '*', 'cinder-api', 'cinder-volume', 'cinder-scheduler', - 'cinder-backup'), - help=_("Set block storage service log level of specified service " - "(name only)") + 'cinder-backup', + ), + help=_( + "Set block storage service log level of specified service " + "(name only)" + ), ) parser.add_argument( "--log-prefix", metavar="<log-prefix>", - default="", - help="Prefix for the log, e.g. 'sqlalchemy'" + default=None, + help="Prefix for the log, e.g. 'sqlalchemy'", ) return parser def take_action(self, parsed_args): - service_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if service_client.api_version < api_versions.APIVersion('3.32'): + if not sdk_utils.supports_microversion(volume_client, '3.32'): msg = _( "--os-volume-api-version 3.32 or greater is required to " "support the 'block storage log level set' command" ) raise exceptions.CommandError(msg) - service_client.services.set_log_levels( + volume_client.set_service_log_levels( level=parsed_args.level, binary=parsed_args.service, server=parsed_args.host, - prefix=parsed_args.log_prefix) + prefix=parsed_args.log_prefix, + ) diff --git a/openstackclient/volume/v3/block_storage_manage.py b/openstackclient/volume/v3/block_storage_manage.py index 9015f44d60..78756385cc 100644 --- a/openstackclient/volume/v3/block_storage_manage.py +++ b/openstackclient/volume/v3/block_storage_manage.py @@ -13,12 +13,13 @@ """Block Storage Volume/Snapshot Management implementations""" +import argparse + from cinderclient import api_versions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils -from oslo_utils import strutils +from openstackclient import command from openstackclient.i18n import _ @@ -38,50 +39,68 @@ def get_parser(self, prog_name): "host", metavar="<host>", nargs='?', - help=_('Cinder host on which to list manageable volumes. 
' - 'Takes the form: host@backend-name#pool') + help=_( + 'Cinder host on which to list manageable volumes. ' + 'Takes the form: host@backend-name#pool' + ), ) host_group.add_argument( "--cluster", metavar="<cluster>", - help=_('Cinder cluster on which to list manageable volumes. ' - 'Takes the form: cluster@backend-name#pool. ' - '(supported by --os-volume-api-version 3.17 or later)') + help=_( + 'Cinder cluster on which to list manageable volumes. ' + 'Takes the form: cluster@backend-name#pool. ' + '(supported by --os-volume-api-version 3.17 or later)' + ), + ) + parser.add_argument( + '--long', + action='store_true', + default=False, + help=_('List additional fields in output'), ) + # TODO(stephenfin): Remove this in a future major version bump parser.add_argument( '--detailed', metavar='<detailed>', - default=True, - help=_('Returns detailed information (Default=True).') + default=None, + help=argparse.SUPPRESS, ) parser.add_argument( '--marker', metavar='<marker>', default=None, - help=_('Begin returning volumes that appear later in the volume ' - 'list than that represented by this reference. This ' - 'reference should be json like. Default=None.') + help=_( + 'Begin returning volumes that appear later in the volume ' + 'list than that represented by this reference. This ' + 'reference should be json like. Default=None.' + ), ) parser.add_argument( '--limit', metavar='<limit>', default=None, - help=_('Maximum number of volumes to return. Default=None.') + help=_('Maximum number of volumes to return. Default=None.'), ) parser.add_argument( '--offset', metavar='<offset>', default=None, - help=_('Number of volumes to skip after marker. Default=None.') + help=_('Number of volumes to skip after marker. Default=None.'), ) parser.add_argument( '--sort', metavar='<key>[:<direction>]', default=None, - help=(_('Comma-separated list of sort keys and directions in the ' + help=( + _( + 'Comma-separated list of sort keys and directions in the ' 'form of <key>[:<direction>]. ' 'Valid keys: %s. ' - 'Default=None.') % ', '.join(SORT_MANAGEABLE_KEY_VALUES)) + 'Default=None.' + ) + % ', '.join(SORT_MANAGEABLE_KEY_VALUES) + ), ) return parser @@ -110,8 +129,30 @@ def take_action(self, parsed_args): ) raise exceptions.CommandError(msg) - detailed = strutils.bool_from_string(parsed_args.detailed) - cluster = getattr(parsed_args, 'cluster', None) + detailed = parsed_args.long + if parsed_args.detailed is not None: + detailed = parsed_args.detailed.lower().strip() in { + '1', + 't', + 'true', + 'on', + 'y', + 'yes', + } + if detailed: + # if the user requested e.g. '--detailed true' then they should + # not request '--long' + msg = _( + "The --detailed option has been deprecated. " + "Use --long instead." + ) + self.log.warning(msg) + else: + # if the user requested e.g. '--detailed false' then they + # should simply stop requesting this since the default has + # changed + msg = _("The --detailed option has been deprecated. 
Unset it.") + self.log.warning(msg) columns = [ 'reference', @@ -119,11 +160,13 @@ 'safe_to_manage', ] if detailed: - columns.extend([ - 'reason_not_safe', - 'cinder_id', - 'extra_info', - ]) + columns.extend( + [ + 'reason_not_safe', + 'cinder_id', + 'extra_info', + ] + ) data = volume_client.volumes.list_manageable( host=parsed_args.host, @@ -132,12 +175,19 @@ limit=parsed_args.limit, offset=parsed_args.offset, sort=parsed_args.sort, - cluster=cluster) + cluster=parsed_args.cluster, + ) - return (columns, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return ( + columns, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) class BlockStorageManageSnapshots(command.Lister): @@ -153,50 +203,68 @@ def get_parser(self, prog_name): "host", metavar="<host>", nargs='?', - help=_('Cinder host on which to list manageable snapshots. ' - 'Takes the form: host@backend-name#pool') + help=_( + 'Cinder host on which to list manageable snapshots. ' + 'Takes the form: host@backend-name#pool' + ), ) host_group.add_argument( "--cluster", metavar="<cluster>", - help=_('Cinder cluster on which to list manageable snapshots. ' - 'Takes the form: cluster@backend-name#pool. ' - '(supported by --os-volume-api-version 3.17 or later)') + help=_( + 'Cinder cluster on which to list manageable snapshots. ' + 'Takes the form: cluster@backend-name#pool. ' + '(supported by --os-volume-api-version 3.17 or later)' + ), ) + parser.add_argument( + '--long', + action='store_true', + default=False, + help=_('List additional fields in output'), + ) + # TODO(stephenfin): Remove this in a future major version bump parser.add_argument( '--detailed', metavar='<detailed>', - default=True, - help=_('Returns detailed information (Default=True).') + default=None, + help=argparse.SUPPRESS, ) parser.add_argument( '--marker', metavar='<marker>', default=None, - help=_('Begin returning snapshots that appear later in the ' - 'snapshot list than that represented by this reference. ' - 'This reference should be json like. Default=None.') + help=_( + 'Begin returning snapshots that appear later in the ' + 'snapshot list than that represented by this reference. ' + 'This reference should be json like. Default=None.' + ), ) parser.add_argument( '--limit', metavar='<limit>', default=None, - help=_('Maximum number of snapshots to return. Default=None.') + help=_('Maximum number of snapshots to return. Default=None.'), ) parser.add_argument( '--offset', metavar='<offset>', default=None, - help=_('Number of snapshots to skip after marker. Default=None.') + help=_('Number of snapshots to skip after marker. Default=None.'), ) parser.add_argument( '--sort', metavar='<key>[:<direction>]', default=None, - help=(_('Comma-separated list of sort keys and directions in the ' + help=( + _( + 'Comma-separated list of sort keys and directions in the ' 'form of <key>[:<direction>]. ' 'Valid keys: %s. ' - 'Default=None.') % ', '.join(SORT_MANAGEABLE_KEY_VALUES)) + 'Default=None.' + ) + % ', '.join(SORT_MANAGEABLE_KEY_VALUES) + ), ) return parser @@ -227,8 +295,30 @@ def take_action(self, parsed_args): ) raise exceptions.CommandError(msg) - detailed = strutils.bool_from_string(parsed_args.detailed) - cluster = getattr(parsed_args, 'cluster', None) + detailed = parsed_args.long + if parsed_args.detailed is not None: + detailed = parsed_args.detailed.lower().strip() in { + '1', + 't', + 'true', + 'on', + 'y', + 'yes', + } + if detailed: + # if the user requested e.g. 
'--detailed true' then they should + # not request '--long' + msg = _( + "The --detailed option has been deprecated. " + "Use --long instead." + ) + self.log.warning(msg) + else: + # if the user requested e.g. '--detailed false' then they + # should simply stop requesting this since the default has + # changed + msg = _("The --detailed option has been deprecated. Unset it.") + self.log.warning(msg) columns = [ 'reference', @@ -237,11 +327,13 @@ def take_action(self, parsed_args): 'source_reference', ] if detailed: - columns.extend([ - 'reason_not_safe', - 'cinder_id', - 'extra_info', - ]) + columns.extend( + [ + 'reason_not_safe', + 'cinder_id', + 'extra_info', + ] + ) data = volume_client.volume_snapshots.list_manageable( host=parsed_args.host, @@ -250,9 +342,16 @@ def take_action(self, parsed_args): limit=parsed_args.limit, offset=parsed_args.offset, sort=parsed_args.sort, - cluster=cluster) + cluster=parsed_args.cluster, + ) - return (columns, - (utils.get_item_properties( - s, columns, - ) for s in data)) + return ( + columns, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) diff --git a/openstackclient/volume/v3/block_storage_resource_filter.py b/openstackclient/volume/v3/block_storage_resource_filter.py index 4bcacf90c2..fc564386e2 100644 --- a/openstackclient/volume/v3/block_storage_resource_filter.py +++ b/openstackclient/volume/v3/block_storage_resource_filter.py @@ -12,11 +12,12 @@ """Volume V3 Resource Filters implementations""" -from cinderclient import api_versions -from osc_lib.command import command +from openstack import utils as sdk_utils +from osc_lib.cli import format_columns from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -24,9 +25,9 @@ class ListBlockStorageResourceFilter(command.Lister): _description = _('List block storage resource filters') def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.33'): + if not sdk_utils.supports_microversion(volume_client, '3.33'): msg = _( "--os-volume-api-version 3.33 or greater is required to " "support the 'block storage resource filter list' command" @@ -37,12 +38,20 @@ def take_action(self, parsed_args): 'Resource', 'Filters', ) + columns = ( + 'resource', + 'filters', + ) - data = volume_client.resource_filters.list() + data = volume_client.resource_filters() + formatters = {'filters': format_columns.ListColumn} return ( column_headers, - (utils.get_item_properties(s, column_headers) for s in data) + ( + utils.get_item_properties(s, columns, formatters=formatters) + for s in data + ), ) @@ -54,24 +63,22 @@ def get_parser(self, prog_name): parser.add_argument( 'resource', metavar='', - help=_('Resource to show filters for (name).') + help=_('Resource to show filters for (name).'), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.33'): + if not sdk_utils.supports_microversion(volume_client, '3.33'): msg = _( "--os-volume-api-version 3.33 or greater is required to " "support the 'block storage resource filter show' command" ) raise exceptions.CommandError(msg) - data = volume_client.resource_filters.list( - resource=parsed_args.resource - ) + data = 
volume_client.resource_filters(resource=parsed_args.resource) if not data: msg = _( "No resource filter with a name of {parsed_args.resource}' " @@ -80,4 +87,19 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) resource_filter = next(data) - return zip(*sorted(resource_filter._info.items())) + column_headers = ( + 'Resource', + 'Filters', + ) + columns = ( + 'resource', + 'filters', + ) + formatters = {'filters': format_columns.ListColumn} + + return ( + column_headers, + utils.get_dict_properties( + resource_filter, columns, formatters=formatters + ), + ) diff --git a/openstackclient/volume/v3/service.py b/openstackclient/volume/v3/service.py new file mode 100644 index 0000000000..eecd8e0d06 --- /dev/null +++ b/openstackclient/volume/v3/service.py @@ -0,0 +1,146 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Service action implementations""" + +from openstack import utils as sdk_utils +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient import command +from openstackclient.i18n import _ + + +class ListService(command.Lister): + _description = _("List service command") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "--host", + metavar="", + help=_("List services on specified host (name only)"), + ) + parser.add_argument( + "--service", + metavar="", + help=_("List only specified service (name only)"), + ) + parser.add_argument( + "--long", + action="store_true", + default=False, + help=_("List additional fields in output"), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + columns: tuple[str, ...] = ( + "binary", + "host", + "availability_zone", + "status", + "state", + "updated_at", + ) + column_names: tuple[str, ...] 
= ( + "Binary", + "Host", + "Zone", + "Status", + "State", + "Updated At", + ) + + if sdk_utils.supports_microversion(volume_client, '3.7'): + columns += ("cluster",) + column_names += ("Cluster",) + if sdk_utils.supports_microversion(volume_client, '3.49'): + columns += ("backend_state",) + column_names += ("Backend State",) + if parsed_args.long: + columns += ("disabled_reason",) + column_names += ("Disabled Reason",) + + data = volume_client.services( + host=parsed_args.host, binary=parsed_args.service + ) + return ( + column_names, + ( + utils.get_item_properties( + s, + columns, + ) + for s in data + ), + ) + + +class SetService(command.Command): + _description = _("Set volume service properties") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "host", + metavar="", + help=_("Name of host"), + ) + parser.add_argument( + "service", + metavar="", + help=_("Name of service (Binary name)"), + ) + enabled_group = parser.add_mutually_exclusive_group() + enabled_group.add_argument( + "--enable", action="store_true", help=_("Enable volume service") + ) + enabled_group.add_argument( + "--disable", action="store_true", help=_("Disable volume service") + ) + parser.add_argument( + "--disable-reason", + metavar="", + help=_( + "Reason for disabling the service " + "(should be used with --disable option)" + ), + ) + return parser + + def take_action(self, parsed_args): + if parsed_args.disable_reason and not parsed_args.disable: + msg = _( + "Cannot specify option --disable-reason without " + "--disable specified." + ) + raise exceptions.CommandError(msg) + + volume_client = self.app.client_manager.sdk_connection.volume + + service = volume_client.find_service( + parsed_args.service, ignore_missing=False, host=parsed_args.host + ) + + if parsed_args.enable: + service.enable(volume_client) + + if parsed_args.disable: + service.disable( + volume_client, + reason=parsed_args.disable_reason, + ) diff --git a/openstackclient/volume/v3/volume.py b/openstackclient/volume/v3/volume.py index 4b159688e8..50ea77fb5a 100644 --- a/openstackclient/volume/v3/volume.py +++ b/openstackclient/volume/v3/volume.py @@ -14,38 +14,1195 @@ """Volume V3 Volume action implementations""" +import argparse +import copy +import functools import logging +import typing as ty -from cinderclient import api_versions +from cliff import columns as cliff_columns +from openstack.block_storage.v3 import volume as _volume +from openstack import exceptions as sdk_exceptions +from openstack import utils as sdk_utils from osc_lib.cli import format_columns -from osc_lib.command import command +from osc_lib.cli import parseractions from osc_lib import exceptions from osc_lib import utils +from openstackclient.api import volume_v3 +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ +from openstackclient.identity import common as identity_common LOG = logging.getLogger(__name__) -class VolumeSummary(command.ShowOne): - _description = _("Show a summary of all volumes in this deployment.") +class KeyValueHintAction(argparse.Action): + """Uses KeyValueAction or KeyValueAppendAction based on the given key""" + + APPEND_KEYS = ('same_host', 'different_host') + + def __init__(self, *args, **kwargs): + self._key_value_action = parseractions.KeyValueAction(*args, **kwargs) + self._key_value_append_action = parseractions.KeyValueAppendAction( + *args, **kwargs + ) + super().__init__(*args, **kwargs) + + def __call__(self, parser, 
namespace, values, option_string=None): + if values.startswith(self.APPEND_KEYS): + self._key_value_append_action( + parser, namespace, values, option_string=option_string + ) + else: + self._key_value_action( + parser, namespace, values, option_string=option_string + ) + + +class AttachmentsColumn(cliff_columns.FormattableColumn[list[ty.Any]]): + """Formattable column for attachments column. + + Unlike the parent FormattableColumn class, the initializer of the + class takes server_cache as the second argument. + osc_lib.utils.get_item_properties instantiate cliff FormattableColumn + object with a single parameter "column value", so you need to pass + a partially initialized class like + ``functools.partial(AttachmentsColumn, server_cache)``. + """ + + def __init__(self, value, server_cache=None): + super().__init__(value) + self._server_cache = server_cache or {} + + def human_readable(self): + """Return a formatted string of a volume's attached instances + + :rtype: a string of formatted instances + """ + + msg = '' + for attachment in self._value: + server = attachment['server_id'] + if server in self._server_cache.keys(): + server = self._server_cache[server].name + device = attachment['device'] + msg += f'Attached to {server} on {device} ' + return msg + + +def _format_volume(volume: _volume.Volume) -> dict[str, ty.Any]: + # Some columns returned by openstacksdk should not be shown because they're + # either irrelevant or duplicates + ignored_columns = { + # computed columns + 'location', + # create-only columns + 'OS-SCH-HNT:scheduler_hints', + 'imageRef', + # removed columns + 'os-volume-replication:driver_data', + 'os-volume-replication:extended_status', + # unnecessary columns + 'links', + } + optional_columns = { + # only present if part of a consistency group + 'consistencygroup_id', + # only present if the volume is encrypted + 'encryption_key_id', + # only present if there are image properties associated + 'volume_image_metadata', + } + + info = volume.to_dict(original_names=True) + data = {} + for key, value in info.items(): + if key in ignored_columns: + continue + + if key in optional_columns: + if info[key] is None: + continue + + data[key] = value + + data.update( + { + 'properties': format_columns.DictColumn(data.pop('metadata')), + 'type': data.pop('volume_type'), + } + ) + + return data + + +class CreateVolume(command.ShowOne): + _description = _("Create new volume") + + @staticmethod + def _check_size_arg(args): + """Check whether --size option is required or not. + + Require size parameter in case if any of the following is not + specified: + + * snapshot + * source volume + * backup + * remote source (volume to be managed) + """ + + if ( + args.snapshot or args.source or args.backup or args.remote_source + ) is None and args.size is None: + msg = _( + "--size is a required option if none of --snapshot, " + "--backup, --source, or --remote-source are provided." 
+ ) + raise exceptions.CommandError(msg) + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "name", + metavar="", + nargs="?", + help=_("Volume name"), + ) + parser.add_argument( + "--size", + metavar="", + type=int, + help=_( + "Volume size in GB (required unless --snapshot or " + "--source specified)" + ), + ) + parser.add_argument( + "--type", + metavar="", + help=_("Set the type of volume"), + ) + source_group = parser.add_mutually_exclusive_group() + source_group.add_argument( + "--image", + metavar="", + help=_("Use as source of volume (name or ID)"), + ) + source_group.add_argument( + "--snapshot", + metavar="", + help=_("Use as source of volume (name or ID)"), + ) + source_group.add_argument( + "--source", + metavar="", + help=_("Volume to clone (name or ID)"), + ) + source_group.add_argument( + "--source-replicated", + metavar="", + help=argparse.SUPPRESS, + ) + source_group.add_argument( + "--backup", + metavar="", + help=_( + "Restore backup to a volume (name or ID) " + "(supported by --os-volume-api-version 3.47 or later)" + ), + ) + source_group.add_argument( + "--remote-source", + metavar="", + action=parseractions.KeyValueAction, + help=_( + "The attribute(s) of the existing remote volume " + "(admin required) (repeat option to specify multiple " + "attributes, e.g.: '--remote-source source-name=test_name " + "--remote-source source-id=test_id')" + ), + ) + parser.add_argument( + "--description", + metavar="", + help=_("Volume description"), + ) + parser.add_argument( + "--availability-zone", + metavar="", + help=_("Create volume in "), + ) + parser.add_argument( + "--consistency-group", + metavar="consistency-group>", + help=_("Consistency group where the new volume belongs to"), + ) + parser.add_argument( + "--property", + metavar="", + action=parseractions.KeyValueAction, + dest="properties", + help=_( + "Set a property to this volume " + "(repeat option to set multiple properties)" + ), + ) + parser.add_argument( + "--hint", + metavar="", + action=KeyValueHintAction, + help=_( + "Arbitrary scheduler hint key-value pairs to help creating " + "a volume. Repeat the option to set multiple hints. " + "'same_host' and 'different_host' get values appended when " + "repeated, all other keys take the last given value" + ), + ) + bootable_group = parser.add_mutually_exclusive_group() + bootable_group.add_argument( + "--bootable", + action="store_true", + dest="bootable", + default=None, + help=_("Mark volume as bootable"), + ) + bootable_group.add_argument( + "--non-bootable", + action="store_false", + dest="bootable", + default=None, + help=_("Mark volume as non-bootable (default)"), + ) + readonly_group = parser.add_mutually_exclusive_group() + readonly_group.add_argument( + "--read-only", + action="store_true", + dest="read_only", + default=None, + help=_("Set volume to read-only access mode"), + ) + readonly_group.add_argument( + "--read-write", + action="store_false", + dest="read_only", + default=None, + help=_("Set volume to read-write access mode (default)"), + ) + parser.add_argument( + "--host", + metavar="", + help=_( + "Cinder host on which the existing volume resides; " + "takes the form: host@backend-name#pool. This is only " + "used along with the --remote-source option." + ), + ) + parser.add_argument( + "--cluster", + metavar="", + help=_( + "Cinder cluster on which the existing volume resides; " + "takes the form: cluster@backend-name#pool. This is only " + "used along with the --remote-source option. 
" + "(supported by --os-volume-api-version 3.16 or above)" + ), + ) + return parser + + def take_action(self, parsed_args): + self._check_size_arg(parsed_args) + # size is validated in the above call to + # _check_size_arg where we check that size + # should be passed if we are not creating a + # volume from snapshot, backup or source volume + size = parsed_args.size + + volume_client = self.app.client_manager.sdk_connection.volume + image_client = self.app.client_manager.image + + if ( + parsed_args.host or parsed_args.cluster + ) and not parsed_args.remote_source: + msg = _( + "The --host and --cluster options are only supported " + "with --remote-source parameter." + ) + raise exceptions.CommandError(msg) + + if parsed_args.backup and not sdk_utils.supports_microversion( + volume_client, '3.47' + ): + msg = _( + "--os-volume-api-version 3.47 or greater is required " + "to create a volume from backup." + ) + raise exceptions.CommandError(msg) + + if parsed_args.remote_source: + if ( + parsed_args.size + or parsed_args.consistency_group + or parsed_args.hint + or parsed_args.read_only is not None + ): + msg = _( + "The --size, --consistency-group, --hint, --read-only " + "and --read-write options are not supported with the " + "--remote-source parameter." + ) + raise exceptions.CommandError(msg) + if parsed_args.cluster: + if not sdk_utils.supports_microversion(volume_client, '3.16'): + msg = _( + "--os-volume-api-version 3.16 or greater is required " + "to support the cluster parameter." + ) + raise exceptions.CommandError(msg) + if parsed_args.cluster and parsed_args.host: + msg = _( + "Only one of --host or --cluster needs to be specified " + "to manage a volume." + ) + raise exceptions.CommandError(msg) + if not parsed_args.cluster and not parsed_args.host: + msg = _( + "One of --host or --cluster needs to be specified to " + "manage a volume." + ) + raise exceptions.CommandError(msg) + volume = volume_client.manage_volume( + host=parsed_args.host, + cluster=parsed_args.cluster, + ref=parsed_args.remote_source, + name=parsed_args.name, + description=parsed_args.description, + volume_type=parsed_args.type, + availability_zone=parsed_args.availability_zone, + metadata=parsed_args.properties, + bootable=parsed_args.bootable, + ) + data = _format_volume(volume) + return zip(*sorted(data.items())) + + source_volume = None + if parsed_args.source: + source_volume_obj = volume_client.find_volume( + parsed_args.source, ignore_missing=False + ) + source_volume = source_volume_obj.id + size = max(size or 0, source_volume_obj.size) + + consistency_group = None + if parsed_args.consistency_group: + consistency_group = volume_v3.find_consistency_group( + volume_client, parsed_args.consistency_group + )['id'] + + image = None + if parsed_args.image: + image = image_client.find_image( + parsed_args.image, ignore_missing=False + ).id + + snapshot = None + if parsed_args.snapshot: + snapshot_obj = volume_client.find_snapshot( + parsed_args.snapshot, ignore_missing=False + ) + snapshot = snapshot_obj.id + # Cinder requires a value for size when creating a volume + # even if creating from a snapshot. Cinder will create the + # volume with at least the same size as the snapshot anyway, + # so since we have the object here, just override the size + # value if it's either not given or is smaller than the + # snapshot size. 
+ size = max(size or 0, snapshot_obj.size) + + backup = None + if parsed_args.backup: + backup_obj = volume_client.find_backup( + parsed_args.backup, ignore_missing=False + ) + backup = backup_obj.id + # As above + size = max(size or 0, backup_obj.size) + + volume = volume_client.create_volume( + size=size, + snapshot_id=snapshot, + name=parsed_args.name, + description=parsed_args.description, + volume_type=parsed_args.type, + availability_zone=parsed_args.availability_zone, + metadata=parsed_args.properties, + image_id=image, + source_volume_id=source_volume, + consistency_group_id=consistency_group, + scheduler_hints=parsed_args.hint, + backup_id=backup, + ) + + if parsed_args.bootable is not None: + try: + if utils.wait_for_status( + volume_client.get_volume, + volume.id, + success_status=['available'], + error_status=['error'], + sleep_time=1, + ): + volume_client.set_volume_bootable_status( + volume, parsed_args.bootable + ) + else: + msg = _( + "Volume status is not available for setting boot state" + ) + raise exceptions.CommandError(msg) + except Exception as e: + LOG.error(_("Failed to set volume bootable property: %s"), e) + + if parsed_args.read_only is not None: + try: + if utils.wait_for_status( + volume_client.get_volume, + volume.id, + success_status=['available'], + error_status=['error'], + sleep_time=1, + ): + volume_client.set_volume_readonly( + volume, parsed_args.read_only + ) + else: + msg = _( + "Volume status is not available for setting it" + "read only." + ) + raise exceptions.CommandError(msg) + except Exception as e: + LOG.error( + _("Failed to set volume read-only access mode flag: %s"), + e, + ) + + data = _format_volume(volume) + return zip(*sorted(data.items())) + + +class DeleteVolume(command.Command): + _description = _("Delete volume(s)") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "volumes", + metavar="", + nargs="+", + help=_("Volume(s) to delete (name or ID)"), + ) + group = parser.add_mutually_exclusive_group() + group.add_argument( + "--force", + action="store_true", + help=_( + "Attempt forced removal of volume(s), regardless of state " + "(defaults to False)" + ), + ) + group.add_argument( + "--purge", + action="store_true", + help=_( + "Remove any snapshots along with volume(s) (defaults to False)" + ), + ) + parser.add_argument( + '--remote', + action='store_true', + help=_("Specify this parameter to unmanage a volume."), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + result = 0 + + if parsed_args.remote and (parsed_args.force or parsed_args.purge): + msg = _( + "The --force and --purge options are not " + "supported with the --remote parameter." 
+ ) + raise exceptions.CommandError(msg) + + for volume in parsed_args.volumes: + try: + volume_obj = volume_client.find_volume( + volume, ignore_missing=False + ) + if parsed_args.remote: + volume_client.unmanage_volume(volume_obj.id) + else: + volume_client.delete_volume( + volume_obj.id, + force=parsed_args.force, + cascade=parsed_args.purge, + ) + except Exception as e: + result += 1 + LOG.error( + _( + "Failed to delete volume with " + "name or ID '%(volume)s': %(e)s" + ), + {'volume': volume, 'e': e}, + ) + + if result > 0: + total = len(parsed_args.volumes) + msg = _("%(result)s of %(total)s volumes failed to delete.") % { + 'result': result, + 'total': total, + } + raise exceptions.CommandError(msg) + + +class ListVolume(command.Lister): + _description = _("List volumes") def get_parser(self, prog_name): parser = super().get_parser(prog_name) + parser.add_argument( + '--project', + metavar='', + help=_('Filter results by project (name or ID) (admin only)'), + ) + identity_common.add_project_domain_option_to_parser(parser) + parser.add_argument( + '--user', + metavar='', + help=_('Filter results by user (name or ID) (admin only)'), + ) + identity_common.add_user_domain_option_to_parser(parser) + parser.add_argument( + '--name', + metavar='', + help=_('Filter results by volume name'), + ) + parser.add_argument( + '--status', + metavar='', + help=_('Filter results by status'), + ) + parser.add_argument( + '--property', + metavar='', + action=parseractions.KeyValueAction, + dest='properties', + help=_( + 'Filter by a property on the volume list ' + '(repeat option to filter by multiple properties) ' + ), + ) parser.add_argument( '--all-projects', action='store_true', default=False, help=_('Include all projects (admin only)'), ) + parser.add_argument( + '--long', + action='store_true', + default=False, + help=_('List additional fields in output'), + ) + pagination.add_marker_pagination_option_to_parser(parser) return parser def take_action(self, parsed_args): + volume_client = self.app.client_manager.volume + identity_client = self.app.client_manager.identity + + if parsed_args.long: + columns = [ + 'ID', + 'Name', + 'Status', + 'Size', + 'Volume Type', + 'Bootable', + 'Attachments', + 'Metadata', + ] + column_headers = copy.deepcopy(columns) + column_headers[4] = 'Type' + column_headers[6] = 'Attached to' + column_headers[7] = 'Properties' + else: + columns = [ + 'ID', + 'Name', + 'Status', + 'Size', + 'Attachments', + ] + column_headers = copy.deepcopy(columns) + column_headers[4] = 'Attached to' + + project_id = None + if parsed_args.project: + project_id = identity_common.find_project( + identity_client, + parsed_args.project, + parsed_args.project_domain, + ).id + + user_id = None + if parsed_args.user: + user_id = identity_common.find_user( + identity_client, parsed_args.user, parsed_args.user_domain + ).id + # set value of 'all_tenants' when using project option + all_projects = bool(parsed_args.project) or parsed_args.all_projects + + search_opts = { + 'all_tenants': all_projects, + 'project_id': project_id, + 'user_id': user_id, + 'name': parsed_args.name, + 'status': parsed_args.status, + 'metadata': parsed_args.properties, + } + + data = volume_client.volumes.list( + search_opts=search_opts, + marker=parsed_args.marker, + limit=parsed_args.limit, + ) + + do_server_list = False + + for vol in data: + if vol.status == 'in-use': + do_server_list = True + break + + # Cache the server list + server_cache = {} + if do_server_list: + try: + compute_client = 
self.app.client_manager.compute + for s in compute_client.servers(): + server_cache[s.id] = s + except sdk_exceptions.SDKException: # noqa: S110 + # Just forget it if there's any trouble + pass + AttachmentsColumnWithCache = functools.partial( + AttachmentsColumn, server_cache=server_cache + ) + + column_headers = utils.backward_compat_col_lister( + column_headers, parsed_args.columns, {'Display Name': 'Name'} + ) + + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={ + 'Metadata': format_columns.DictColumn, + 'Attachments': AttachmentsColumnWithCache, + }, + ) + for s in data + ), + ) + + +class MigrateVolume(command.Command): + _description = _("Migrate volume to a new host") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'volume', + metavar="", + help=_("Volume to migrate (name or ID)"), + ) + parser.add_argument( + '--host', + metavar="", + required=True, + help=_( + "Destination host (takes the form: host@backend-name#pool)" + ), + ) + parser.add_argument( + '--force-host-copy', + action="store_true", + help=_( + "Enable generic host-based force-migration, " + "which bypasses driver optimizations" + ), + ) + parser.add_argument( + '--lock-volume', + action="store_true", + help=_( + "If specified, the volume state will be locked " + "and will not allow a migration to be aborted " + "(possibly by another operation)" + ), + ) + # TODO(stephenfin): Add --cluster argument + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + volume = volume_client.find_volume( + parsed_args.volume, ignore_missing=False + ) + volume_client.migrate_volume( + volume.id, + host=parsed_args.host, + force_host_copy=parsed_args.force_host_copy, + lock_volume=parsed_args.lock_volume, + ) + + +class SetVolume(command.Command): + _description = _("Set volume properties") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'volume', + metavar='', + help=_('Volume to modify (name or ID)'), + ) + parser.add_argument( + '--name', + metavar='', + help=_('New volume name'), + ) + parser.add_argument( + '--size', + metavar='', + type=int, + help=_('Extend volume size in GB'), + ) + parser.add_argument( + '--description', + metavar='', + help=_('New volume description'), + ) + parser.add_argument( + "--no-property", + dest="no_property", + action="store_true", + help=_( + "Remove all properties from " + "(specify both --no-property and --property to " + "remove the current properties before setting " + "new properties.)" + ), + ) + parser.add_argument( + '--property', + metavar='', + action=parseractions.KeyValueAction, + dest='properties', + help=_( + 'Set a property on this volume ' + '(repeat option to set multiple properties)' + ), + ) + parser.add_argument( + '--image-property', + metavar='', + action=parseractions.KeyValueAction, + dest='image_properties', + help=_( + 'Set an image property on this volume ' + '(repeat option to set multiple image properties)' + ), + ) + parser.add_argument( + "--state", + metavar="", + choices=[ + 'available', + 'error', + 'creating', + 'deleting', + 'in-use', + 'attaching', + 'detaching', + 'error_deleting', + 'maintenance', + ], + help=_( + 'New volume state ("available", "error", "creating", ' + '"deleting", "in-use", "attaching", "detaching", ' + '"error_deleting" or "maintenance") (admin only) ' + '(This option simply changes the state of the volume ' + 'in the 
database with no regard to actual status, ' + 'exercise caution when using)' + ), + ) + attached_group = parser.add_mutually_exclusive_group() + attached_group.add_argument( + "--attached", + action="store_true", + help=_( + 'Set volume attachment status to "attached" ' + '(admin only) ' + '(This option simply changes the state of the volume ' + 'in the database with no regard to actual status, ' + 'exercise caution when using)' + ), + ) + attached_group.add_argument( + "--detached", + action="store_true", + help=_( + 'Set volume attachment status to "detached" ' + '(admin only) ' + '(This option simply changes the state of the volume ' + 'in the database with no regard to actual status, ' + 'exercise caution when using)' + ), + ) + parser.add_argument( + '--type', + metavar='', + help=_('New volume type (name or ID)'), + ) + parser.add_argument( + '--retype-policy', + metavar='', + choices=['never', 'on-demand'], + help=argparse.SUPPRESS, + ) + parser.add_argument( + '--migration-policy', + metavar='', + choices=['never', 'on-demand'], + help=_( + 'Migration policy while re-typing volume ' + '("never" or "on-demand", default is "never" ) ' + '(available only when --type option is specified)' + ), + ) + bootable_group = parser.add_mutually_exclusive_group() + bootable_group.add_argument( + "--bootable", + action="store_true", + dest="bootable", + default=None, + help=_("Mark volume as bootable"), + ) + bootable_group.add_argument( + "--non-bootable", + action="store_false", + dest="bootable", + default=None, + help=_("Mark volume as non-bootable"), + ) + readonly_group = parser.add_mutually_exclusive_group() + readonly_group.add_argument( + "--read-only", + action="store_true", + dest="read_only", + default=None, + help=_("Set volume to read-only access mode"), + ) + readonly_group.add_argument( + "--read-write", + action="store_false", + dest="read_only", + default=None, + help=_("Set volume to read-write access mode"), + ) + return parser + + def take_action(self, parsed_args): volume_client = self.app.client_manager.volume + volume = utils.find_resource(volume_client.volumes, parsed_args.volume) - if volume_client.api_version < api_versions.APIVersion('3.12'): + result = 0 + if parsed_args.retype_policy: + msg = _( + "The '--retype-policy' option has been deprecated in favor " + "of '--migration-policy' option. The '--retype-policy' option " + "will be removed in a future release. Please use " + "'--migration-policy' instead." + ) + self.log.warning(msg) + + if parsed_args.size: + try: + if parsed_args.size <= volume.size: + msg = ( + _("New size must be greater than %s GB") % volume.size + ) + raise exceptions.CommandError(msg) + if volume.status not in ('available', 'in-use'): + msg = ( + _( + "Volume is in %s state, it must be available " + "or in-use before size can be extended." + ) + % volume.status + ) + raise exceptions.CommandError(msg) + if ( + volume.status == 'in-use' + and not volume_client.api_version.matches('3.42') + ): + msg = _( + "--os-volume-api-version 3.42 or greater is " + "required to extend in-use volumes." 
+ ) + raise exceptions.CommandError(msg) + volume_client.volumes.extend(volume.id, parsed_args.size) + except Exception as e: + LOG.error(_("Failed to set volume size: %s"), e) + result += 1 + + if parsed_args.no_property: + try: + volume_client.volumes.delete_metadata( + volume.id, volume.metadata.keys() + ) + except Exception as e: + LOG.error(_("Failed to clean volume properties: %s"), e) + result += 1 + + if parsed_args.properties: + try: + volume_client.volumes.set_metadata( + volume.id, parsed_args.properties + ) + except Exception as e: + LOG.error(_("Failed to set volume properties: %s"), e) + result += 1 + + if parsed_args.image_properties: + try: + volume_client.volumes.set_image_metadata( + volume.id, parsed_args.image_properties + ) + except Exception as e: + LOG.error(_("Failed to set image properties: %s"), e) + result += 1 + + if parsed_args.state: + try: + volume_client.volumes.reset_state(volume.id, parsed_args.state) + except Exception as e: + LOG.error(_("Failed to set volume state: %s"), e) + result += 1 + + if parsed_args.attached: + try: + volume_client.volumes.reset_state( + volume.id, state=None, attach_status="attached" + ) + except Exception as e: + LOG.error(_("Failed to set volume attach-status: %s"), e) + result += 1 + + if parsed_args.detached: + try: + volume_client.volumes.reset_state( + volume.id, state=None, attach_status="detached" + ) + except Exception as e: + LOG.error(_("Failed to set volume attach-status: %s"), e) + result += 1 + + if parsed_args.bootable is not None: + try: + volume_client.volumes.set_bootable( + volume.id, parsed_args.bootable + ) + except Exception as e: + LOG.error(_("Failed to set volume bootable property: %s"), e) + result += 1 + + if parsed_args.read_only is not None: + try: + volume_client.volumes.update_readonly_flag( + volume.id, parsed_args.read_only + ) + except Exception as e: + LOG.error( + _("Failed to set volume read-only access mode flag: %s"), + e, + ) + result += 1 + + policy = parsed_args.migration_policy or parsed_args.retype_policy + if parsed_args.type: + # get the migration policy + migration_policy = 'never' + if policy: + migration_policy = policy + try: + # find the volume type + volume_type = utils.find_resource( + volume_client.volume_types, parsed_args.type + ) + # reset to the new volume type + volume_client.volumes.retype( + volume.id, volume_type.id, migration_policy + ) + except Exception as e: + LOG.error(_("Failed to set volume type: %s"), e) + result += 1 + elif policy: + # If the "--migration-policy" is specified without "--type" + LOG.warning( + _("'%s' option will not work without '--type' option") + % ( + '--migration-policy' + if parsed_args.migration_policy + else '--retype-policy' + ) + ) + + kwargs = {} + if parsed_args.name: + kwargs['display_name'] = parsed_args.name + if parsed_args.description: + kwargs['display_description'] = parsed_args.description + if kwargs: + try: + volume_client.volumes.update(volume.id, **kwargs) + except Exception as e: + LOG.error( + _( + "Failed to update volume display name " + "or display description: %s" + ), + e, + ) + result += 1 + + if result > 0: + raise exceptions.CommandError( + _("One or more of the set operations failed") + ) + + +class ShowVolume(command.ShowOne): + _description = _("Display volume details") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'volume', + metavar="", + help=_("Volume to display (name or ID)"), + ) + return parser + + def take_action(self, parsed_args): + 
volume_client = self.app.client_manager.sdk_connection.volume + volume = volume_client.find_volume( + parsed_args.volume, ignore_missing=False + ) + + data = _format_volume(volume) + return zip(*sorted(data.items())) + + +class UnsetVolume(command.Command): + _description = _("Unset volume properties") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'volume', + metavar='', + help=_('Volume to modify (name or ID)'), + ) + parser.add_argument( + '--property', + metavar='', + action='append', + dest='properties', + help=_( + 'Remove a property from volume ' + '(repeat option to remove multiple properties)' + ), + ) + parser.add_argument( + '--image-property', + metavar='', + action='append', + dest='image_properties', + help=_( + 'Remove an image property from volume ' + '(repeat option to remove multiple image properties)' + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.volume + volume = utils.find_resource(volume_client.volumes, parsed_args.volume) + + result = 0 + if parsed_args.properties: + try: + volume_client.volumes.delete_metadata( + volume.id, parsed_args.properties + ) + except Exception as e: + LOG.error(_("Failed to unset volume properties: %s"), e) + result += 1 + + if parsed_args.image_properties: + try: + volume_client.volumes.delete_image_metadata( + volume.id, parsed_args.image_properties + ) + except Exception as e: + LOG.error(_("Failed to unset image properties: %s"), e) + result += 1 + + if result > 0: + raise exceptions.CommandError( + _("One or more of the unset operations failed") + ) + + +class VolumeSummary(command.ShowOne): + _description = _("Show a summary of all volumes in this deployment.") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + '--all-projects', + action='store_true', + default=False, + help=_('Include all projects (admin only)'), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + if not sdk_utils.supports_microversion(volume_client, '3.12'): msg = _( "--os-volume-api-version 3.12 or greater is required to " "support the 'volume summary' command" @@ -60,21 +1217,19 @@ def take_action(self, parsed_args): 'Total Count', 'Total Size', ] - if volume_client.api_version.matches('3.36'): + if sdk_utils.supports_microversion(volume_client, '3.36'): columns.append('metadata') column_headers.append('Metadata') # set value of 'all_tenants' when using project option all_projects = parsed_args.all_projects - vol_summary = volume_client.volumes.summary( - all_tenants=all_projects, - ) + vol_summary = volume_client.summary(all_projects) return ( column_headers, - utils.get_dict_properties( - vol_summary['volume-summary'], + utils.get_item_properties( + vol_summary, columns, formatters={'metadata': format_columns.DictColumn}, ), @@ -89,26 +1244,30 @@ def get_parser(self, prog_name): parser.add_argument( 'snapshot', metavar="", - help=_('Name or ID of the snapshot to restore. The snapshot must ' - 'be the most recent one known to cinder.'), + help=_( + 'Name or ID of the snapshot to restore. The snapshot must ' + 'be the most recent one known to cinder.' 
+ ), ) return parser def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume - volume_client = self.app.client_manager.volume - - if volume_client.api_version < api_versions.APIVersion('3.40'): + if not sdk_utils.supports_microversion(volume_client, '3.40'): msg = _( "--os-volume-api-version 3.40 or greater is required to " "support the 'volume revert snapshot' command" ) raise exceptions.CommandError(msg) - snapshot = utils.find_resource( - volume_client.volume_snapshots, parsed_args.snapshot) - volume = utils.find_resource( - volume_client.volumes, snapshot.volume_id) + snapshot = volume_client.find_snapshot( + parsed_args.snapshot, + ignore_missing=False, + ) + volume = volume_client.find_volume( + snapshot.volume_id, + ignore_missing=False, + ) - volume_client.volumes.revert_to_snapshot( - volume=volume, snapshot=snapshot) + volume_client.revert_volume_to_snapshot(volume, snapshot) diff --git a/openstackclient/volume/v3/volume_attachment.py b/openstackclient/volume/v3/volume_attachment.py index 57a6da7342..3201da34bc 100644 --- a/openstackclient/volume/v3/volume_attachment.py +++ b/openstackclient/volume/v3/volume_attachment.py @@ -11,13 +11,16 @@ # under the License. import logging +import typing as ty -from cinderclient import api_versions +from openstack import utils as sdk_utils from osc_lib.cli import format_columns -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import envvars +from openstackclient.common import pagination from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -54,12 +57,12 @@ def _format_attachment(attachment): # VolumeAttachmentManager.create returns a dict while everything else # returns a VolumeAttachment object if isinstance(attachment, dict): - data = [] + data: tuple[ty.Any, ...] 
= () for column in columns: if column == 'connection_info': - data.append(format_columns.DictColumn(attachment[column])) + data += (format_columns.DictColumn(attachment[column]),) continue - data.append(attachment[column]) + data += (attachment[column],) else: data = utils.get_item_properties( attachment, @@ -169,10 +172,10 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume compute_client = self.app.client_manager.compute - if volume_client.api_version < api_versions.APIVersion('3.27'): + if not sdk_utils.supports_microversion(volume_client, '3.27'): msg = _( "--os-volume-api-version 3.27 or greater is required to " "support the 'volume attachment create' command" @@ -180,7 +183,7 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) if parsed_args.mode: - if volume_client.api_version < api_versions.APIVersion('3.54'): + if not sdk_utils.supports_microversion(volume_client, '3.54'): msg = _( "--os-volume-api-version 3.54 or greater is required to " "support the '--mode' option" @@ -199,15 +202,17 @@ def take_action(self, parsed_args): 'mountpoint': parsed_args.mountpoint, } else: - if any({ - parsed_args.initiator, - parsed_args.ip, - parsed_args.platform, - parsed_args.host, - parsed_args.host, - parsed_args.multipath, - parsed_args.mountpoint, - }): + if any( + { + parsed_args.initiator, + parsed_args.ip, + parsed_args.platform, + parsed_args.host, + parsed_args.host, + parsed_args.multipath, + parsed_args.mountpoint, + } + ): msg = _( 'You must specify the --connect option for any of the ' 'connection-specific options such as --initiator to be ' @@ -215,17 +220,19 @@ def take_action(self, parsed_args): ) raise exceptions.CommandError(msg) - volume = utils.find_resource( - volume_client.volumes, - parsed_args.volume, + volume = volume_client.find_volume( + parsed_args.volume, ignore_missing=False ) - server = utils.find_resource( - compute_client.servers, - parsed_args.server, + server = compute_client.find_server( + parsed_args.server, ignore_missing=False ) - attachment = volume_client.attachments.create( - volume.id, connector, server.id, parsed_args.mode) + attachment = volume_client.create_attachment( + volume.id, + connector=connector, + instance=server.id, + mode=parsed_args.mode, + ) return _format_attachment(attachment) @@ -252,16 +259,16 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.27'): + if not sdk_utils.supports_microversion(volume_client, '3.27'): msg = _( "--os-volume-api-version 3.27 or greater is required to " "support the 'volume attachment delete' command" ) raise exceptions.CommandError(msg) - volume_client.attachments.delete(parsed_args.attachment) + volume_client.delete_attachment(parsed_args.attachment) class SetVolumeAttachment(command.ShowOne): @@ -326,9 +333,9 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.27'): + if not sdk_utils.supports_microversion(volume_client, '3.27'): msg = _( "--os-volume-api-version 3.27 or greater is required to " "support the 'volume attachment set' 
command" @@ -345,8 +352,10 @@ def take_action(self, parsed_args): 'mountpoint': parsed_args.mountpoint, } - attachment = volume_client.attachments.update( - parsed_args.attachment, connector) + attachment = volume_client.update_attachment( + parsed_args.attachment, + connector=connector, + ) return _format_attachment(attachment) @@ -364,16 +373,16 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.44'): + if not sdk_utils.supports_microversion(volume_client, '3.44'): msg = _( "--os-volume-api-version 3.44 or greater is required to " "support the 'volume attachment complete' command" ) raise exceptions.CommandError(msg) - volume_client.attachments.complete(parsed_args.attachment) + volume_client.complete_attachment(parsed_args.attachment) class ListVolumeAttachment(command.Lister): @@ -392,7 +401,7 @@ def get_parser(self, prog_name): '--all-projects', dest='all_projects', action='store_true', - default=utils.env('ALL_PROJECTS', default=False), + default=envvars.boolenv('ALL_PROJECTS'), help=_('Shows details for all projects (admin only).'), ) parser.add_argument( @@ -406,20 +415,7 @@ def get_parser(self, prog_name): metavar='', help=_('Filters results by a status. ') + _FILTER_DEPRECATED, ) - parser.add_argument( - '--marker', - metavar='', - help=_( - 'Begin returning volume attachments that appear later in ' - 'volume attachment list than that represented by this ID.' - ), - ) - parser.add_argument( - '--limit', - type=int, - metavar='', - help=_('Maximum number of volume attachments to return.'), - ) + pagination.add_marker_pagination_option_to_parser(parser) # TODO(stephenfin): Add once we have an equivalent command for # 'cinder list-filters' # parser.add_argument( @@ -437,10 +433,10 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume identity_client = self.app.client_manager.identity - if volume_client.api_version < api_versions.APIVersion('3.27'): + if not sdk_utils.supports_microversion(volume_client, '3.27'): msg = _( "--os-volume-api-version 3.27 or greater is required to " "support the 'volume attachment list' command" @@ -463,13 +459,14 @@ def take_action(self, parsed_args): } # Update search option with `filters` # if AppendFilters.filters: - # search_opts.update(shell_utils.extract_filters(AppendFilters.filters)) + # search_opts.update(shell_utils.extract_filters(AppendFilters.filters)) # noqa: E501 # TODO(stephenfin): Implement sorting - attachments = volume_client.attachments.list( + attachments = volume_client.attachments( search_opts=search_opts, marker=parsed_args.marker, - limit=parsed_args.limit) + limit=parsed_args.limit, + ) column_headers = ( 'ID', @@ -486,10 +483,7 @@ def take_action(self, parsed_args): return ( column_headers, - ( - utils.get_item_properties(a, columns) - for a in attachments - ), + (utils.get_item_properties(a, columns) for a in attachments), ) @@ -506,15 +500,15 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.27'): + if not sdk_utils.supports_microversion(volume_client, '3.27'): msg = _( 
"--os-volume-api-version 3.27 or greater is required to " "support the 'volume attachment show' command" ) raise exceptions.CommandError(msg) - attachment = volume_client.attachments.show(parsed_args.attachment) + attachment = volume_client.get_attachment(parsed_args.attachment) return _format_attachment(attachment) diff --git a/openstackclient/volume/v3/volume_backup.py b/openstackclient/volume/v3/volume_backup.py new file mode 100644 index 0000000000..df9a17eb03 --- /dev/null +++ b/openstackclient/volume/v3/volume_backup.py @@ -0,0 +1,700 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Volume v3 Backup action implementations""" + +import copy +import functools +import logging + +from cliff import columns as cliff_columns +from openstack import utils as sdk_utils +from osc_lib.cli import parseractions +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient import command +from openstackclient.common import pagination +from openstackclient.i18n import _ + +LOG = logging.getLogger(__name__) + + +class VolumeIdColumn(cliff_columns.FormattableColumn[str]): + """Formattable column for volume ID column. + + Unlike the parent FormattableColumn class, the initializer of the + class takes volume_cache as the second argument. + osc_lib.utils.get_item_properties instantiate cliff FormattableColumn + object with a single parameter "column value", so you need to pass + a partially initialized class like + ``functools.partial(VolumeIdColumn, volume_cache)``. 
+ """ + + def __init__(self, value, volume_cache=None): + super().__init__(value) + self._volume_cache = volume_cache or {} + + def human_readable(self): + """Return a volume name if available + + :rtype: either the volume ID or name + """ + volume_id = self._value + volume = volume_id + if volume_id in self._volume_cache.keys(): + volume = self._volume_cache[volume_id].name + return volume + + +class CreateVolumeBackup(command.ShowOne): + _description = _("Create new volume backup") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "volume", + metavar="", + help=_("Volume to backup (name or ID)"), + ) + parser.add_argument( + "--name", metavar="", help=_("Name of the backup") + ) + parser.add_argument( + "--description", + metavar="", + help=_("Description of the backup"), + ) + parser.add_argument( + "--container", + metavar="", + help=_("Optional backup container name"), + ) + parser.add_argument( + "--snapshot", + metavar="", + help=_("Snapshot to backup (name or ID)"), + ) + parser.add_argument( + '--force', + action='store_true', + default=False, + help=_("Allow to back up an in-use volume"), + ) + parser.add_argument( + '--incremental', + action='store_true', + default=False, + help=_("Perform an incremental backup"), + ) + parser.add_argument( + '--no-incremental', + action='store_false', + help=_("Do not perform an incremental backup"), + ) + parser.add_argument( + '--property', + metavar='', + action=parseractions.KeyValueAction, + dest='properties', + help=_( + 'Set a property on this backup ' + '(repeat option to remove multiple values) ' + '(supported by --os-volume-api-version 3.43 or above)' + ), + ) + parser.add_argument( + '--availability-zone', + metavar='', + help=_( + 'AZ where the backup should be stored; by default it will be ' + 'the same as the source ' + '(supported by --os-volume-api-version 3.51 or above)' + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + volume_id = volume_client.find_volume( + parsed_args.volume, + ignore_missing=False, + ).id + + kwargs = {} + + if parsed_args.snapshot: + kwargs['snapshot_id'] = volume_client.find_snapshot( + parsed_args.snapshot, + ignore_missing=False, + ).id + + if parsed_args.properties: + if not sdk_utils.supports_microversion(volume_client, '3.43'): + msg = _( + '--os-volume-api-version 3.43 or greater is required to ' + 'support the --property option' + ) + raise exceptions.CommandError(msg) + + kwargs['metadata'] = parsed_args.properties + + if parsed_args.availability_zone: + if not sdk_utils.supports_microversion(volume_client, '3.51'): + msg = _( + '--os-volume-api-version 3.51 or greater is required to ' + 'support the --availability-zone option' + ) + raise exceptions.CommandError(msg) + + kwargs['availability_zone'] = parsed_args.availability_zone + + columns: tuple[str, ...] 
= ( + "id", + "name", + "volume_id", + ) + backup = volume_client.create_backup( + volume_id=volume_id, + container=parsed_args.container, + name=parsed_args.name, + description=parsed_args.description, + force=parsed_args.force, + is_incremental=parsed_args.incremental, + **kwargs, + ) + data = utils.get_dict_properties(backup, columns) + return (columns, data) + + +class DeleteVolumeBackup(command.Command): + _description = _("Delete volume backup(s)") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "backups", + metavar="", + nargs="+", + help=_("Backup(s) to delete (name or ID)"), + ) + parser.add_argument( + '--force', + action='store_true', + default=False, + help=_("Allow delete in state other than error or available"), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + result = 0 + + for backup in parsed_args.backups: + try: + backup_id = volume_client.find_backup( + backup, ignore_missing=False + ).id + volume_client.delete_backup( + backup_id, + ignore_missing=False, + force=parsed_args.force, + ) + except Exception as e: + result += 1 + LOG.error( + _( + "Failed to delete backup with " + "name or ID '%(backup)s': %(e)s" + ) + % {'backup': backup, 'e': e} + ) + + if result > 0: + total = len(parsed_args.backups) + msg = _("%(result)s of %(total)s backups failed to delete.") % { + 'result': result, + 'total': total, + } + raise exceptions.CommandError(msg) + + +class ListVolumeBackup(command.Lister): + _description = _("List volume backups") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + '--project', + metavar='', + help=_('Filter results by project (name or ID) (admin only)'), + ) + parser.add_argument( + "--long", + action="store_true", + default=False, + help=_("List additional fields in output"), + ) + parser.add_argument( + "--name", + metavar="", + help=_("Filters results by the backup name"), + ) + parser.add_argument( + "--status", + metavar="", + choices=[ + 'creating', + 'available', + 'deleting', + 'error', + 'restoring', + 'error_restoring', + ], + help=_( + "Filters results by the backup status, one of: " + "creating, available, deleting, error, restoring or " + "error_restoring" + ), + ) + parser.add_argument( + "--volume", + metavar="", + help=_( + "Filters results by the volume which they backup (name or ID)" + ), + ) + pagination.add_marker_pagination_option_to_parser(parser) + parser.add_argument( + '--all-projects', + action='store_true', + default=False, + help=_('Include all projects (admin only)'), + ) + # TODO(stephenfin): Add once we have an equivalent command for + # 'cinder list-filters' + # parser.add_argument( + # '--filter', + # metavar='', + # action=parseractions.KeyValueAction, + # dest='filters', + # help=_( + # "Filter key and value pairs. Use 'foo' to " + # "check enabled filters from server. Use 'key~=value' for " + # "inexact filtering if the key supports " + # "(supported by --os-volume-api-version 3.33 or above)" + # ), + # ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + identity_client = self.app.client_manager.sdk_connection.identity + + columns: tuple[str, ...] = ( + 'id', + 'name', + 'description', + 'status', + 'size', + 'is_incremental', + 'created_at', + ) + column_headers: tuple[str, ...] 
= ( + 'ID', + 'Name', + 'Description', + 'Status', + 'Size', + 'Incremental', + 'Created At', + ) + if parsed_args.long: + columns += ('availability_zone', 'volume_id', 'container') + column_headers += ('Availability Zone', 'Volume', 'Container') + + # Cache the volume list + volume_cache = {} + try: + for s in volume_client.volumes(): + volume_cache[s.id] = s + except Exception: # noqa: S110 + # Just forget it if there's any trouble + pass + + _VolumeIdColumn = functools.partial( + VolumeIdColumn, volume_cache=volume_cache + ) + + all_tenants = parsed_args.all_projects + project_id = None + if parsed_args.project: + all_tenants = True + project_id = identity_client.find_project( + parsed_args.project, ignore_missing=False + ).id + + filter_volume_id = None + if parsed_args.volume: + try: + filter_volume_id = volume_client.find_volume( + parsed_args.volume, + ignore_missing=False, + ).id + except exceptions.CommandError: + # Volume with that ID does not exist, but search for backups + # for that volume nevertheless + LOG.debug( + "No volume with ID %s existing, continuing to " + "search for backups for that volume ID", + parsed_args.volume, + ) + filter_volume_id = parsed_args.volume + + marker_backup_id = None + if parsed_args.marker: + marker_backup_id = volume_client.find_backup( + parsed_args.marker, + ignore_missing=False, + ).id + + data = volume_client.backups( + name=parsed_args.name, + status=parsed_args.status, + volume_id=filter_volume_id, + all_tenants=all_tenants, + marker=marker_backup_id, + limit=parsed_args.limit, + project_id=project_id, + ) + + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={'volume_id': _VolumeIdColumn}, + ) + for s in data + ), + ) + + +class RestoreVolumeBackup(command.ShowOne): + _description = _("Restore volume backup") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "backup", + metavar="", + help=_("Backup to restore (name or ID)"), + ) + parser.add_argument( + "volume", + metavar="", + nargs="?", + help=_( + "Volume to restore to " + "(name or ID for existing volume, name only for new volume) " + "(default to None)" + ), + ) + parser.add_argument( + "--force", + action="store_true", + help=_( + "Restore the backup to an existing volume (default to False)" + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + columns: tuple[str, ...] = ( + 'id', + 'volume_id', + 'volume_name', + ) + + backup = volume_client.find_backup( + parsed_args.backup, + ignore_missing=False, + ) + + volume_name = None + volume_id = None + try: + volume_id = volume_client.find_volume( + parsed_args.volume, + ignore_missing=False, + ).id + except Exception: + volume_name = parsed_args.volume + else: + # If we didn't fail, the volume must already exist. 
We only allow + # this to work if the user forced things + if not parsed_args.force: + msg = _( + "Volume '%s' already exists; if you want to restore the " + "backup to it you need to specify the '--force' option" + ) + raise exceptions.CommandError(msg % parsed_args.volume) + + restore = volume_client.restore_backup( + backup.id, + volume_id=volume_id, + name=volume_name, + ) + + data = utils.get_dict_properties(restore, columns) + return (columns, data) + + +class SetVolumeBackup(command.Command): + _description = _("Set volume backup properties") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "backup", + metavar="", + help=_("Backup to modify (name or ID)"), + ) + parser.add_argument( + '--name', + metavar='', + help=_( + 'New backup name ' + '(supported by --os-volume-api-version 3.9 or above)' + ), + ) + parser.add_argument( + '--description', + metavar='', + help=_( + 'New backup description ' + '(supported by --os-volume-api-version 3.9 or above)' + ), + ) + parser.add_argument( + '--state', + metavar='', + choices=['available', 'error'], + help=_( + 'New backup state ("available" or "error") (admin only) ' + '(This option simply changes the state of the backup ' + 'in the database with no regard to actual status; ' + 'exercise caution when using)' + ), + ) + parser.add_argument( + '--no-property', + action='store_true', + help=_( + 'Remove all properties from this backup ' + '(specify both --no-property and --property to remove the ' + 'current properties before setting new properties)' + ), + ) + parser.add_argument( + '--property', + metavar='', + action=parseractions.KeyValueAction, + dest='properties', + default={}, + help=_( + 'Set a property on this backup ' + '(repeat option to set multiple values) ' + '(supported by --os-volume-api-version 3.43 or above)' + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + backup = volume_client.find_backup( + parsed_args.backup, + ignore_missing=False, + ) + + result = 0 + if parsed_args.state: + try: + volume_client.reset_backup_status( + backup, status=parsed_args.state + ) + except Exception as e: + LOG.error(_("Failed to set backup state: %s"), e) + result += 1 + + kwargs = {} + + if parsed_args.name: + if not sdk_utils.supports_microversion(volume_client, '3.9'): + msg = _( + '--os-volume-api-version 3.9 or greater is required to ' + 'support the --name option' + ) + raise exceptions.CommandError(msg) + + kwargs['name'] = parsed_args.name + + if parsed_args.description: + if not sdk_utils.supports_microversion(volume_client, '3.9'): + msg = _( + '--os-volume-api-version 3.9 or greater is required to ' + 'support the --description option' + ) + raise exceptions.CommandError(msg) + + kwargs['description'] = parsed_args.description + + if parsed_args.no_property: + if not sdk_utils.supports_microversion(volume_client, '3.43'): + msg = _( + '--os-volume-api-version 3.43 or greater is required to ' + 'support the --no-property option' + ) + raise exceptions.CommandError(msg) + + if parsed_args.properties: + if not sdk_utils.supports_microversion(volume_client, '3.43'): + msg = _( + '--os-volume-api-version 3.43 or greater is required to ' + 'support the --property option' + ) + raise exceptions.CommandError(msg) + + if sdk_utils.supports_microversion(volume_client, '3.43'): + metadata = copy.deepcopy(backup.metadata) + + if parsed_args.no_property: + metadata = {} + + 
metadata.update(parsed_args.properties) + kwargs['metadata'] = metadata + + if kwargs: + try: + volume_client.update_backup(backup, **kwargs) + except Exception as e: + LOG.error("Failed to update backup: %s", e) + result += 1 + + if result > 0: + msg = _("One or more of the set operations failed") + raise exceptions.CommandError(msg) + + +class UnsetVolumeBackup(command.Command): + """Unset volume backup properties. + + This command requires ``--os-volume-api-version`` 3.43 or greater. + """ + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'backup', + metavar='<backup>', + help=_('Backup to modify (name or ID)'), + ) + parser.add_argument( + '--property', + metavar='<key>', + action='append', + dest='properties', + help=_( + 'Property to remove from this backup ' + '(repeat option to unset multiple values) ' + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + if not sdk_utils.supports_microversion(volume_client, '3.43'): + msg = _( + '--os-volume-api-version 3.43 or greater is required to ' + 'support the --property option' + ) + raise exceptions.CommandError(msg) + + backup = volume_client.find_backup( + parsed_args.backup, ignore_missing=False + ) + metadata = copy.deepcopy(backup.metadata) + + for key in parsed_args.properties: + if key not in metadata: + # ignore invalid properties but continue + LOG.warning( + "'%s' is not a valid property for backup '%s'", + key, + parsed_args.backup, + ) + continue + + del metadata[key] + + # the backup update API replaces metadata wholesale, so send the + # pruned copy rather than the keys that remain + volume_client.update_backup(backup, metadata=metadata) + + +class ShowVolumeBackup(command.ShowOne): + _description = _("Display volume backup details") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "backup", + metavar="<backup>", + help=_("Backup to display (name or ID)"), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + backup = volume_client.find_backup( + parsed_args.backup, ignore_missing=False + ) + columns: tuple[str, ...]
= ( + "availability_zone", + "container", + "created_at", + "data_timestamp", + "description", + "encryption_key_id", + "fail_reason", + "has_dependent_backups", + "id", + "is_incremental", + "metadata", + "name", + "object_count", + "project_id", + "size", + "snapshot_id", + "status", + "updated_at", + "user_id", + "volume_id", + ) + data = utils.get_dict_properties(backup, columns) + return (columns, data) diff --git a/openstackclient/volume/v3/volume_group.py b/openstackclient/volume/v3/volume_group.py index 242ffcd49f..1810feef56 100644 --- a/openstackclient/volume/v3/volume_group.py +++ b/openstackclient/volume/v3/volume_group.py @@ -13,10 +13,11 @@ import argparse from cinderclient import api_versions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import envvars from openstackclient.i18n import _ @@ -155,7 +156,7 @@ def get_parser(self, prog_name): parser.add_argument( '--description', metavar='', - help=_('Description of a volume group.') + help=_('Description of a volume group.'), ) parser.add_argument( '--availability-zone', @@ -178,8 +179,10 @@ def take_action(self, parsed_args): ) self.log.warning(msg) - volume_group_type = parsed_args.volume_group_type or \ - parsed_args.volume_group_type_legacy + volume_group_type = ( + parsed_args.volume_group_type + or parsed_args.volume_group_type_legacy + ) volume_types = parsed_args.volume_types[:] volume_types.extend(parsed_args.volume_types_legacy) @@ -229,8 +232,10 @@ def take_action(self, parsed_args): "[--source-group|--group-snapshot]' command" ) raise exceptions.CommandError(msg) - if (parsed_args.source_group is None and - parsed_args.group_snapshot is None): + if ( + parsed_args.source_group is None + and parsed_args.group_snapshot is None + ): msg = _( "Either --source-group or " "'--group-snapshot ' needs to be " @@ -239,24 +244,28 @@ def take_action(self, parsed_args): ) raise exceptions.CommandError(msg) if parsed_args.availability_zone: - msg = _("'--availability-zone' option will not work " - "if creating group from source.") + msg = _( + "'--availability-zone' option will not work " + "if creating group from source." + ) self.log.warning(msg) source_group = None if parsed_args.source_group: - source_group = utils.find_resource(volume_client.groups, - parsed_args.source_group) + source_group = utils.find_resource( + volume_client.groups, parsed_args.source_group + ) group_snapshot = None if parsed_args.group_snapshot: group_snapshot = utils.find_resource( - volume_client.group_snapshots, - parsed_args.group_snapshot) + volume_client.group_snapshots, parsed_args.group_snapshot + ) group = volume_client.groups.create_from_src( group_snapshot.id if group_snapshot else None, source_group.id if source_group else None, parsed_args.name, - parsed_args.description) + parsed_args.description, + ) group = volume_client.groups.get(group.id) return _format_group(group) @@ -280,8 +289,8 @@ def get_parser(self, prog_name): default=False, help=_( 'Delete the volume group even if it contains volumes. ' - 'This will delete any remaining volumes in the group.', - ) + 'This will delete any remaining volumes in the group.' 
+ ), ) return parser @@ -300,8 +309,7 @@ def take_action(self, parsed_args): parsed_args.group, ) - volume_client.groups.delete( - group.id, delete_volumes=parsed_args.force) + volume_client.groups.delete(group.id, delete_volumes=parsed_args.force) class SetVolumeGroup(command.ShowOne): @@ -403,7 +411,7 @@ def get_parser(self, prog_name): '--all-projects', dest='all_projects', action='store_true', - default=utils.env('ALL_PROJECTS', default=False), + default=envvars.boolenv('ALL_PROJECTS'), help=_('Shows details for all projects (admin only).'), ) # TODO(stephenfin): Add once we have an equivalent command for @@ -436,8 +444,7 @@ def take_action(self, parsed_args): 'all_tenants': parsed_args.all_projects, } - groups = volume_client.groups.list( - search_opts=search_opts) + groups = volume_client.groups.list(search_opts=search_opts) column_headers = ( 'ID', @@ -452,10 +459,7 @@ def take_action(self, parsed_args): return ( column_headers, - ( - utils.get_item_properties(a, columns) - for a in groups - ), + (utils.get_item_properties(a, columns) for a in groups), ) @@ -548,11 +552,12 @@ def take_action(self, parsed_args): parsed_args.group, ) - group = volume_client.groups.show(group.id, **kwargs) + group = volume_client.groups.get(group.id, **kwargs) if parsed_args.show_replication_targets: - replication_targets = \ + replication_targets = ( volume_client.groups.list_replication_targets(group.id) + ) group.replication_targets = replication_targets @@ -578,18 +583,14 @@ def get_parser(self, prog_name): action='store_true', dest='allow_attached_volume', default=False, - help=_( - 'Allow group with attached volumes to be failed over.', - ) + help=_('Allow group with attached volumes to be failed over.'), ) parser.add_argument( '--disallow-attached-volume', action='store_false', dest='allow_attached_volume', default=False, - help=_( - 'Disallow group with attached volumes to be failed over.', - ) + help=_('Disallow group with attached volumes to be failed over.'), ) parser.add_argument( '--secondary-backend-id', diff --git a/openstackclient/volume/v3/volume_group_snapshot.py b/openstackclient/volume/v3/volume_group_snapshot.py index 229cbd713c..530b7d5d1a 100644 --- a/openstackclient/volume/v3/volume_group_snapshot.py +++ b/openstackclient/volume/v3/volume_group_snapshot.py @@ -12,11 +12,12 @@ import logging -from cinderclient import api_versions -from osc_lib.command import command +from openstack import utils as sdk_utils from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import envvars from openstackclient.i18n import _ LOG = logging.getLogger(__name__) @@ -70,29 +71,31 @@ def get_parser(self, prog_name): parser.add_argument( '--description', metavar='', - help=_('Description of a volume group snapshot.') + help=_('Description of a volume group snapshot.'), ) return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.14'): + if not sdk_utils.supports_microversion(volume_client, '3.14'): msg = _( "--os-volume-api-version 3.14 or greater is required to " "support the 'volume group snapshot create' command" ) raise exceptions.CommandError(msg) - volume_group = utils.find_resource( - volume_client.groups, + group = volume_client.find_group( parsed_args.volume_group, + ignore_missing=False, + details=False, ) - snapshot = volume_client.group_snapshots.create( - 
volume_group.id, - parsed_args.name, - parsed_args.description) + snapshot = volume_client.create_group_snapshot( + group_id=group.id, + name=parsed_args.name, + description=parsed_args.description, + ) return _format_group_snapshot(snapshot) @@ -113,21 +116,22 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.14'): + if not sdk_utils.supports_microversion(volume_client, '3.14'): msg = _( "--os-volume-api-version 3.14 or greater is required to " "support the 'volume group snapshot delete' command" ) raise exceptions.CommandError(msg) - snapshot = utils.find_resource( - volume_client.group_snapshots, + group_snapshot = volume_client.find_group_snapshot( parsed_args.snapshot, + ignore_missing=False, + details=False, ) - volume_client.group_snapshots.delete(snapshot.id) + volume_client.delete_group_snapshot(group_snapshot.id) class ListVolumeGroupSnapshot(command.Lister): @@ -142,7 +146,7 @@ def get_parser(self, prog_name): '--all-projects', dest='all_projects', action='store_true', - default=utils.env('ALL_PROJECTS', default=False), + default=envvars.boolenv('ALL_PROJECTS'), help=_('Shows details for all projects (admin only).'), ) # TODO(stephenfin): Add once we have an equivalent command for @@ -162,21 +166,18 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.14'): + if not sdk_utils.supports_microversion(volume_client, '3.14'): msg = _( "--os-volume-api-version 3.14 or greater is required to " "support the 'volume group snapshot list' command" ) raise exceptions.CommandError(msg) - search_opts = { - 'all_tenants': parsed_args.all_projects, - } - - groups = volume_client.group_snapshots.list( - search_opts=search_opts) + groups = volume_client.group_snapshots( + all_projects=parsed_args.all_projects, + ) column_headers = ( 'ID', @@ -191,10 +192,7 @@ def take_action(self, parsed_args): return ( column_headers, - ( - utils.get_item_properties(a, columns) - for a in groups - ), + (utils.get_item_properties(a, columns) for a in groups), ) @@ -214,21 +212,19 @@ def get_parser(self, prog_name): return parser def take_action(self, parsed_args): - volume_client = self.app.client_manager.volume + volume_client = self.app.client_manager.sdk_connection.volume - if volume_client.api_version < api_versions.APIVersion('3.14'): + if not sdk_utils.supports_microversion(volume_client, '3.14'): msg = _( "--os-volume-api-version 3.14 or greater is required to " "support the 'volume group snapshot show' command" ) raise exceptions.CommandError(msg) - snapshot = utils.find_resource( - volume_client.group_snapshots, + group_snapshot = volume_client.find_group_snapshot( parsed_args.snapshot, + ignore_missing=False, + details=True, ) - # TODO(stephenfin): Do we need this? 
- snapshot = volume_client.groups.show(snapshot.id) - - return _format_group_snapshot(snapshot) + return _format_group_snapshot(group_snapshot) diff --git a/openstackclient/volume/v3/volume_group_type.py b/openstackclient/volume/v3/volume_group_type.py index 860fa544a5..bdedd25a19 100644 --- a/openstackclient/volume/v3/volume_group_type.py +++ b/openstackclient/volume/v3/volume_group_type.py @@ -15,10 +15,10 @@ from cinderclient import api_versions from osc_lib.cli import format_columns from osc_lib.cli import parseractions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ LOG = logging.getLogger(__name__) @@ -70,7 +70,7 @@ def get_parser(self, prog_name): parser.add_argument( '--description', metavar='', - help=_('Description of the volume group type.') + help=_('Description of the volume group type.'), ) type_group = parser.add_mutually_exclusive_group() type_group.add_argument( @@ -86,7 +86,7 @@ def get_parser(self, prog_name): '--private', dest='is_public', action='store_false', - help=_('Volume group type is not available to other projects') + help=_('Volume group type is not available to other projects'), ) return parser @@ -101,9 +101,8 @@ def take_action(self, parsed_args): raise exceptions.CommandError(msg) group_type = volume_client.group_types.create( - parsed_args.name, - parsed_args.description, - parsed_args.is_public) + parsed_args.name, parsed_args.description, parsed_args.is_public + ) return _format_group_type(group_type) @@ -176,7 +175,7 @@ def get_parser(self, prog_name): '--private', dest='is_public', action='store_false', - help=_('Make volume group type unavailable to other projects.') + help=_('Make volume group type unavailable to other projects.'), ) parser.add_argument( '--no-property', @@ -230,7 +229,8 @@ def take_action(self, parsed_args): if kwargs: try: group_type = volume_client.group_types.update( - group_type.id, **kwargs) + group_type.id, **kwargs + ) except Exception as e: LOG.error(_("Failed to update group type: %s"), e) errors += 1 @@ -251,9 +251,7 @@ def take_action(self, parsed_args): errors += 1 if errors > 0: - msg = _( - "Command Failed: One or more of the operations failed" - ) + msg = _("Command Failed: One or more of the operations failed") raise exceptions.CommandError() return _format_group_type(group_type) @@ -370,10 +368,7 @@ def take_action(self, parsed_args): return ( column_headers, - ( - utils.get_item_properties(a, columns) - for a in group_types - ), + (utils.get_item_properties(a, columns) for a in group_types), ) diff --git a/openstackclient/volume/v3/volume_message.py b/openstackclient/volume/v3/volume_message.py index 4fe5ae92f6..b39c579469 100644 --- a/openstackclient/volume/v3/volume_message.py +++ b/openstackclient/volume/v3/volume_message.py @@ -17,10 +17,11 @@ import logging as LOG from cinderclient import api_versions -from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils +from openstackclient import command +from openstackclient.common import pagination from openstackclient.i18n import _ from openstackclient.identity import common as identity_common @@ -34,7 +35,7 @@ def get_parser(self, prog_name): 'message_ids', metavar='', nargs='+', - help=_('Message(s) to delete (ID)') + help=_('Message(s) to delete (ID)'), ) return parser @@ -60,7 +61,8 @@ def take_action(self, parsed_args): if errors > 0: total = len(parsed_args.message_ids) msg = _('Failed to delete %(errors)s of 
%(total)s messages.') % { - 'errors': errors, 'total': total, + 'errors': errors, + 'total': total, } raise exceptions.CommandError(msg) @@ -77,19 +79,7 @@ def get_parser(self, prog_name): help=_('Filter results by project (name or ID) (admin only)'), ) identity_common.add_project_domain_option_to_parser(parser) - parser.add_argument( - '--marker', - metavar='', - help=_('The last message ID of the previous page'), - default=None, - ) - parser.add_argument( - '--limit', - type=int, - metavar='', - help=_('Maximum number of messages to display'), - default=None, - ) + pagination.add_marker_pagination_option_to_parser(parser) return parser @@ -121,7 +111,8 @@ def take_action(self, parsed_args): project_id = identity_common.find_project( identity_client, parsed_args.project, - parsed_args.project_domain).id + parsed_args.project_domain, + ).id search_opts = { 'project_id': project_id, @@ -129,11 +120,12 @@ def take_action(self, parsed_args): data = volume_client.messages.list( search_opts=search_opts, marker=parsed_args.marker, - limit=parsed_args.limit) + limit=parsed_args.limit, + ) return ( column_headers, - (utils.get_item_properties(s, column_headers) for s in data) + (utils.get_item_properties(s, column_headers) for s in data), ) @@ -141,11 +133,11 @@ class ShowMessage(command.ShowOne): _description = _('Show a volume failure message') def get_parser(self, prog_name): - parser = super(ShowMessage, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'message_id', metavar='', - help=_('Message to show (ID).') + help=_('Message to show (ID).'), ) return parser diff --git a/openstackclient/volume/v3/volume_snapshot.py b/openstackclient/volume/v3/volume_snapshot.py new file mode 100644 index 0000000000..f89174c3da --- /dev/null +++ b/openstackclient/volume/v3/volume_snapshot.py @@ -0,0 +1,573 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Volume v3 snapshot action implementations""" + +import functools +import logging +import typing as ty + +from cliff import columns as cliff_columns +from openstack.block_storage.v3 import snapshot as _snapshot +from osc_lib.cli import format_columns +from osc_lib.cli import parseractions +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient import command +from openstackclient.common import pagination +from openstackclient.i18n import _ +from openstackclient.identity import common as identity_common + +LOG = logging.getLogger(__name__) + + +class VolumeIdColumn(cliff_columns.FormattableColumn[str]): + """Formattable column for volume ID column. + + Unlike the parent FormattableColumn class, the initializer of the + class takes volume_cache as the second argument. + osc_lib.utils.get_item_properties instantiate cliff FormattableColumn + object with a single parameter "column value", so you need to pass + a partially initialized class like + ``functools.partial(VolumeIdColumn, volume_cache)``. 
+ """ + + def __init__(self, value, volume_cache=None): + super().__init__(value) + self._volume_cache = volume_cache or {} + + def human_readable(self): + """Return a volume name if available + + :rtype: either the volume ID or name + """ + volume_id = self._value + volume = volume_id + if volume_id in self._volume_cache.keys(): + volume = self._volume_cache[volume_id].name + return volume + + +def _format_snapshot(snapshot: _snapshot.Snapshot) -> dict[str, ty.Any]: + # Some columns returned by openstacksdk should not be shown because they're + # either irrelevant or duplicates + ignored_columns = { + # computed columns + 'location', + # create-only columns + 'consumes_quota', + 'force', + 'group_snapshot_id', + # ignored columns + 'os-extended-snapshot-attributes:progress', + 'os-extended-snapshot-attributes:project_id', + 'updated_at', + 'user_id', + # unnecessary columns + 'links', + } + + info = snapshot.to_dict(original_names=True) + data = {} + for key, value in info.items(): + if key in ignored_columns: + continue + + data[key] = value + + data.update( + { + 'properties': format_columns.DictColumn(data.pop('metadata')), + } + ) + + return data + + +class CreateVolumeSnapshot(command.ShowOne): + _description = _("Create new volume snapshot") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "snapshot_name", + metavar="", + help=_("Name of the new snapshot"), + ) + parser.add_argument( + "--volume", + metavar="", + help=_( + "Volume to snapshot (name or ID) (default is )" + ), + ) + parser.add_argument( + "--description", + metavar="", + help=_("Description of the snapshot"), + ) + parser.add_argument( + "--force", + action="store_true", + default=False, + help=_( + "Create a snapshot attached to an instance. 
Default is False" + ), + ) + parser.add_argument( + "--property", + metavar="", + dest='properties', + action=parseractions.KeyValueAction, + help=_( + "Set a property to this snapshot " + "(repeat option to set multiple properties)" + ), + ) + parser.add_argument( + "--remote-source", + metavar="", + action=parseractions.KeyValueAction, + help=_( + "The attribute(s) of the existing remote volume snapshot " + "(admin required) (repeat option to specify multiple " + "attributes) e.g.: '--remote-source source-name=test_name " + "--remote-source source-id=test_id'" + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + volume = parsed_args.volume + if not parsed_args.volume: + volume = parsed_args.snapshot_name + volume_id = volume_client.find_volume(volume, ignore_missing=False).id + + if parsed_args.remote_source: + # Create a new snapshot from an existing remote snapshot source + if parsed_args.force: + msg = _( + "'--force' option will not work when you create " + "new volume snapshot from an existing remote " + "volume snapshot" + ) + LOG.warning(msg) + + snapshot = volume_client.manage_snapshot( + volume_id=volume_id, + ref=parsed_args.remote_source, + name=parsed_args.snapshot_name, + description=parsed_args.description, + metadata=parsed_args.properties, + ) + else: + # Create a new snapshot from scratch + snapshot = volume_client.create_snapshot( + volume_id=volume_id, + force=parsed_args.force, + name=parsed_args.snapshot_name, + description=parsed_args.description, + metadata=parsed_args.properties, + ) + + data = _format_snapshot(snapshot) + return zip(*sorted(data.items())) + + +class DeleteVolumeSnapshot(command.Command): + _description = _("Delete volume snapshot(s)") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "snapshots", + metavar="", + nargs="+", + help=_("Snapshot(s) to delete (name or ID)"), + ) + parser.add_argument( + '--force', + action='store_true', + help=_( + "Attempt forced removal of snapshot(s), " + "regardless of state (defaults to False)" + ), + ) + parser.add_argument( + '--remote', + action='store_true', + help=_( + 'Unmanage the snapshot, removing it from the Block Storage ' + 'service management but not from the backend.' + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + result = 0 + + if parsed_args.remote: + if parsed_args.force: + msg = _( + "The --force option is not supported with the " + "--remote parameter." 
+ ) + raise exceptions.CommandError(msg) + + for snapshot in parsed_args.snapshots: + try: + snapshot_id = volume_client.find_snapshot( + snapshot, ignore_missing=False + ).id + if parsed_args.remote: + volume_client.unmanage_snapshot(snapshot_id) + else: + volume_client.delete_snapshot( + snapshot_id, force=parsed_args.force + ) + except Exception as e: + result += 1 + LOG.error( + _( + "Failed to delete snapshot with " + "name or ID '%(snapshot)s': %(e)s" + ) + % {'snapshot': snapshot, 'e': e} + ) + + if result > 0: + total = len(parsed_args.snapshots) + msg = _("%(result)s of %(total)s snapshots failed to delete.") % { + 'result': result, + 'total': total, + } + raise exceptions.CommandError(msg) + + +class ListVolumeSnapshot(command.Lister): + _description = _("List volume snapshots") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + '--all-projects', + action='store_true', + default=False, + help=_('Include all projects (admin only)'), + ) + parser.add_argument( + '--project', + metavar='', + help=_('Filter results by project (name or ID) (admin only)'), + ) + identity_common.add_project_domain_option_to_parser(parser) + parser.add_argument( + '--long', + action='store_true', + default=False, + help=_('List additional fields in output'), + ) + parser.add_argument( + '--name', + metavar='', + default=None, + help=_('Filters results by a name.'), + ) + parser.add_argument( + '--status', + metavar='', + choices=[ + 'available', + 'error', + 'creating', + 'deleting', + 'error_deleting', + ], + help=_( + "Filters results by a status. " + "('available', 'error', 'creating', 'deleting'" + " or 'error_deleting')" + ), + ) + parser.add_argument( + '--volume', + metavar='', + default=None, + help=_('Filters results by a volume (name or ID).'), + ) + pagination.add_marker_pagination_option_to_parser(parser) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + identity_client = self.app.client_manager.identity + + columns: tuple[str, ...] = ( + 'id', + 'name', + 'description', + 'status', + 'size', + ) + column_headers: tuple[str, ...] 
= ( + 'ID', + 'Name', + 'Description', + 'Status', + 'Size', + ) + if parsed_args.long: + columns += ( + 'created_at', + 'volume_id', + 'metadata', + ) + column_headers += ( + 'Created At', + 'Volume', + 'Properties', + ) + + # Cache the volume list + volume_cache = {} + try: + for s in volume_client.volumes(): + volume_cache[s.id] = s + except Exception: # noqa: S110 + # Just forget it if there's any trouble + pass + _VolumeIdColumn = functools.partial( + VolumeIdColumn, volume_cache=volume_cache + ) + + volume_id = None + if parsed_args.volume: + volume_id = volume_client.find_volume( + parsed_args.volume, ignore_missing=False + ).id + + project_id = None + if parsed_args.project: + project_id = identity_common.find_project( + identity_client, + parsed_args.project, + parsed_args.project_domain, + ).id + + # set value of 'all_tenants' when using project option + all_projects = ( + True if parsed_args.project else parsed_args.all_projects + ) + + data = volume_client.snapshots( + marker=parsed_args.marker, + limit=parsed_args.limit, + all_projects=all_projects, + project_id=project_id, + name=parsed_args.name, + status=parsed_args.status, + volume_id=volume_id, + ) + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters={ + 'metadata': format_columns.DictColumn, + 'volume_id': _VolumeIdColumn, + }, + ) + for s in data + ), + ) + + +class SetVolumeSnapshot(command.Command): + _description = _("Set volume snapshot properties") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'snapshot', + metavar='<snapshot>', + help=_('Snapshot to modify (name or ID)'), + ) + parser.add_argument( + '--name', metavar='<name>', help=_('New snapshot name') + ) + parser.add_argument( + '--description', + metavar='<description>', + help=_('New snapshot description'), + ) + parser.add_argument( + "--no-property", + dest="no_property", + action="store_true", + help=_( + "Remove all properties from <snapshot> " + "(specify both --no-property and --property to " + "remove the current properties before setting " + "new properties.)" + ), + ) + parser.add_argument( + '--property', + metavar='<key=value>', + action=parseractions.KeyValueAction, + dest='properties', + help=_( + 'Property to add/change for this snapshot ' + '(repeat option to set multiple properties)' + ), + ) + parser.add_argument( + '--state', + metavar='<state>', + choices=[ + 'available', + 'error', + 'creating', + 'deleting', + 'error_deleting', + ], + help=_( + 'New snapshot state. 
("available", "error", "creating", ' + '"deleting", or "error_deleting") (admin only) ' + '(This option simply changes the state of the snapshot ' + 'in the database with no regard to actual status, ' + 'exercise caution when using)' + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + snapshot = volume_client.find_snapshot( + parsed_args.snapshot, ignore_missing=False + ) + + result = 0 + if parsed_args.no_property: + try: + volume_client.delete_snapshot_metadata( + snapshot.id, keys=list(snapshot.metadata) + ) + except Exception as e: + LOG.error(_("Failed to clean snapshot properties: %s"), e) + result += 1 + + if parsed_args.properties: + try: + volume_client.set_snapshot_metadata( + snapshot.id, **parsed_args.properties + ) + except Exception as e: + LOG.error(_("Failed to set snapshot property: %s"), e) + result += 1 + + if parsed_args.state: + try: + volume_client.reset_snapshot_status( + snapshot.id, parsed_args.state + ) + except Exception as e: + LOG.error(_("Failed to set snapshot state: %s"), e) + result += 1 + + kwargs = {} + if parsed_args.name: + kwargs['name'] = parsed_args.name + if parsed_args.description: + kwargs['description'] = parsed_args.description + if kwargs: + try: + volume_client.update_snapshot(snapshot.id, **kwargs) + except Exception as e: + LOG.error( + _("Failed to update snapshot name or description: %s"), + e, + ) + result += 1 + + if result > 0: + raise exceptions.CommandError( + _("One or more of the set operations failed") + ) + + +class ShowVolumeSnapshot(command.ShowOne): + _description = _("Display volume snapshot details") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "snapshot", + metavar="", + help=_("Snapshot to display (name or ID)"), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + snapshot = volume_client.find_snapshot( + parsed_args.snapshot, ignore_missing=False + ) + + data = _format_snapshot(snapshot) + return zip(*sorted(data.items())) + + +class UnsetVolumeSnapshot(command.Command): + _description = _("Unset volume snapshot properties") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'snapshot', + metavar='', + help=_('Snapshot to modify (name or ID)'), + ) + parser.add_argument( + '--property', + metavar='', + dest='properties', + action='append', + default=[], + help=_( + 'Property to remove from snapshot ' + '(repeat option to remove multiple properties)' + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.sdk_connection.volume + + snapshot = volume_client.find_snapshot( + parsed_args.snapshot, ignore_missing=False + ) + + if parsed_args.properties: + volume_client.delete_snapshot_metadata( + snapshot.id, keys=parsed_args.properties + ) diff --git a/openstackclient/volume/v1/volume_transfer_request.py b/openstackclient/volume/v3/volume_transfer_request.py similarity index 69% rename from openstackclient/volume/v1/volume_transfer_request.py rename to openstackclient/volume/v3/volume_transfer_request.py index 971b9ab592..afd4626038 100644 --- a/openstackclient/volume/v1/volume_transfer_request.py +++ b/openstackclient/volume/v3/volume_transfer_request.py @@ -12,14 +12,15 @@ # under the License. 
# -"""Volume v1 transfer action implementations""" +"""Volume v3 transfer action implementations""" import logging -from osc_lib.command import command +from cinderclient import api_versions from osc_lib import exceptions from osc_lib import utils +from openstackclient import command from openstackclient.i18n import _ @@ -30,7 +31,7 @@ class AcceptTransferRequest(command.ShowOne): _description = _("Accept volume transfer request.") def get_parser(self, prog_name): - parser = super(AcceptTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'transfer_request', metavar="", @@ -39,6 +40,7 @@ def get_parser(self, prog_name): parser.add_argument( '--auth-key', metavar="", + required=True, help=_('Volume transfer request authentication key'), ) return parser @@ -48,18 +50,13 @@ def take_action(self, parsed_args): try: transfer_request_id = utils.find_resource( - volume_client.transfers, - parsed_args.transfer_request + volume_client.transfers, parsed_args.transfer_request ).id except exceptions.CommandError: # Non-admin users will fail to lookup name -> ID so we just # move on and attempt with the user-supplied information transfer_request_id = parsed_args.transfer_request - if not parsed_args.auth_key: - msg = _("argument --auth-key is required") - raise exceptions.CommandError(msg) - transfer_accept = volume_client.transfers.accept( transfer_request_id, parsed_args.auth_key, @@ -73,21 +70,55 @@ class CreateTransferRequest(command.ShowOne): _description = _("Create volume transfer request.") def get_parser(self, prog_name): - parser = super(CreateTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--name', metavar="", - help=_('New transfer request name (default to None)') + help=_('New transfer request name (default to None)'), + ) + parser.add_argument( + '--snapshots', + action='store_true', + dest='snapshots', + help=_( + 'Allow transfer volumes without snapshots (default) ' + '(supported by --os-volume-api-version 3.55 or later)' + ), + default=None, + ) + parser.add_argument( + '--no-snapshots', + action='store_false', + dest='snapshots', + help=_( + 'Disallow transfer volumes without snapshots ' + '(supported by --os-volume-api-version 3.55 or later)' + ), ) parser.add_argument( 'volume', metavar="", - help=_('Volume to transfer (name or ID)') + help=_('Volume to transfer (name or ID)'), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume + + kwargs = {} + + if parsed_args.snapshots is not None: + if volume_client.api_version < api_versions.APIVersion('3.55'): + msg = _( + "--os-volume-api-version 3.55 or greater is required to " + "support the '--(no-)snapshots' option" + ) + raise exceptions.CommandError(msg) + + # unfortunately this option is negative so we have to reverse + # things + kwargs['no_snapshots'] = not parsed_args.snapshots + volume_id = utils.find_resource( volume_client.volumes, parsed_args.volume, @@ -95,6 +126,7 @@ def take_action(self, parsed_args): volume_transfer_request = volume_client.transfers.create( volume_id, parsed_args.name, + **kwargs, ) volume_transfer_request._info.pop("links", None) @@ -105,7 +137,7 @@ class DeleteTransferRequest(command.Command): _description = _("Delete volume transfer request(s).") def get_parser(self, prog_name): - parser = super(DeleteTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'transfer_request', metavar="", @@ 
-127,14 +159,20 @@ def take_action(self, parsed_args): volume_client.transfers.delete(transfer_request_id) except Exception as e: result += 1 - LOG.error(_("Failed to delete volume transfer request " - "with name or ID '%(transfer)s': %(e)s") - % {'transfer': t, 'e': e}) + LOG.error( + _( + "Failed to delete volume transfer request " + "with name or ID '%(transfer)s': %(e)s" + ) + % {'transfer': t, 'e': e} + ) if result > 0: total = len(parsed_args.transfer_request) - msg = (_("%(result)s of %(total)s volume transfer requests failed" - " to delete") % {'result': result, 'total': total}) + msg = _( + "%(result)s of %(total)s volume transfer requests failed" + " to delete" + ) % {'result': result, 'total': total} raise exceptions.CommandError(msg) @@ -142,7 +180,7 @@ class ListTransferRequest(command.Lister): _description = _("Lists all volume transfer requests.") def get_parser(self, prog_name): - parser = super(ListTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( '--all-projects', dest='all_projects', @@ -163,16 +201,20 @@ def take_action(self, parsed_args): search_opts={'all_tenants': parsed_args.all_projects}, ) - return (column_headers, ( - utils.get_item_properties(s, columns) - for s in volume_transfer_result)) + return ( + column_headers, + ( + utils.get_item_properties(s, columns) + for s in volume_transfer_result + ), + ) class ShowTransferRequest(command.ShowOne): _description = _("Show volume transfer request details.") def get_parser(self, prog_name): - parser = super(ShowTransferRequest, self).get_parser(prog_name) + parser = super().get_parser(prog_name) parser.add_argument( 'transfer_request', metavar="", diff --git a/openstackclient/volume/v3/volume_type.py b/openstackclient/volume/v3/volume_type.py new file mode 100644 index 0000000000..fbce2f2c9a --- /dev/null +++ b/openstackclient/volume/v3/volume_type.py @@ -0,0 +1,962 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Volume v3 Type action implementations""" + +import functools +import logging +import typing as ty + +from cinderclient import api_versions +from cliff import columns as cliff_columns +from osc_lib.cli import format_columns +from osc_lib.cli import parseractions +from osc_lib import exceptions +from osc_lib import utils + +from openstackclient import command +from openstackclient.i18n import _ +from openstackclient.identity import common as identity_common + + +LOG = logging.getLogger(__name__) + + +class EncryptionInfoColumn(cliff_columns.FormattableColumn[ty.Any]): + """Formattable column for encryption info column. + + Unlike the parent FormattableColumn class, the initializer of the + class takes encryption_data as the second argument. + osc_lib.utils.get_item_properties instantiate cliff FormattableColumn + object with a single parameter "column value", so you need to pass + a partially initialized class like + ``functools.partial(EncryptionInfoColumn encryption_data)``. 
+ """ + + def __init__(self, value, encryption_data=None): + super().__init__(value) + self._encryption_data = encryption_data or {} + + def _get_encryption_info(self): + type_id = self._value + return self._encryption_data.get(type_id) + + def human_readable(self): + encryption_info = self._get_encryption_info() + if encryption_info: + return utils.format_dict(encryption_info) + else: + return '-' + + def machine_readable(self): + return self._get_encryption_info() + + +def _create_encryption_type(volume_client, volume_type, parsed_args): + if not parsed_args.encryption_provider: + msg = _( + "'--encryption-provider' should be specified while " + "creating a new encryption type" + ) + raise exceptions.CommandError(msg) + # set the default of control location while creating + control_location = 'front-end' + if parsed_args.encryption_control_location: + control_location = parsed_args.encryption_control_location + body = { + 'provider': parsed_args.encryption_provider, + 'cipher': parsed_args.encryption_cipher, + 'key_size': parsed_args.encryption_key_size, + 'control_location': control_location, + } + encryption = volume_client.volume_encryption_types.create( + volume_type, body + ) + return encryption + + +def _set_encryption_type(volume_client, volume_type, parsed_args): + # update the existing encryption type + body = {} + for attr in ['provider', 'cipher', 'key_size', 'control_location']: + info = getattr(parsed_args, 'encryption_' + attr, None) + if info is not None: + body[attr] = info + try: + volume_client.volume_encryption_types.update(volume_type, body) + except Exception as e: + if type(e).__name__ == 'NotFound': + # create new encryption type + LOG.warning( + _( + "No existing encryption type found, creating " + "new encryption type for this volume type ..." 
+ ) + ) + _create_encryption_type(volume_client, volume_type, parsed_args) + + +class CreateVolumeType(command.ShowOne): + _description = _("Create new volume type") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "name", + metavar="", + help=_("Volume type name"), + ) + parser.add_argument( + "--description", + metavar="", + help=_("Volume type description"), + ) + public_group = parser.add_mutually_exclusive_group() + public_group.add_argument( + "--public", + action="store_true", + dest="is_public", + default=None, + help=_("Volume type is accessible to the public"), + ) + public_group.add_argument( + "--private", + action="store_false", + dest="is_public", + default=None, + help=_("Volume type is not accessible to the public"), + ) + parser.add_argument( + '--property', + metavar='', + action=parseractions.KeyValueAction, + dest='properties', + help=_( + 'Set a property on this volume type ' + '(repeat option to set multiple properties)' + ), + ) + parser.add_argument( + '--multiattach', + action='store_true', + default=False, + help=_( + "Enable multi-attach for this volume type " + "(this is an alias for '--property multiattach= True') " + "(requires driver support)" + ), + ) + parser.add_argument( + '--cacheable', + action='store_true', + default=False, + help=_( + "Enable caching for this volume type " + "(this is an alias for '--property cacheable= True') " + "(requires driver support)" + ), + ) + parser.add_argument( + '--replicated', + action='store_true', + default=False, + help=_( + "Enabled replication for this volume type " + "(this is an alias for " + "'--property replication_enabled= True') " + "(requires driver support)" + ), + ) + parser.add_argument( + '--availability-zone', + action='append', + dest='availability_zones', + help=_( + "Set an availability zone for this volume type " + "(this is an alias for " + "'--property RESKEY:availability_zones:') " + "(repeat option to set multiple availability zones)" + ), + ) + parser.add_argument( + '--project', + metavar='', + help=_( + "Allow to access private type (name or ID) " + "(must be used with --private option)" + ), + ) + identity_common.add_project_domain_option_to_parser(parser) + # TODO(Huanxuan Ao): Add choices for each "--encryption-*" option. + parser.add_argument( + '--encryption-provider', + metavar='', + help=_( + 'Set the encryption provider format for ' + 'this volume type (e.g "luks" or "plain") (admin only) ' + '(this option is required when setting encryption type ' + 'of a volume; consider using other encryption options ' + 'such as: "--encryption-cipher", "--encryption-key-size" ' + 'and "--encryption-control-location")' + ), + ) + parser.add_argument( + '--encryption-cipher', + metavar='', + help=_( + 'Set the encryption algorithm or mode for this ' + 'volume type (e.g "aes-xts-plain64") (admin only)' + ), + ) + parser.add_argument( + '--encryption-key-size', + metavar='', + type=int, + help=_( + 'Set the size of the encryption key of this ' + 'volume type (e.g "128" or "256") (admin only)' + ), + ) + parser.add_argument( + '--encryption-control-location', + metavar='', + choices=['front-end', 'back-end'], + help=_( + 'Set the notional service where the encryption is ' + 'performed ("front-end" or "back-end") (admin only) ' + '(The default value for this option is "front-end" ' + 'when setting encryption type of a volume. 
Consider ' + 'using other encryption options such as: ' + '"--encryption-cipher", "--encryption-key-size" and ' + '"--encryption-provider")' + ), + ) + return parser + + def take_action(self, parsed_args): + identity_client = self.app.client_manager.identity + volume_client = self.app.client_manager.volume + + if parsed_args.project and parsed_args.is_public is not False: + msg = _("--project is only allowed with --private") + raise exceptions.CommandError(msg) + + kwargs = {} + + if parsed_args.is_public is not None: + kwargs['is_public'] = parsed_args.is_public + + volume_type = volume_client.volume_types.create( + parsed_args.name, + description=parsed_args.description, + **kwargs, + ) + volume_type._info.pop('extra_specs') + + if parsed_args.project: + try: + project_id = identity_common.find_project( + identity_client, + parsed_args.project, + parsed_args.project_domain, + ).id + volume_client.volume_type_access.add_project_access( + volume_type.id, project_id + ) + except Exception as e: + msg = _( + "Failed to add project %(project)s access to type: %(e)s" + ) + LOG.error(msg % {'project': parsed_args.project, 'e': e}) + + properties = {} + if parsed_args.properties: + properties.update(parsed_args.properties) + if parsed_args.multiattach: + properties['multiattach'] = ' True' + if parsed_args.cacheable: + properties['cacheable'] = ' True' + if parsed_args.replicated: + properties['replication_enabled'] = ' True' + if parsed_args.availability_zones: + properties['RESKEY:availability_zones'] = ','.join( + parsed_args.availability_zones + ) + if properties: + result = volume_type.set_keys(properties) + volume_type._info.update( + {'properties': format_columns.DictColumn(result)} + ) + + if ( + parsed_args.encryption_provider + or parsed_args.encryption_cipher + or parsed_args.encryption_key_size + or parsed_args.encryption_control_location + ): + try: + # create new encryption + encryption = _create_encryption_type( + volume_client, volume_type, parsed_args + ) + except Exception as e: + LOG.error( + _( + "Failed to set encryption information for this " + "volume type: %s" + ), + e, + ) + # add encryption info in result + encryption._info.pop("volume_type_id", None) + volume_type._info.update( + {'encryption': format_columns.DictColumn(encryption._info)} + ) + + volume_type._info.pop("os-volume-type-access:is_public", None) + + return zip(*sorted(volume_type._info.items())) + + +class DeleteVolumeType(command.Command): + _description = _("Delete volume type(s)") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "volume_types", + metavar="", + nargs="+", + help=_("Volume type(s) to delete (name or ID)"), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.volume + result = 0 + + for volume_type in parsed_args.volume_types: + try: + vol_type = utils.find_resource( + volume_client.volume_types, volume_type + ) + + volume_client.volume_types.delete(vol_type) + except Exception as e: + result += 1 + LOG.error( + _( + "Failed to delete volume type with " + "name or ID '%(volume_type)s': %(e)s" + ) + % {'volume_type': volume_type, 'e': e} + ) + + if result > 0: + total = len(parsed_args.volume_types) + msg = _( + "%(result)s of %(total)s volume types failed to delete." 
+ ) % {'result': result, 'total': total} + raise exceptions.CommandError(msg) + + +class ListVolumeType(command.Lister): + _description = _("List volume types") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + '--long', + action='store_true', + default=False, + help=_('List additional fields in output'), + ) + public_group = parser.add_mutually_exclusive_group() + public_group.add_argument( + "--default", + action='store_true', + default=False, + help=_('List the default volume type'), + ) + public_group.add_argument( + "--public", + action="store_true", + dest="is_public", + default=None, + help=_("List only public types"), + ) + public_group.add_argument( + "--private", + action="store_false", + dest="is_public", + default=None, + help=_("List only private types (admin only)"), + ) + parser.add_argument( + "--encryption-type", + action="store_true", + help=_( + "Display encryption information for each volume type " + "(admin only)" + ), + ) + parser.add_argument( + '--property', + metavar='', + action=parseractions.KeyValueAction, + dest='properties', + help=_( + 'Filter by a property on the volume types ' + '(repeat option to filter by multiple properties) ' + '(admin only except for user-visible extra specs) ' + '(supported by --os-volume-api-version 3.52 or above)' + ), + ) + parser.add_argument( + '--multiattach', + action='store_true', + default=False, + help=_( + "List only volume types with multi-attach enabled " + "(this is an alias for '--property multiattach= True') " + "(supported by --os-volume-api-version 3.52 or above)" + ), + ) + parser.add_argument( + '--cacheable', + action='store_true', + default=False, + help=_( + "List only volume types with caching enabled " + "(this is an alias for '--property cacheable= True') " + "(admin only) " + "(supported by --os-volume-api-version 3.52 or above)" + ), + ) + parser.add_argument( + '--replicated', + action='store_true', + default=False, + help=_( + "List only volume types with replication enabled " + "(this is an alias for " + "'--property replication_enabled= True') " + "(supported by --os-volume-api-version 3.52 or above)" + ), + ) + parser.add_argument( + '--availability-zone', + action='append', + dest='availability_zones', + help=_( + "List only volume types with this availability configured " + "(this is an alias for " + "'--property RESKEY:availability_zones:') " + "(repeat option to filter on multiple availability zones)" + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.volume + + if parsed_args.long: + columns = [ + 'ID', + 'Name', + 'Is Public', + 'Description', + 'Extra Specs', + ] + column_headers = [ + 'ID', + 'Name', + 'Is Public', + 'Description', + 'Properties', + ] + else: + columns = ['ID', 'Name', 'Is Public'] + column_headers = ['ID', 'Name', 'Is Public'] + + if parsed_args.default: + data = [volume_client.volume_types.default()] + else: + search_opts = {} + properties = {} + if parsed_args.properties: + properties.update(parsed_args.properties) + if parsed_args.multiattach: + properties['multiattach'] = ' True' + if parsed_args.cacheable: + properties['cacheable'] = ' True' + if parsed_args.replicated: + properties['replication_enabled'] = ' True' + if parsed_args.availability_zones: + properties['RESKEY:availability_zones'] = ','.join( + parsed_args.availability_zones + ) + if properties: + if volume_client.api_version < api_versions.APIVersion('3.52'): + msg = _( + "--os-volume-api-version 3.52 
or greater is required " + "to use the '--property' option or any of the alias " + "options" + ) + raise exceptions.CommandError(msg) + + search_opts['extra_specs'] = properties + + data = volume_client.volume_types.list( + search_opts=search_opts, + is_public=parsed_args.is_public, + ) + + formatters = {'Extra Specs': format_columns.DictColumn} + + if parsed_args.encryption_type: + encryption = {} + for d in volume_client.volume_encryption_types.list(): + volume_type_id = d._info['volume_type_id'] + # remove some redundant information + del_key = [ + 'deleted', + 'created_at', + 'updated_at', + 'deleted_at', + 'volume_type_id', + ] + for key in del_key: + d._info.pop(key, None) + # save the encryption information with their volume type ID + encryption[volume_type_id] = d._info + # We need to get volume type ID, then show encryption + # information according to the ID, so use "id" to keep + # difference to the real "ID" column. + columns += ['id'] + column_headers += ['Encryption'] + + _EncryptionInfoColumn = functools.partial( + EncryptionInfoColumn, encryption_data=encryption + ) + formatters['id'] = _EncryptionInfoColumn + + return ( + column_headers, + ( + utils.get_item_properties( + s, + columns, + formatters=formatters, + ) + for s in data + ), + ) + + +class SetVolumeType(command.Command): + _description = _("Set volume type properties") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'volume_type', + metavar='', + help=_('Volume type to modify (name or ID)'), + ) + parser.add_argument( + '--name', + metavar='', + help=_('Set volume type name'), + ) + parser.add_argument( + '--description', + metavar='', + help=_('Set volume type description'), + ) + parser.add_argument( + '--property', + metavar='', + action=parseractions.KeyValueAction, + dest='properties', + help=_( + 'Set a property on this volume type ' + '(repeat option to set multiple properties)' + ), + ) + parser.add_argument( + '--multiattach', + action='store_true', + default=False, + help=_( + "Enable multi-attach for this volume type " + "(this is an alias for '--property multiattach= True') " + "(requires driver support)" + ), + ) + parser.add_argument( + '--cacheable', + action='store_true', + default=False, + help=_( + "Enable caching for this volume type " + "(this is an alias for '--property cacheable= True') " + "(requires driver support)" + ), + ) + parser.add_argument( + '--replicated', + action='store_true', + default=False, + help=_( + "Enabled replication for this volume type " + "(this is an alias for " + "'--property replication_enabled= True') " + "(requires driver support)" + ), + ) + parser.add_argument( + '--availability-zone', + action='append', + dest='availability_zones', + help=_( + "Set an availability zone for this volume type " + "(this is an alias for " + "'--property RESKEY:availability_zones:') " + "(repeat option to set multiple availability zones)" + ), + ) + parser.add_argument( + '--project', + metavar='', + help=_( + 'Set volume type access to project (name or ID) (admin only)' + ), + ) + public_group = parser.add_mutually_exclusive_group() + public_group.add_argument( + '--public', + action='store_true', + dest='is_public', + default=None, + help=_('Volume type is accessible to the public'), + ) + public_group.add_argument( + '--private', + action='store_false', + dest='is_public', + default=None, + help=_("Volume type is not accessible to the public"), + ) + identity_common.add_project_domain_option_to_parser(parser) + # 
TODO(Huanxuan Ao): Add choices for each "--encryption-*" option. + parser.add_argument( + '--encryption-provider', + metavar='', + help=_( + 'Set the encryption provider format for ' + 'this volume type (e.g "luks" or "plain") (admin only) ' + '(This option is required when setting encryption type ' + 'of a volume for the first time. Consider using other ' + 'encryption options such as: "--encryption-cipher", ' + '"--encryption-key-size" and ' + '"--encryption-control-location")' + ), + ) + parser.add_argument( + '--encryption-cipher', + metavar='', + help=_( + 'Set the encryption algorithm or mode for this ' + 'volume type (e.g "aes-xts-plain64") (admin only)' + ), + ) + parser.add_argument( + '--encryption-key-size', + metavar='', + type=int, + help=_( + 'Set the size of the encryption key of this ' + 'volume type (e.g "128" or "256") (admin only)' + ), + ) + parser.add_argument( + '--encryption-control-location', + metavar='', + choices=['front-end', 'back-end'], + help=_( + 'Set the notional service where the encryption is ' + 'performed ("front-end" or "back-end") (admin only) ' + '(The default value for this option is "front-end" ' + 'when setting encryption type of a volume for the ' + 'first time. Consider using other encryption options ' + 'such as: "--encryption-cipher", "--encryption-key-size" ' + 'and "--encryption-provider")' + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.volume + identity_client = self.app.client_manager.identity + + volume_type = utils.find_resource( + volume_client.volume_types, + parsed_args.volume_type, + ) + + result = 0 + kwargs = {} + + if parsed_args.name: + kwargs['name'] = parsed_args.name + + if parsed_args.description: + kwargs['description'] = parsed_args.description + + if parsed_args.is_public is not None: + kwargs['is_public'] = parsed_args.is_public + + if kwargs: + try: + volume_client.volume_types.update(volume_type.id, **kwargs) + except Exception as e: + LOG.error( + _("Failed to update volume type name or description: %s"), + e, + ) + result += 1 + + properties: dict[str, str] = {} + if parsed_args.properties: + properties.update(parsed_args.properties) + if parsed_args.multiattach: + properties['multiattach'] = ' True' + if parsed_args.cacheable: + properties['cacheable'] = ' True' + if parsed_args.replicated: + properties['replication_enabled'] = ' True' + if parsed_args.availability_zones: + properties['RESKEY:availability_zones'] = ','.join( + parsed_args.availability_zones + ) + if properties: + try: + volume_type.set_keys(properties) + except Exception as e: + LOG.error(_("Failed to set volume type properties: %s"), e) + result += 1 + + if parsed_args.project: + project_info = None + try: + project_info = identity_common.find_project( + identity_client, + parsed_args.project, + parsed_args.project_domain, + ) + + volume_client.volume_type_access.add_project_access( + volume_type.id, project_info.id + ) + except Exception as e: + LOG.error( + _("Failed to set volume type access to project: %s"), e + ) + result += 1 + + if ( + parsed_args.encryption_provider + or parsed_args.encryption_cipher + or parsed_args.encryption_key_size + or parsed_args.encryption_control_location + ): + try: + _set_encryption_type(volume_client, volume_type, parsed_args) + except Exception as e: + LOG.error( + _( + "Failed to set encryption information for this " + "volume type: %s" + ), + e, + ) + result += 1 + + if result > 0: + raise exceptions.CommandError( + _("Command Failed: One or more of 
the operations failed") + ) + + +class ShowVolumeType(command.ShowOne): + _description = _("Display volume type details") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + "volume_type", + metavar="", + help=_("Volume type to display (name or ID)"), + ) + parser.add_argument( + "--encryption-type", + action="store_true", + help=_( + "Display encryption information of this volume type " + "(admin only)" + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.volume + volume_type = utils.find_resource( + volume_client.volume_types, parsed_args.volume_type + ) + properties = format_columns.DictColumn( + volume_type._info.pop('extra_specs', {}) + ) + volume_type._info.update({'properties': properties}) + access_project_ids = None + if not volume_type.is_public: + try: + volume_type_access = volume_client.volume_type_access.list( + volume_type.id + ) + project_ids = [ + utils.get_field(item, 'project_id') + for item in volume_type_access + ] + # TODO(Rui Chen): This format list case can be removed after + # patch https://review.opendev.org/#/c/330223/ merged. + access_project_ids = format_columns.ListColumn(project_ids) + except Exception as e: + msg = _( + 'Failed to get access project list for volume type ' + '%(type)s: %(e)s' + ) + LOG.error(msg % {'type': volume_type.id, 'e': e}) + volume_type._info.update({'access_project_ids': access_project_ids}) + if parsed_args.encryption_type: + # show encryption type information for this volume type + try: + encryption = volume_client.volume_encryption_types.get( + volume_type.id + ) + encryption._info.pop("volume_type_id", None) + volume_type._info.update( + {'encryption': format_columns.DictColumn(encryption._info)} + ) + except Exception as e: + LOG.error( + _( + "Failed to display the encryption information " + "of this volume type: %s" + ), + e, + ) + volume_type._info.pop("os-volume-type-access:is_public", None) + return zip(*sorted(volume_type._info.items())) + + +class UnsetVolumeType(command.Command): + _description = _("Unset volume type properties") + + def get_parser(self, prog_name): + parser = super().get_parser(prog_name) + parser.add_argument( + 'volume_type', + metavar='', + help=_('Volume type to modify (name or ID)'), + ) + parser.add_argument( + '--property', + metavar='', + action='append', + dest='properties', + help=_( + 'Remove a property from this volume type ' + '(repeat option to remove multiple properties)' + ), + ) + parser.add_argument( + '--project', + metavar='', + help=_( + 'Removes volume type access to project (name or ID) ' + '(admin only)' + ), + ) + identity_common.add_project_domain_option_to_parser(parser) + parser.add_argument( + "--encryption-type", + action="store_true", + help=_( + "Remove the encryption type for this volume type (admin only)" + ), + ) + return parser + + def take_action(self, parsed_args): + volume_client = self.app.client_manager.volume + identity_client = self.app.client_manager.identity + + volume_type = utils.find_resource( + volume_client.volume_types, + parsed_args.volume_type, + ) + + result = 0 + if parsed_args.properties: + try: + volume_type.unset_keys(parsed_args.properties) + except Exception as e: + LOG.error(_("Failed to unset volume type properties: %s"), e) + result += 1 + + if parsed_args.project: + project_info = None + try: + project_info = identity_common.find_project( + identity_client, + parsed_args.project, + parsed_args.project_domain, + ) + + 
volume_client.volume_type_access.remove_project_access( + volume_type.id, project_info.id + ) + except Exception as e: + LOG.error( + _("Failed to remove volume type access from project: %s"), + e, + ) + result += 1 + if parsed_args.encryption_type: + try: + volume_client.volume_encryption_types.delete(volume_type) + except Exception as e: + LOG.error( + _( + "Failed to remove the encryption type for this " + "volume type: %s" + ), + e, + ) + result += 1 + + if result > 0: + raise exceptions.CommandError( + _("Command Failed: One or more of the operations failed") + ) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..37fb8d0c26 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,772 @@ +[build-system] +requires = ["pbr>=6.1.1"] +build-backend = "pbr.build" + +[project] +name = "python-openstackclient" +description = "OpenStack Command-line Client" +authors = [ + {name = "OpenStack", email = "openstack-discuss@lists.openstack.org"}, +] +readme = {file = "README.rst", content-type = "text/x-rst"} +license = {text = "Apache-2.0"} +dynamic = ["version", "dependencies"] +# dependencies = [ ] +requires-python = ">=3.10" +classifiers = [ + "Environment :: OpenStack", + "Intended Audience :: Information Technology", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: Apache Software License", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] + +# [project.optional-dependencies] +# test = [ +# ] + +[project.urls] +Homepage = "https://docs.openstack.org/python-openstackclient/" +Repository = "https://opendev.org/openstack/python-openstackclient/" + +[project.scripts] +openstack = "openstackclient.shell:main" + +[project.entry-points."openstack.cli"] +command_list = "openstackclient.common.module:ListCommand" +module_list = "openstackclient.common.module:ListModule" + +[project.entry-points."openstack.cli.base"] +compute = "openstackclient.compute.client" +identity = "openstackclient.identity.client" +image = "openstackclient.image.client" +network = "openstackclient.network.client" +object_store = "openstackclient.object.client" +volume = "openstackclient.volume.client" + +[project.entry-points."openstack.common"] +availability_zone_list = "openstackclient.common.availability_zone:ListAvailabilityZone" +configuration_show = "openstackclient.common.configuration:ShowConfiguration" +extension_list = "openstackclient.common.extension:ListExtension" +extension_show = "openstackclient.common.extension:ShowExtension" +limits_show = "openstackclient.common.limits:ShowLimits" +project_cleanup = "openstackclient.common.project_cleanup:ProjectCleanup" +quota_list = "openstackclient.common.quota:ListQuota" +quota_set = "openstackclient.common.quota:SetQuota" +quota_show = "openstackclient.common.quota:ShowQuota" +quota_delete = "openstackclient.common.quota:DeleteQuota" +versions_show = "openstackclient.common.versions:ShowVersions" + +[project.entry-points."openstack.compute.v2"] +aggregate_add_host = "openstackclient.compute.v2.aggregate:AddAggregateHost" +aggregate_create = "openstackclient.compute.v2.aggregate:CreateAggregate" +aggregate_delete = "openstackclient.compute.v2.aggregate:DeleteAggregate" +aggregate_list = "openstackclient.compute.v2.aggregate:ListAggregate" +aggregate_remove_host = 
"openstackclient.compute.v2.aggregate:RemoveAggregateHost" +aggregate_set = "openstackclient.compute.v2.aggregate:SetAggregate" +aggregate_show = "openstackclient.compute.v2.aggregate:ShowAggregate" +aggregate_unset = "openstackclient.compute.v2.aggregate:UnsetAggregate" +aggregate_cache_image = "openstackclient.compute.v2.aggregate:CacheImageForAggregate" +compute_agent_create = "openstackclient.compute.v2.agent:CreateAgent" +compute_agent_delete = "openstackclient.compute.v2.agent:DeleteAgent" +compute_agent_list = "openstackclient.compute.v2.agent:ListAgent" +compute_agent_set = "openstackclient.compute.v2.agent:SetAgent" +compute_service_delete = "openstackclient.compute.v2.service:DeleteService" +compute_service_list = "openstackclient.compute.v2.service:ListService" +compute_service_set = "openstackclient.compute.v2.service:SetService" +console_log_show = "openstackclient.compute.v2.console:ShowConsoleLog" +console_url_show = "openstackclient.compute.v2.console:ShowConsoleURL" +console_connection_show = "openstackclient.compute.v2.console_connection:ShowConsoleConnectionInformation" +flavor_create = "openstackclient.compute.v2.flavor:CreateFlavor" +flavor_delete = "openstackclient.compute.v2.flavor:DeleteFlavor" +flavor_list = "openstackclient.compute.v2.flavor:ListFlavor" +flavor_show = "openstackclient.compute.v2.flavor:ShowFlavor" +flavor_set = "openstackclient.compute.v2.flavor:SetFlavor" +flavor_unset = "openstackclient.compute.v2.flavor:UnsetFlavor" +host_list = "openstackclient.compute.v2.host:ListHost" +host_set = "openstackclient.compute.v2.host:SetHost" +host_show = "openstackclient.compute.v2.host:ShowHost" +hypervisor_list = "openstackclient.compute.v2.hypervisor:ListHypervisor" +hypervisor_show = "openstackclient.compute.v2.hypervisor:ShowHypervisor" +hypervisor_stats_show = "openstackclient.compute.v2.hypervisor_stats:ShowHypervisorStats" +keypair_create = "openstackclient.compute.v2.keypair:CreateKeypair" +keypair_delete = "openstackclient.compute.v2.keypair:DeleteKeypair" +keypair_list = "openstackclient.compute.v2.keypair:ListKeypair" +keypair_show = "openstackclient.compute.v2.keypair:ShowKeypair" +server_add_fixed_ip = "openstackclient.compute.v2.server:AddFixedIP" +server_add_floating_ip = "openstackclient.compute.v2.server:AddFloatingIP" +server_add_port = "openstackclient.compute.v2.server:AddPort" +server_add_network = "openstackclient.compute.v2.server:AddNetwork" +server_add_security_group = "openstackclient.compute.v2.server:AddServerSecurityGroup" +server_add_volume = "openstackclient.compute.v2.server:AddServerVolume" +server_create = "openstackclient.compute.v2.server:CreateServer" +server_delete = "openstackclient.compute.v2.server:DeleteServer" +server_dump_create = "openstackclient.compute.v2.server:CreateServerDump" +server_evacuate = "openstackclient.compute.v2.server:EvacuateServer" +server_list = "openstackclient.compute.v2.server:ListServer" +server_lock = "openstackclient.compute.v2.server:LockServer" +server_migrate = "openstackclient.compute.v2.server:MigrateServer" +server_migrate_confirm = "openstackclient.compute.v2.server:MigrateConfirm" +server_migrate_revert = "openstackclient.compute.v2.server:MigrateRevert" +server_migration_confirm = "openstackclient.compute.v2.server:ConfirmMigration" +server_migration_revert = "openstackclient.compute.v2.server:RevertMigration" +server_pause = "openstackclient.compute.v2.server:PauseServer" +server_reboot = "openstackclient.compute.v2.server:RebootServer" +server_rebuild = 
"openstackclient.compute.v2.server:RebuildServer" +server_remove_fixed_ip = "openstackclient.compute.v2.server:RemoveFixedIP" +server_remove_floating_ip = "openstackclient.compute.v2.server:RemoveFloatingIP" +server_remove_port = "openstackclient.compute.v2.server:RemovePort" +server_remove_network = "openstackclient.compute.v2.server:RemoveNetwork" +server_remove_security_group = "openstackclient.compute.v2.server:RemoveServerSecurityGroup" +server_remove_volume = "openstackclient.compute.v2.server:RemoveServerVolume" +server_rescue = "openstackclient.compute.v2.server:RescueServer" +server_resize = "openstackclient.compute.v2.server:ResizeServer" +server_resize_confirm = "openstackclient.compute.v2.server:ResizeConfirm" +server_resize_revert = "openstackclient.compute.v2.server:ResizeRevert" +server_restore = "openstackclient.compute.v2.server:RestoreServer" +server_resume = "openstackclient.compute.v2.server:ResumeServer" +server_set = "openstackclient.compute.v2.server:SetServer" +server_shelve = "openstackclient.compute.v2.server:ShelveServer" +server_show = "openstackclient.compute.v2.server:ShowServer" +server_ssh = "openstackclient.compute.v2.server:SshServer" +server_start = "openstackclient.compute.v2.server:StartServer" +server_stop = "openstackclient.compute.v2.server:StopServer" +server_suspend = "openstackclient.compute.v2.server:SuspendServer" +server_unlock = "openstackclient.compute.v2.server:UnlockServer" +server_unpause = "openstackclient.compute.v2.server:UnpauseServer" +server_unrescue = "openstackclient.compute.v2.server:UnrescueServer" +server_unset = "openstackclient.compute.v2.server:UnsetServer" +server_unshelve = "openstackclient.compute.v2.server:UnshelveServer" +server_backup_create = "openstackclient.compute.v2.server_backup:CreateServerBackup" +server_event_list = "openstackclient.compute.v2.server_event:ListServerEvent" +server_event_show = "openstackclient.compute.v2.server_event:ShowServerEvent" +server_group_create = "openstackclient.compute.v2.server_group:CreateServerGroup" +server_group_delete = "openstackclient.compute.v2.server_group:DeleteServerGroup" +server_group_list = "openstackclient.compute.v2.server_group:ListServerGroup" +server_group_show = "openstackclient.compute.v2.server_group:ShowServerGroup" +server_image_create = "openstackclient.compute.v2.server_image:CreateServerImage" +server_migration_abort = "openstackclient.compute.v2.server_migration:AbortMigration" +server_migration_force_complete = "openstackclient.compute.v2.server_migration:ForceCompleteMigration" +server_migration_list = "openstackclient.compute.v2.server_migration:ListMigration" +server_migration_show = "openstackclient.compute.v2.server_migration:ShowMigration" +server_volume_list = "openstackclient.compute.v2.server_volume:ListServerVolume" +server_volume_set = "openstackclient.compute.v2.server_volume:SetServerVolume" +server_volume_update = "openstackclient.compute.v2.server_volume:UpdateServerVolume" +usage_list = "openstackclient.compute.v2.usage:ListUsage" +usage_show = "openstackclient.compute.v2.usage:ShowUsage" + +[project.entry-points."openstack.identity.v2"] +catalog_list = "openstackclient.identity.v2_0.catalog:ListCatalog" +catalog_show = "openstackclient.identity.v2_0.catalog:ShowCatalog" +ec2_credentials_create = "openstackclient.identity.v2_0.ec2creds:CreateEC2Creds" +ec2_credentials_delete = "openstackclient.identity.v2_0.ec2creds:DeleteEC2Creds" +ec2_credentials_list = "openstackclient.identity.v2_0.ec2creds:ListEC2Creds" +ec2_credentials_show = 
"openstackclient.identity.v2_0.ec2creds:ShowEC2Creds" +endpoint_create = "openstackclient.identity.v2_0.endpoint:CreateEndpoint" +endpoint_delete = "openstackclient.identity.v2_0.endpoint:DeleteEndpoint" +endpoint_list = "openstackclient.identity.v2_0.endpoint:ListEndpoint" +endpoint_show = "openstackclient.identity.v2_0.endpoint:ShowEndpoint" +project_create = "openstackclient.identity.v2_0.project:CreateProject" +project_delete = "openstackclient.identity.v2_0.project:DeleteProject" +project_list = "openstackclient.identity.v2_0.project:ListProject" +project_set = "openstackclient.identity.v2_0.project:SetProject" +project_show = "openstackclient.identity.v2_0.project:ShowProject" +project_unset = "openstackclient.identity.v2_0.project:UnsetProject" +role_add = "openstackclient.identity.v2_0.role:AddRole" +role_create = "openstackclient.identity.v2_0.role:CreateRole" +role_delete = "openstackclient.identity.v2_0.role:DeleteRole" +role_list = "openstackclient.identity.v2_0.role:ListRole" +role_remove = "openstackclient.identity.v2_0.role:RemoveRole" +role_show = "openstackclient.identity.v2_0.role:ShowRole" +role_assignment_list = "openstackclient.identity.v2_0.role_assignment:ListRoleAssignment" +service_create = "openstackclient.identity.v2_0.service:CreateService" +service_delete = "openstackclient.identity.v2_0.service:DeleteService" +service_list = "openstackclient.identity.v2_0.service:ListService" +service_show = "openstackclient.identity.v2_0.service:ShowService" +token_issue = "openstackclient.identity.v2_0.token:IssueToken" +token_revoke = "openstackclient.identity.v2_0.token:RevokeToken" +user_create = "openstackclient.identity.v2_0.user:CreateUser" +user_delete = "openstackclient.identity.v2_0.user:DeleteUser" +user_list = "openstackclient.identity.v2_0.user:ListUser" +user_set = "openstackclient.identity.v2_0.user:SetUser" +user_show = "openstackclient.identity.v2_0.user:ShowUser" + +[project.entry-points."openstack.identity.v3"] +access_token_create = "openstackclient.identity.v3.token:CreateAccessToken" +access_rule_delete = "openstackclient.identity.v3.access_rule:DeleteAccessRule" +access_rule_list = "openstackclient.identity.v3.access_rule:ListAccessRule" +access_rule_show = "openstackclient.identity.v3.access_rule:ShowAccessRule" +application_credential_create = "openstackclient.identity.v3.application_credential:CreateApplicationCredential" +application_credential_delete = "openstackclient.identity.v3.application_credential:DeleteApplicationCredential" +application_credential_list = "openstackclient.identity.v3.application_credential:ListApplicationCredential" +application_credential_show = "openstackclient.identity.v3.application_credential:ShowApplicationCredential" +catalog_list = "openstackclient.identity.v3.catalog:ListCatalog" +catalog_show = "openstackclient.identity.v3.catalog:ShowCatalog" +consumer_create = "openstackclient.identity.v3.consumer:CreateConsumer" +consumer_delete = "openstackclient.identity.v3.consumer:DeleteConsumer" +consumer_list = "openstackclient.identity.v3.consumer:ListConsumer" +consumer_set = "openstackclient.identity.v3.consumer:SetConsumer" +consumer_show = "openstackclient.identity.v3.consumer:ShowConsumer" +credential_create = "openstackclient.identity.v3.credential:CreateCredential" +credential_delete = "openstackclient.identity.v3.credential:DeleteCredential" +credential_list = "openstackclient.identity.v3.credential:ListCredential" +credential_set = "openstackclient.identity.v3.credential:SetCredential" +credential_show = 
"openstackclient.identity.v3.credential:ShowCredential" +domain_create = "openstackclient.identity.v3.domain:CreateDomain" +domain_delete = "openstackclient.identity.v3.domain:DeleteDomain" +domain_list = "openstackclient.identity.v3.domain:ListDomain" +domain_set = "openstackclient.identity.v3.domain:SetDomain" +domain_show = "openstackclient.identity.v3.domain:ShowDomain" +ec2_credentials_create = "openstackclient.identity.v3.ec2creds:CreateEC2Creds" +ec2_credentials_delete = "openstackclient.identity.v3.ec2creds:DeleteEC2Creds" +ec2_credentials_list = "openstackclient.identity.v3.ec2creds:ListEC2Creds" +ec2_credentials_show = "openstackclient.identity.v3.ec2creds:ShowEC2Creds" +endpoint_add_project = "openstackclient.identity.v3.endpoint:AddProjectToEndpoint" +endpoint_create = "openstackclient.identity.v3.endpoint:CreateEndpoint" +endpoint_delete = "openstackclient.identity.v3.endpoint:DeleteEndpoint" +endpoint_list = "openstackclient.identity.v3.endpoint:ListEndpoint" +endpoint_remove_project = "openstackclient.identity.v3.endpoint:RemoveProjectFromEndpoint" +endpoint_set = "openstackclient.identity.v3.endpoint:SetEndpoint" +endpoint_show = "openstackclient.identity.v3.endpoint:ShowEndpoint" +endpoint_group_add_project = "openstackclient.identity.v3.endpoint_group:AddProjectToEndpointGroup" +endpoint_group_create = "openstackclient.identity.v3.endpoint_group:CreateEndpointGroup" +endpoint_group_delete = "openstackclient.identity.v3.endpoint_group:DeleteEndpointGroup" +endpoint_group_list = "openstackclient.identity.v3.endpoint_group:ListEndpointGroup" +endpoint_group_remove_project = "openstackclient.identity.v3.endpoint_group:RemoveProjectFromEndpointGroup" +endpoint_group_set = "openstackclient.identity.v3.endpoint_group:SetEndpointGroup" +endpoint_group_show = "openstackclient.identity.v3.endpoint_group:ShowEndpointGroup" +federation_domain_list = "openstackclient.identity.v3.unscoped_saml:ListAccessibleDomains" +federation_project_list = "openstackclient.identity.v3.unscoped_saml:ListAccessibleProjects" +federation_protocol_create = "openstackclient.identity.v3.federation_protocol:CreateProtocol" +federation_protocol_delete = "openstackclient.identity.v3.federation_protocol:DeleteProtocol" +federation_protocol_list = "openstackclient.identity.v3.federation_protocol:ListProtocols" +federation_protocol_set = "openstackclient.identity.v3.federation_protocol:SetProtocol" +federation_protocol_show = "openstackclient.identity.v3.federation_protocol:ShowProtocol" +group_add_user = "openstackclient.identity.v3.group:AddUserToGroup" +group_contains_user = "openstackclient.identity.v3.group:CheckUserInGroup" +group_create = "openstackclient.identity.v3.group:CreateGroup" +group_delete = "openstackclient.identity.v3.group:DeleteGroup" +group_list = "openstackclient.identity.v3.group:ListGroup" +group_remove_user = "openstackclient.identity.v3.group:RemoveUserFromGroup" +group_set = "openstackclient.identity.v3.group:SetGroup" +group_show = "openstackclient.identity.v3.group:ShowGroup" +identity_provider_create = "openstackclient.identity.v3.identity_provider:CreateIdentityProvider" +identity_provider_delete = "openstackclient.identity.v3.identity_provider:DeleteIdentityProvider" +identity_provider_list = "openstackclient.identity.v3.identity_provider:ListIdentityProvider" +identity_provider_set = "openstackclient.identity.v3.identity_provider:SetIdentityProvider" +identity_provider_show = "openstackclient.identity.v3.identity_provider:ShowIdentityProvider" +implied_role_create = 
"openstackclient.identity.v3.implied_role:CreateImpliedRole" +implied_role_delete = "openstackclient.identity.v3.implied_role:DeleteImpliedRole" +implied_role_list = "openstackclient.identity.v3.implied_role:ListImpliedRole" +limit_create = "openstackclient.identity.v3.limit:CreateLimit" +limit_delete = "openstackclient.identity.v3.limit:DeleteLimit" +limit_list = "openstackclient.identity.v3.limit:ListLimit" +limit_set = "openstackclient.identity.v3.limit:SetLimit" +limit_show = "openstackclient.identity.v3.limit:ShowLimit" +mapping_create = "openstackclient.identity.v3.mapping:CreateMapping" +mapping_delete = "openstackclient.identity.v3.mapping:DeleteMapping" +mapping_list = "openstackclient.identity.v3.mapping:ListMapping" +mapping_set = "openstackclient.identity.v3.mapping:SetMapping" +mapping_show = "openstackclient.identity.v3.mapping:ShowMapping" +policy_create = "openstackclient.identity.v3.policy:CreatePolicy" +policy_delete = "openstackclient.identity.v3.policy:DeletePolicy" +policy_list = "openstackclient.identity.v3.policy:ListPolicy" +policy_set = "openstackclient.identity.v3.policy:SetPolicy" +policy_show = "openstackclient.identity.v3.policy:ShowPolicy" +project_create = "openstackclient.identity.v3.project:CreateProject" +project_delete = "openstackclient.identity.v3.project:DeleteProject" +project_list = "openstackclient.identity.v3.project:ListProject" +project_set = "openstackclient.identity.v3.project:SetProject" +project_show = "openstackclient.identity.v3.project:ShowProject" +region_create = "openstackclient.identity.v3.region:CreateRegion" +region_delete = "openstackclient.identity.v3.region:DeleteRegion" +region_list = "openstackclient.identity.v3.region:ListRegion" +region_set = "openstackclient.identity.v3.region:SetRegion" +region_show = "openstackclient.identity.v3.region:ShowRegion" +registered_limit_create = "openstackclient.identity.v3.registered_limit:CreateRegisteredLimit" +registered_limit_delete = "openstackclient.identity.v3.registered_limit:DeleteRegisteredLimit" +registered_limit_list = "openstackclient.identity.v3.registered_limit:ListRegisteredLimit" +registered_limit_set = "openstackclient.identity.v3.registered_limit:SetRegisteredLimit" +registered_limit_show = "openstackclient.identity.v3.registered_limit:ShowRegisteredLimit" +request_token_authorize = "openstackclient.identity.v3.token:AuthorizeRequestToken" +request_token_create = "openstackclient.identity.v3.token:CreateRequestToken" +role_add = "openstackclient.identity.v3.role:AddRole" +role_create = "openstackclient.identity.v3.role:CreateRole" +role_delete = "openstackclient.identity.v3.role:DeleteRole" +role_list = "openstackclient.identity.v3.role:ListRole" +role_remove = "openstackclient.identity.v3.role:RemoveRole" +role_show = "openstackclient.identity.v3.role:ShowRole" +role_set = "openstackclient.identity.v3.role:SetRole" +role_assignment_list = "openstackclient.identity.v3.role_assignment:ListRoleAssignment" +service_create = "openstackclient.identity.v3.service:CreateService" +service_delete = "openstackclient.identity.v3.service:DeleteService" +service_list = "openstackclient.identity.v3.service:ListService" +service_show = "openstackclient.identity.v3.service:ShowService" +service_set = "openstackclient.identity.v3.service:SetService" +service_provider_create = "openstackclient.identity.v3.service_provider:CreateServiceProvider" +service_provider_delete = "openstackclient.identity.v3.service_provider:DeleteServiceProvider" +service_provider_list = 
"openstackclient.identity.v3.service_provider:ListServiceProvider" +service_provider_set = "openstackclient.identity.v3.service_provider:SetServiceProvider" +service_provider_show = "openstackclient.identity.v3.service_provider:ShowServiceProvider" +token_issue = "openstackclient.identity.v3.token:IssueToken" +token_revoke = "openstackclient.identity.v3.token:RevokeToken" +trust_create = "openstackclient.identity.v3.trust:CreateTrust" +trust_delete = "openstackclient.identity.v3.trust:DeleteTrust" +trust_list = "openstackclient.identity.v3.trust:ListTrust" +trust_show = "openstackclient.identity.v3.trust:ShowTrust" +user_create = "openstackclient.identity.v3.user:CreateUser" +user_delete = "openstackclient.identity.v3.user:DeleteUser" +user_list = "openstackclient.identity.v3.user:ListUser" +user_set = "openstackclient.identity.v3.user:SetUser" +user_password_set = "openstackclient.identity.v3.user:SetPasswordUser" +user_show = "openstackclient.identity.v3.user:ShowUser" + +[project.entry-points."openstack.image.v1"] +image_create = "openstackclient.image.v1.image:CreateImage" +image_delete = "openstackclient.image.v1.image:DeleteImage" +image_list = "openstackclient.image.v1.image:ListImage" +image_save = "openstackclient.image.v1.image:SaveImage" +image_set = "openstackclient.image.v1.image:SetImage" +image_show = "openstackclient.image.v1.image:ShowImage" + +[project.entry-points."openstack.image.v2"] +image_add_project = "openstackclient.image.v2.image:AddProjectToImage" +image_create = "openstackclient.image.v2.image:CreateImage" +image_delete = "openstackclient.image.v2.image:DeleteImage" +image_list = "openstackclient.image.v2.image:ListImage" +image_member_list = "openstackclient.image.v2.image:ListImageProjects" +image_remove_project = "openstackclient.image.v2.image:RemoveProjectImage" +image_member_get = "openstackclient.image.v2.image:ShowProjectImage" +image_save = "openstackclient.image.v2.image:SaveImage" +image_show = "openstackclient.image.v2.image:ShowImage" +image_set = "openstackclient.image.v2.image:SetImage" +image_unset = "openstackclient.image.v2.image:UnsetImage" +image_stage = "openstackclient.image.v2.image:StageImage" +image_task_show = "openstackclient.image.v2.task:ShowTask" +image_task_list = "openstackclient.image.v2.task:ListTask" +image_import_info = "openstackclient.image.v2.info:ImportInfo" +image_import = "openstackclient.image.v2.image:ImportImage" +image_stores_list = "openstackclient.image.v2.image:StoresInfo" +image_metadef_namespace_create = "openstackclient.image.v2.metadef_namespaces:CreateMetadefNamespace" +image_metadef_namespace_delete = "openstackclient.image.v2.metadef_namespaces:DeleteMetadefNamespace" +image_metadef_namespace_list = "openstackclient.image.v2.metadef_namespaces:ListMetadefNamespace" +image_metadef_namespace_set = "openstackclient.image.v2.metadef_namespaces:SetMetadefNamespace" +image_metadef_namespace_show = "openstackclient.image.v2.metadef_namespaces:ShowMetadefNamespace" +image_metadef_object_create = "openstackclient.image.v2.metadef_objects:CreateMetadefObjects" +image_metadef_object_show = "openstackclient.image.v2.metadef_objects:ShowMetadefObjects" +image_metadef_object_list = "openstackclient.image.v2.metadef_objects:ListMetadefObjects" +image_metadef_object_delete = "openstackclient.image.v2.metadef_objects:DeleteMetadefObject" +image_metadef_object_update = "openstackclient.image.v2.metadef_objects:SetMetadefObject" +image_metadef_object_property_show = 
"openstackclient.image.v2.metadef_objects:ShowMetadefObjectProperty" +image_metadef_property_create = "openstackclient.image.v2.metadef_properties:CreateMetadefProperty" +image_metadef_property_delete = "openstackclient.image.v2.metadef_properties:DeleteMetadefProperty" +image_metadef_property_list = "openstackclient.image.v2.metadef_properties:ListMetadefProperties" +image_metadef_property_set = "openstackclient.image.v2.metadef_properties:SetMetadefProperty" +image_metadef_property_show = "openstackclient.image.v2.metadef_properties:ShowMetadefProperty" +image_metadef_resource_type_list = "openstackclient.image.v2.metadef_resource_types:ListMetadefResourceTypes" +image_metadef_resource_type_association_create = "openstackclient.image.v2.metadef_resource_type_association:CreateMetadefResourceTypeAssociation" +image_metadef_resource_type_association_delete = "openstackclient.image.v2.metadef_resource_type_association:DeleteMetadefResourceTypeAssociation" +image_metadef_resource_type_association_list = "openstackclient.image.v2.metadef_resource_type_association:ListMetadefResourceTypeAssociations" +cached_image_list = "openstackclient.image.v2.cache:ListCachedImage" +cached_image_queue = "openstackclient.image.v2.cache:QueueCachedImage" +cached_image_delete = "openstackclient.image.v2.cache:DeleteCachedImage" +cached_image_clear = "openstackclient.image.v2.cache:ClearCachedImage" + +[project.entry-points."openstack.network.v2"] +address_group_create = "openstackclient.network.v2.address_group:CreateAddressGroup" +address_group_delete = "openstackclient.network.v2.address_group:DeleteAddressGroup" +address_group_list = "openstackclient.network.v2.address_group:ListAddressGroup" +address_group_set = "openstackclient.network.v2.address_group:SetAddressGroup" +address_group_show = "openstackclient.network.v2.address_group:ShowAddressGroup" +address_group_unset = "openstackclient.network.v2.address_group:UnsetAddressGroup" +address_scope_create = "openstackclient.network.v2.address_scope:CreateAddressScope" +address_scope_delete = "openstackclient.network.v2.address_scope:DeleteAddressScope" +address_scope_list = "openstackclient.network.v2.address_scope:ListAddressScope" +address_scope_set = "openstackclient.network.v2.address_scope:SetAddressScope" +address_scope_show = "openstackclient.network.v2.address_scope:ShowAddressScope" +floating_ip_create = "openstackclient.network.v2.floating_ip:CreateFloatingIP" +floating_ip_delete = "openstackclient.network.v2.floating_ip:DeleteFloatingIP" +floating_ip_list = "openstackclient.network.v2.floating_ip:ListFloatingIP" +floating_ip_set = "openstackclient.network.v2.floating_ip:SetFloatingIP" +floating_ip_show = "openstackclient.network.v2.floating_ip:ShowFloatingIP" +floating_ip_unset = "openstackclient.network.v2.floating_ip:UnsetFloatingIP" +floating_ip_pool_list = "openstackclient.network.v2.floating_ip_pool:ListFloatingIPPool" +floating_ip_port_forwarding_create = "openstackclient.network.v2.floating_ip_port_forwarding:CreateFloatingIPPortForwarding" +floating_ip_port_forwarding_delete = "openstackclient.network.v2.floating_ip_port_forwarding:DeleteFloatingIPPortForwarding" +floating_ip_port_forwarding_list = "openstackclient.network.v2.floating_ip_port_forwarding:ListFloatingIPPortForwarding" +floating_ip_port_forwarding_set = "openstackclient.network.v2.floating_ip_port_forwarding:SetFloatingIPPortForwarding" +floating_ip_port_forwarding_show = "openstackclient.network.v2.floating_ip_port_forwarding:ShowFloatingIPPortForwarding" 
+ip_availability_list = "openstackclient.network.v2.ip_availability:ListIPAvailability" +ip_availability_show = "openstackclient.network.v2.ip_availability:ShowIPAvailability" +local_ip_create = "openstackclient.network.v2.local_ip:CreateLocalIP" +local_ip_delete = "openstackclient.network.v2.local_ip:DeleteLocalIP" +local_ip_list = "openstackclient.network.v2.local_ip:ListLocalIP" +local_ip_set = "openstackclient.network.v2.local_ip:SetLocalIP" +local_ip_show = "openstackclient.network.v2.local_ip:ShowLocalIP" +local_ip_association_create = "openstackclient.network.v2.local_ip_association:CreateLocalIPAssociation" +local_ip_association_delete = "openstackclient.network.v2.local_ip_association:DeleteLocalIPAssociation" +local_ip_association_list = "openstackclient.network.v2.local_ip_association:ListLocalIPAssociation" +network_agent_add_network = "openstackclient.network.v2.network_agent:AddNetworkToAgent" +network_agent_add_router = "openstackclient.network.v2.network_agent:AddRouterToAgent" +network_agent_delete = "openstackclient.network.v2.network_agent:DeleteNetworkAgent" +network_agent_list = "openstackclient.network.v2.network_agent:ListNetworkAgent" +network_agent_remove_network = "openstackclient.network.v2.network_agent:RemoveNetworkFromAgent" +network_agent_remove_router = "openstackclient.network.v2.network_agent:RemoveRouterFromAgent" +network_agent_set = "openstackclient.network.v2.network_agent:SetNetworkAgent" +network_agent_show = "openstackclient.network.v2.network_agent:ShowNetworkAgent" +network_auto_allocated_topology_create = "openstackclient.network.v2.network_auto_allocated_topology:CreateAutoAllocatedTopology" +network_auto_allocated_topology_delete = "openstackclient.network.v2.network_auto_allocated_topology:DeleteAutoAllocatedTopology" +network_flavor_add_profile = "openstackclient.network.v2.network_flavor:AddNetworkFlavorToProfile" +network_flavor_create = "openstackclient.network.v2.network_flavor:CreateNetworkFlavor" +network_flavor_delete = "openstackclient.network.v2.network_flavor:DeleteNetworkFlavor" +network_flavor_list = "openstackclient.network.v2.network_flavor:ListNetworkFlavor" +network_flavor_remove_profile = "openstackclient.network.v2.network_flavor:RemoveNetworkFlavorFromProfile" +network_flavor_set = "openstackclient.network.v2.network_flavor:SetNetworkFlavor" +network_flavor_show = "openstackclient.network.v2.network_flavor:ShowNetworkFlavor" +network_flavor_profile_create = "openstackclient.network.v2.network_flavor_profile:CreateNetworkFlavorProfile" +network_flavor_profile_delete = "openstackclient.network.v2.network_flavor_profile:DeleteNetworkFlavorProfile" +network_flavor_profile_list = "openstackclient.network.v2.network_flavor_profile:ListNetworkFlavorProfile" +network_flavor_profile_set = "openstackclient.network.v2.network_flavor_profile:SetNetworkFlavorProfile" +network_flavor_profile_show = "openstackclient.network.v2.network_flavor_profile:ShowNetworkFlavorProfile" +network_create = "openstackclient.network.v2.network:CreateNetwork" +network_delete = "openstackclient.network.v2.network:DeleteNetwork" +network_list = "openstackclient.network.v2.network:ListNetwork" +network_set = "openstackclient.network.v2.network:SetNetwork" +network_show = "openstackclient.network.v2.network:ShowNetwork" +network_unset = "openstackclient.network.v2.network:UnsetNetwork" +network_l3_conntrack_helper_create = "openstackclient.network.v2.l3_conntrack_helper:CreateConntrackHelper" +network_l3_conntrack_helper_delete = 
"openstackclient.network.v2.l3_conntrack_helper:DeleteConntrackHelper" +network_l3_conntrack_helper_list = "openstackclient.network.v2.l3_conntrack_helper:ListConntrackHelper" +network_l3_conntrack_helper_set = "openstackclient.network.v2.l3_conntrack_helper:SetConntrackHelper" +network_l3_conntrack_helper_show = "openstackclient.network.v2.l3_conntrack_helper:ShowConntrackHelper" +network_meter_create = "openstackclient.network.v2.network_meter:CreateMeter" +network_meter_delete = "openstackclient.network.v2.network_meter:DeleteMeter" +network_meter_list = "openstackclient.network.v2.network_meter:ListMeter" +network_meter_show = "openstackclient.network.v2.network_meter:ShowMeter" +network_meter_rule_create = "openstackclient.network.v2.network_meter_rule:CreateMeterRule" +network_meter_rule_delete = "openstackclient.network.v2.network_meter_rule:DeleteMeterRule" +network_meter_rule_list = "openstackclient.network.v2.network_meter_rule:ListMeterRule" +network_meter_rule_show = "openstackclient.network.v2.network_meter_rule:ShowMeterRule" +network_qos_policy_create = "openstackclient.network.v2.network_qos_policy:CreateNetworkQosPolicy" +network_qos_policy_delete = "openstackclient.network.v2.network_qos_policy:DeleteNetworkQosPolicy" +network_qos_policy_list = "openstackclient.network.v2.network_qos_policy:ListNetworkQosPolicy" +network_qos_policy_set = "openstackclient.network.v2.network_qos_policy:SetNetworkQosPolicy" +network_qos_policy_show = "openstackclient.network.v2.network_qos_policy:ShowNetworkQosPolicy" +network_qos_rule_create = "openstackclient.network.v2.network_qos_rule:CreateNetworkQosRule" +network_qos_rule_delete = "openstackclient.network.v2.network_qos_rule:DeleteNetworkQosRule" +network_qos_rule_list = "openstackclient.network.v2.network_qos_rule:ListNetworkQosRule" +network_qos_rule_set = "openstackclient.network.v2.network_qos_rule:SetNetworkQosRule" +network_qos_rule_show = "openstackclient.network.v2.network_qos_rule:ShowNetworkQosRule" +network_qos_rule_type_list = "openstackclient.network.v2.network_qos_rule_type:ListNetworkQosRuleType" +network_qos_rule_type_show = "openstackclient.network.v2.network_qos_rule_type:ShowNetworkQosRuleType" +network_rbac_create = "openstackclient.network.v2.network_rbac:CreateNetworkRBAC" +network_rbac_delete = "openstackclient.network.v2.network_rbac:DeleteNetworkRBAC" +network_rbac_list = "openstackclient.network.v2.network_rbac:ListNetworkRBAC" +network_rbac_set = "openstackclient.network.v2.network_rbac:SetNetworkRBAC" +network_rbac_show = "openstackclient.network.v2.network_rbac:ShowNetworkRBAC" +network_segment_create = "openstackclient.network.v2.network_segment:CreateNetworkSegment" +network_segment_delete = "openstackclient.network.v2.network_segment:DeleteNetworkSegment" +network_segment_list = "openstackclient.network.v2.network_segment:ListNetworkSegment" +network_segment_set = "openstackclient.network.v2.network_segment:SetNetworkSegment" +network_segment_show = "openstackclient.network.v2.network_segment:ShowNetworkSegment" +network_segment_range_create = "openstackclient.network.v2.network_segment_range:CreateNetworkSegmentRange" +network_segment_range_delete = "openstackclient.network.v2.network_segment_range:DeleteNetworkSegmentRange" +network_segment_range_list = "openstackclient.network.v2.network_segment_range:ListNetworkSegmentRange" +network_segment_range_set = "openstackclient.network.v2.network_segment_range:SetNetworkSegmentRange" +network_segment_range_show = 
"openstackclient.network.v2.network_segment_range:ShowNetworkSegmentRange" +network_service_provider_list = "openstackclient.network.v2.network_service_provider:ListNetworkServiceProvider" +network_subport_list = "openstackclient.network.v2.network_trunk:ListNetworkSubport" +network_trunk_create = "openstackclient.network.v2.network_trunk:CreateNetworkTrunk" +network_trunk_delete = "openstackclient.network.v2.network_trunk:DeleteNetworkTrunk" +network_trunk_list = "openstackclient.network.v2.network_trunk:ListNetworkTrunk" +network_trunk_set = "openstackclient.network.v2.network_trunk:SetNetworkTrunk" +network_trunk_show = "openstackclient.network.v2.network_trunk:ShowNetworkTrunk" +network_trunk_unset = "openstackclient.network.v2.network_trunk:UnsetNetworkTrunk" +port_create = "openstackclient.network.v2.port:CreatePort" +port_delete = "openstackclient.network.v2.port:DeletePort" +port_list = "openstackclient.network.v2.port:ListPort" +port_set = "openstackclient.network.v2.port:SetPort" +port_show = "openstackclient.network.v2.port:ShowPort" +port_unset = "openstackclient.network.v2.port:UnsetPort" +router_add_gateway = "openstackclient.network.v2.router:AddGatewayToRouter" +router_add_port = "openstackclient.network.v2.router:AddPortToRouter" +router_add_route = "openstackclient.network.v2.router:AddExtraRoutesToRouter" +router_add_subnet = "openstackclient.network.v2.router:AddSubnetToRouter" +router_create = "openstackclient.network.v2.router:CreateRouter" +router_delete = "openstackclient.network.v2.router:DeleteRouter" +router_list = "openstackclient.network.v2.router:ListRouter" +router_remove_gateway = "openstackclient.network.v2.router:RemoveGatewayFromRouter" +router_remove_port = "openstackclient.network.v2.router:RemovePortFromRouter" +router_remove_route = "openstackclient.network.v2.router:RemoveExtraRoutesFromRouter" +router_remove_subnet = "openstackclient.network.v2.router:RemoveSubnetFromRouter" +router_set = "openstackclient.network.v2.router:SetRouter" +router_show = "openstackclient.network.v2.router:ShowRouter" +router_unset = "openstackclient.network.v2.router:UnsetRouter" +router_ndp_proxy_create = "openstackclient.network.v2.ndp_proxy:CreateNDPProxy" +router_ndp_proxy_delete = "openstackclient.network.v2.ndp_proxy:DeleteNDPProxy" +router_ndp_proxy_list = "openstackclient.network.v2.ndp_proxy:ListNDPProxy" +router_ndp_proxy_set = "openstackclient.network.v2.ndp_proxy:SetNDPProxy" +router_ndp_proxy_show = "openstackclient.network.v2.ndp_proxy:ShowNDPProxy" +security_group_create = "openstackclient.network.v2.security_group:CreateSecurityGroup" +security_group_delete = "openstackclient.network.v2.security_group:DeleteSecurityGroup" +security_group_list = "openstackclient.network.v2.security_group:ListSecurityGroup" +security_group_set = "openstackclient.network.v2.security_group:SetSecurityGroup" +security_group_show = "openstackclient.network.v2.security_group:ShowSecurityGroup" +security_group_unset = "openstackclient.network.v2.security_group:UnsetSecurityGroup" +security_group_rule_create = "openstackclient.network.v2.security_group_rule:CreateSecurityGroupRule" +security_group_rule_delete = "openstackclient.network.v2.security_group_rule:DeleteSecurityGroupRule" +security_group_rule_list = "openstackclient.network.v2.security_group_rule:ListSecurityGroupRule" +security_group_rule_show = "openstackclient.network.v2.security_group_rule:ShowSecurityGroupRule" +default_security_group_rule_create = 
"openstackclient.network.v2.default_security_group_rule:CreateDefaultSecurityGroupRule" +default_security_group_rule_delete = "openstackclient.network.v2.default_security_group_rule:DeleteDefaultSecurityGroupRule" +default_security_group_rule_list = "openstackclient.network.v2.default_security_group_rule:ListDefaultSecurityGroupRule" +default_security_group_rule_show = "openstackclient.network.v2.default_security_group_rule:ShowDefaultSecurityGroupRule" +subnet_create = "openstackclient.network.v2.subnet:CreateSubnet" +subnet_delete = "openstackclient.network.v2.subnet:DeleteSubnet" +subnet_list = "openstackclient.network.v2.subnet:ListSubnet" +subnet_set = "openstackclient.network.v2.subnet:SetSubnet" +subnet_show = "openstackclient.network.v2.subnet:ShowSubnet" +subnet_unset = "openstackclient.network.v2.subnet:UnsetSubnet" +subnet_pool_create = "openstackclient.network.v2.subnet_pool:CreateSubnetPool" +subnet_pool_delete = "openstackclient.network.v2.subnet_pool:DeleteSubnetPool" +subnet_pool_list = "openstackclient.network.v2.subnet_pool:ListSubnetPool" +subnet_pool_set = "openstackclient.network.v2.subnet_pool:SetSubnetPool" +subnet_pool_show = "openstackclient.network.v2.subnet_pool:ShowSubnetPool" +subnet_pool_unset = "openstackclient.network.v2.subnet_pool:UnsetSubnetPool" + +# Tap-as-a-Service +tap_flow_create = "openstackclient.network.v2.taas.tap_flow:CreateTapFlow" +tap_flow_delete = "openstackclient.network.v2.taas.tap_flow:DeleteTapFlow" +tap_flow_list = "openstackclient.network.v2.taas.tap_flow:ListTapFlow" +tap_flow_show = "openstackclient.network.v2.taas.tap_flow:ShowTapFlow" +tap_flow_update = "openstackclient.network.v2.taas.tap_flow:UpdateTapFlow" +tap_mirror_create = "openstackclient.network.v2.taas.tap_mirror:CreateTapMirror" +tap_mirror_delete = "openstackclient.network.v2.taas.tap_mirror:DeleteTapMirror" +tap_mirror_list = "openstackclient.network.v2.taas.tap_mirror:ListTapMirror" +tap_mirror_show = "openstackclient.network.v2.taas.tap_mirror:ShowTapMirror" +tap_mirror_update = "openstackclient.network.v2.taas.tap_mirror:UpdateTapMirror" +tap_service_create = "openstackclient.network.v2.taas.tap_service:CreateTapService" +tap_service_delete = "openstackclient.network.v2.taas.tap_service:DeleteTapService" +tap_service_list = "openstackclient.network.v2.taas.tap_service:ListTapService" +tap_service_show = "openstackclient.network.v2.taas.tap_service:ShowTapService" +tap_service_update = "openstackclient.network.v2.taas.tap_service:UpdateTapService" + +[project.entry-points."openstack.object_store.v1"] +object_store_account_set = "openstackclient.object.v1.account:SetAccount" +object_store_account_show = "openstackclient.object.v1.account:ShowAccount" +object_store_account_unset = "openstackclient.object.v1.account:UnsetAccount" +container_create = "openstackclient.object.v1.container:CreateContainer" +container_delete = "openstackclient.object.v1.container:DeleteContainer" +container_list = "openstackclient.object.v1.container:ListContainer" +container_save = "openstackclient.object.v1.container:SaveContainer" +container_set = "openstackclient.object.v1.container:SetContainer" +container_show = "openstackclient.object.v1.container:ShowContainer" +container_unset = "openstackclient.object.v1.container:UnsetContainer" +object_create = "openstackclient.object.v1.object:CreateObject" +object_delete = "openstackclient.object.v1.object:DeleteObject" +object_list = "openstackclient.object.v1.object:ListObject" +object_save = "openstackclient.object.v1.object:SaveObject" 
+object_set = "openstackclient.object.v1.object:SetObject" +object_show = "openstackclient.object.v1.object:ShowObject" +object_unset = "openstackclient.object.v1.object:UnsetObject" + +[project.entry-points."openstack.volume.v2"] +consistency_group_add_volume = "openstackclient.volume.v2.consistency_group:AddVolumeToConsistencyGroup" +consistency_group_create = "openstackclient.volume.v2.consistency_group:CreateConsistencyGroup" +consistency_group_delete = "openstackclient.volume.v2.consistency_group:DeleteConsistencyGroup" +consistency_group_list = "openstackclient.volume.v2.consistency_group:ListConsistencyGroup" +consistency_group_remove_volume = "openstackclient.volume.v2.consistency_group:RemoveVolumeFromConsistencyGroup" +consistency_group_set = "openstackclient.volume.v2.consistency_group:SetConsistencyGroup" +consistency_group_show = "openstackclient.volume.v2.consistency_group:ShowConsistencyGroup" +consistency_group_snapshot_create = "openstackclient.volume.v2.consistency_group_snapshot:CreateConsistencyGroupSnapshot" +consistency_group_snapshot_delete = "openstackclient.volume.v2.consistency_group_snapshot:DeleteConsistencyGroupSnapshot" +consistency_group_snapshot_list = "openstackclient.volume.v2.consistency_group_snapshot:ListConsistencyGroupSnapshot" +consistency_group_snapshot_show = "openstackclient.volume.v2.consistency_group_snapshot:ShowConsistencyGroupSnapshot" +volume_create = "openstackclient.volume.v2.volume:CreateVolume" +volume_delete = "openstackclient.volume.v2.volume:DeleteVolume" +volume_list = "openstackclient.volume.v2.volume:ListVolume" +volume_migrate = "openstackclient.volume.v2.volume:MigrateVolume" +volume_set = "openstackclient.volume.v2.volume:SetVolume" +volume_show = "openstackclient.volume.v2.volume:ShowVolume" +volume_unset = "openstackclient.volume.v2.volume:UnsetVolume" +volume_backup_create = "openstackclient.volume.v2.volume_backup:CreateVolumeBackup" +volume_backup_delete = "openstackclient.volume.v2.volume_backup:DeleteVolumeBackup" +volume_backup_list = "openstackclient.volume.v2.volume_backup:ListVolumeBackup" +volume_backup_restore = "openstackclient.volume.v2.volume_backup:RestoreVolumeBackup" +volume_backup_set = "openstackclient.volume.v2.volume_backup:SetVolumeBackup" +volume_backup_show = "openstackclient.volume.v2.volume_backup:ShowVolumeBackup" +volume_backup_record_export = "openstackclient.volume.v2.backup_record:ExportBackupRecord" +volume_backup_record_import = "openstackclient.volume.v2.backup_record:ImportBackupRecord" +volume_backend_capability_show = "openstackclient.volume.v2.volume_backend:ShowCapability" +volume_backend_pool_list = "openstackclient.volume.v2.volume_backend:ListPool" +volume_host_failover = "openstackclient.volume.v2.volume_host:FailoverVolumeHost" +volume_host_set = "openstackclient.volume.v2.volume_host:SetVolumeHost" +volume_snapshot_create = "openstackclient.volume.v2.volume_snapshot:CreateVolumeSnapshot" +volume_snapshot_delete = "openstackclient.volume.v2.volume_snapshot:DeleteVolumeSnapshot" +volume_snapshot_list = "openstackclient.volume.v2.volume_snapshot:ListVolumeSnapshot" +volume_snapshot_set = "openstackclient.volume.v2.volume_snapshot:SetVolumeSnapshot" +volume_snapshot_show = "openstackclient.volume.v2.volume_snapshot:ShowVolumeSnapshot" +volume_snapshot_unset = "openstackclient.volume.v2.volume_snapshot:UnsetVolumeSnapshot" +volume_type_create = "openstackclient.volume.v2.volume_type:CreateVolumeType" +volume_type_delete = "openstackclient.volume.v2.volume_type:DeleteVolumeType" 
+volume_type_list = "openstackclient.volume.v2.volume_type:ListVolumeType" +volume_type_set = "openstackclient.volume.v2.volume_type:SetVolumeType" +volume_type_show = "openstackclient.volume.v2.volume_type:ShowVolumeType" +volume_type_unset = "openstackclient.volume.v2.volume_type:UnsetVolumeType" +volume_qos_associate = "openstackclient.volume.v2.qos_specs:AssociateQos" +volume_qos_create = "openstackclient.volume.v2.qos_specs:CreateQos" +volume_qos_delete = "openstackclient.volume.v2.qos_specs:DeleteQos" +volume_qos_disassociate = "openstackclient.volume.v2.qos_specs:DisassociateQos" +volume_qos_list = "openstackclient.volume.v2.qos_specs:ListQos" +volume_qos_set = "openstackclient.volume.v2.qos_specs:SetQos" +volume_qos_show = "openstackclient.volume.v2.qos_specs:ShowQos" +volume_qos_unset = "openstackclient.volume.v2.qos_specs:UnsetQos" +volume_service_list = "openstackclient.volume.v2.service:ListService" +volume_service_set = "openstackclient.volume.v2.service:SetService" +volume_transfer_request_accept = "openstackclient.volume.v2.volume_transfer_request:AcceptTransferRequest" +volume_transfer_request_create = "openstackclient.volume.v2.volume_transfer_request:CreateTransferRequest" +volume_transfer_request_delete = "openstackclient.volume.v2.volume_transfer_request:DeleteTransferRequest" +volume_transfer_request_list = "openstackclient.volume.v2.volume_transfer_request:ListTransferRequest" +volume_transfer_request_show = "openstackclient.volume.v2.volume_transfer_request:ShowTransferRequest" + +[project.entry-points."openstack.volume.v3"] +block_storage_log_level_list = "openstackclient.volume.v3.block_storage_log_level:BlockStorageLogLevelList" +block_storage_log_level_set = "openstackclient.volume.v3.block_storage_log_level:BlockStorageLogLevelSet" +block_storage_cleanup = "openstackclient.volume.v3.block_storage_cleanup:BlockStorageCleanup" +block_storage_volume_manageable_list = "openstackclient.volume.v3.block_storage_manage:BlockStorageManageVolumes" +block_storage_snapshot_manageable_list = "openstackclient.volume.v3.block_storage_manage:BlockStorageManageSnapshots" +consistency_group_add_volume = "openstackclient.volume.v2.consistency_group:AddVolumeToConsistencyGroup" +consistency_group_create = "openstackclient.volume.v2.consistency_group:CreateConsistencyGroup" +consistency_group_delete = "openstackclient.volume.v2.consistency_group:DeleteConsistencyGroup" +consistency_group_list = "openstackclient.volume.v2.consistency_group:ListConsistencyGroup" +consistency_group_remove_volume = "openstackclient.volume.v2.consistency_group:RemoveVolumeFromConsistencyGroup" +consistency_group_set = "openstackclient.volume.v2.consistency_group:SetConsistencyGroup" +consistency_group_show = "openstackclient.volume.v2.consistency_group:ShowConsistencyGroup" +consistency_group_snapshot_create = "openstackclient.volume.v2.consistency_group_snapshot:CreateConsistencyGroupSnapshot" +consistency_group_snapshot_delete = "openstackclient.volume.v2.consistency_group_snapshot:DeleteConsistencyGroupSnapshot" +consistency_group_snapshot_list = "openstackclient.volume.v2.consistency_group_snapshot:ListConsistencyGroupSnapshot" +consistency_group_snapshot_show = "openstackclient.volume.v2.consistency_group_snapshot:ShowConsistencyGroupSnapshot" +volume_create = "openstackclient.volume.v3.volume:CreateVolume" +volume_delete = "openstackclient.volume.v3.volume:DeleteVolume" +volume_list = "openstackclient.volume.v3.volume:ListVolume" +volume_migrate = "openstackclient.volume.v3.volume:MigrateVolume" 
+volume_set = "openstackclient.volume.v3.volume:SetVolume" +volume_show = "openstackclient.volume.v3.volume:ShowVolume" +volume_unset = "openstackclient.volume.v3.volume:UnsetVolume" +volume_attachment_create = "openstackclient.volume.v3.volume_attachment:CreateVolumeAttachment" +volume_attachment_delete = "openstackclient.volume.v3.volume_attachment:DeleteVolumeAttachment" +volume_attachment_list = "openstackclient.volume.v3.volume_attachment:ListVolumeAttachment" +volume_attachment_complete = "openstackclient.volume.v3.volume_attachment:CompleteVolumeAttachment" +volume_attachment_set = "openstackclient.volume.v3.volume_attachment:SetVolumeAttachment" +volume_attachment_show = "openstackclient.volume.v3.volume_attachment:ShowVolumeAttachment" +volume_backup_create = "openstackclient.volume.v3.volume_backup:CreateVolumeBackup" +volume_backup_delete = "openstackclient.volume.v3.volume_backup:DeleteVolumeBackup" +volume_backup_list = "openstackclient.volume.v3.volume_backup:ListVolumeBackup" +volume_backup_restore = "openstackclient.volume.v3.volume_backup:RestoreVolumeBackup" +volume_backup_set = "openstackclient.volume.v3.volume_backup:SetVolumeBackup" +volume_backup_unset = "openstackclient.volume.v3.volume_backup:UnsetVolumeBackup" +volume_backup_show = "openstackclient.volume.v3.volume_backup:ShowVolumeBackup" +volume_backend_capability_show = "openstackclient.volume.v2.volume_backend:ShowCapability" +volume_backend_pool_list = "openstackclient.volume.v2.volume_backend:ListPool" +volume_backup_record_export = "openstackclient.volume.v2.backup_record:ExportBackupRecord" +volume_backup_record_import = "openstackclient.volume.v2.backup_record:ImportBackupRecord" +volume_group_create = "openstackclient.volume.v3.volume_group:CreateVolumeGroup" +volume_group_delete = "openstackclient.volume.v3.volume_group:DeleteVolumeGroup" +volume_group_list = "openstackclient.volume.v3.volume_group:ListVolumeGroup" +volume_group_failover = "openstackclient.volume.v3.volume_group:FailoverVolumeGroup" +volume_group_set = "openstackclient.volume.v3.volume_group:SetVolumeGroup" +volume_group_show = "openstackclient.volume.v3.volume_group:ShowVolumeGroup" +volume_group_snapshot_create = "openstackclient.volume.v3.volume_group_snapshot:CreateVolumeGroupSnapshot" +volume_group_snapshot_delete = "openstackclient.volume.v3.volume_group_snapshot:DeleteVolumeGroupSnapshot" +volume_group_snapshot_list = "openstackclient.volume.v3.volume_group_snapshot:ListVolumeGroupSnapshot" +volume_group_snapshot_show = "openstackclient.volume.v3.volume_group_snapshot:ShowVolumeGroupSnapshot" +volume_group_type_create = "openstackclient.volume.v3.volume_group_type:CreateVolumeGroupType" +volume_group_type_delete = "openstackclient.volume.v3.volume_group_type:DeleteVolumeGroupType" +volume_group_type_list = "openstackclient.volume.v3.volume_group_type:ListVolumeGroupType" +volume_group_type_set = "openstackclient.volume.v3.volume_group_type:SetVolumeGroupType" +volume_group_type_show = "openstackclient.volume.v3.volume_group_type:ShowVolumeGroupType" +volume_host_set = "openstackclient.volume.v2.volume_host:SetVolumeHost" +volume_message_delete = "openstackclient.volume.v3.volume_message:DeleteMessage" +volume_message_list = "openstackclient.volume.v3.volume_message:ListMessages" +volume_message_show = "openstackclient.volume.v3.volume_message:ShowMessage" +block_storage_cluster_list = "openstackclient.volume.v3.block_storage_cluster:ListBlockStorageCluster" +block_storage_cluster_set = 
"openstackclient.volume.v3.block_storage_cluster:SetBlockStorageCluster" +block_storage_cluster_show = "openstackclient.volume.v3.block_storage_cluster:ShowBlockStorageCluster" +block_storage_resource_filter_list = "openstackclient.volume.v3.block_storage_resource_filter:ListBlockStorageResourceFilter" +block_storage_resource_filter_show = "openstackclient.volume.v3.block_storage_resource_filter:ShowBlockStorageResourceFilter" +volume_snapshot_create = "openstackclient.volume.v3.volume_snapshot:CreateVolumeSnapshot" +volume_snapshot_delete = "openstackclient.volume.v3.volume_snapshot:DeleteVolumeSnapshot" +volume_snapshot_list = "openstackclient.volume.v3.volume_snapshot:ListVolumeSnapshot" +volume_snapshot_set = "openstackclient.volume.v3.volume_snapshot:SetVolumeSnapshot" +volume_snapshot_show = "openstackclient.volume.v3.volume_snapshot:ShowVolumeSnapshot" +volume_snapshot_unset = "openstackclient.volume.v3.volume_snapshot:UnsetVolumeSnapshot" +volume_type_create = "openstackclient.volume.v3.volume_type:CreateVolumeType" +volume_type_delete = "openstackclient.volume.v3.volume_type:DeleteVolumeType" +volume_type_list = "openstackclient.volume.v3.volume_type:ListVolumeType" +volume_type_set = "openstackclient.volume.v3.volume_type:SetVolumeType" +volume_type_show = "openstackclient.volume.v3.volume_type:ShowVolumeType" +volume_type_unset = "openstackclient.volume.v3.volume_type:UnsetVolumeType" +volume_qos_associate = "openstackclient.volume.v2.qos_specs:AssociateQos" +volume_qos_create = "openstackclient.volume.v2.qos_specs:CreateQos" +volume_qos_delete = "openstackclient.volume.v2.qos_specs:DeleteQos" +volume_qos_disassociate = "openstackclient.volume.v2.qos_specs:DisassociateQos" +volume_qos_list = "openstackclient.volume.v2.qos_specs:ListQos" +volume_qos_set = "openstackclient.volume.v2.qos_specs:SetQos" +volume_qos_show = "openstackclient.volume.v2.qos_specs:ShowQos" +volume_qos_unset = "openstackclient.volume.v2.qos_specs:UnsetQos" +volume_service_list = "openstackclient.volume.v3.service:ListService" +volume_service_set = "openstackclient.volume.v3.service:SetService" +volume_transfer_request_accept = "openstackclient.volume.v3.volume_transfer_request:AcceptTransferRequest" +volume_transfer_request_create = "openstackclient.volume.v3.volume_transfer_request:CreateTransferRequest" +volume_transfer_request_delete = "openstackclient.volume.v3.volume_transfer_request:DeleteTransferRequest" +volume_transfer_request_list = "openstackclient.volume.v3.volume_transfer_request:ListTransferRequest" +volume_transfer_request_show = "openstackclient.volume.v3.volume_transfer_request:ShowTransferRequest" +volume_summary = "openstackclient.volume.v3.volume:VolumeSummary" +volume_revert = "openstackclient.volume.v3.volume:VolumeRevertToSnapshot" + +[tool.setuptools] +packages = [ + "openstackclient" +] + +[tool.mypy] +python_version = "3.10" +show_column_numbers = true +show_error_context = true +ignore_missing_imports = true +follow_imports = "normal" +incremental = true +check_untyped_defs = true +warn_unused_ignores = true +# keep this in-sync with 'mypy.exclude' in '.pre-commit-config.yaml' +exclude = ''' +(?x)( + doc + | examples + | hacking + | releasenotes + ) +''' + +[[tool.mypy.overrides]] +module = ["openstackclient.tests.unit.*"] +ignore_errors = true + +[tool.ruff] +line-length = 79 + +[tool.ruff.format] +quote-style = "preserve" +docstring-code-format = true + +[tool.ruff.lint] +select = ["E4", "E5", "E7", "E9", "F", "S", "UP"] + +[tool.ruff.lint.per-file-ignores] 
+"openstackclient/tests/*" = ["E501", "S"] diff --git a/releasenotes/notes/Add-default-security-group-rule-CRUD-2916568f829ea38c.yaml b/releasenotes/notes/Add-default-security-group-rule-CRUD-2916568f829ea38c.yaml new file mode 100644 index 0000000000..955d501550 --- /dev/null +++ b/releasenotes/notes/Add-default-security-group-rule-CRUD-2916568f829ea38c.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Add ``default security group rule create``, ``default security group rule + delete``, ``default security group rule list`` and ``default security group + rule show`` commands to support Neutron Default Security Group Rule CRUD + operations. + [Bug `1983053 `_] diff --git a/releasenotes/notes/Add-trusted-vif-to-the-port-0a0c76d9da8f3da0.yaml b/releasenotes/notes/Add-trusted-vif-to-the-port-0a0c76d9da8f3da0.yaml new file mode 100644 index 0000000000..ffa9669540 --- /dev/null +++ b/releasenotes/notes/Add-trusted-vif-to-the-port-0a0c76d9da8f3da0.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add ``trusted`` attribute to the ``port create`` and ``port set`` commands. + It can be set to ``true`` with ``--trusted`` and to ``false`` with + ``--not-trusted`` CLI arguments passed to the ``port create`` and ``port + set`` commands`` diff --git a/releasenotes/notes/Router-flavor-accepts-name-or-id-e9cecafcddf81cb2.yaml b/releasenotes/notes/Router-flavor-accepts-name-or-id-e9cecafcddf81cb2.yaml new file mode 100644 index 0000000000..be5559d061 --- /dev/null +++ b/releasenotes/notes/Router-flavor-accepts-name-or-id-e9cecafcddf81cb2.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + The ``router create --flavor-id`` parameter has been deprecated + in favour of the ``--flavor`` parameter, which accepts both + flavor names and flavor IDs. diff --git a/releasenotes/notes/add-cache-commands-a6f046348a3a0b1f.yaml b/releasenotes/notes/add-cache-commands-a6f046348a3a0b1f.yaml new file mode 100644 index 0000000000..81243cfa99 --- /dev/null +++ b/releasenotes/notes/add-cache-commands-a6f046348a3a0b1f.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add commands for the image Cache API, to list, queue, + delete and clear images in the cache. diff --git a/releasenotes/notes/add-chunk-size-to-image-save-37871f9e62693264.yaml b/releasenotes/notes/add-chunk-size-to-image-save-37871f9e62693264.yaml new file mode 100644 index 0000000000..433dc87733 --- /dev/null +++ b/releasenotes/notes/add-chunk-size-to-image-save-37871f9e62693264.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``--chunk-size`` option to ``image save`` command to control the size + of bytes to read at one time. diff --git a/releasenotes/notes/add-cluster-to-service-list-5eab3e828de7547e.yaml b/releasenotes/notes/add-cluster-to-service-list-5eab3e828de7547e.yaml new file mode 100644 index 0000000000..14418107a8 --- /dev/null +++ b/releasenotes/notes/add-cluster-to-service-list-5eab3e828de7547e.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Added the ``Cluster`` and ``Backend State`` columns to + ``openstack volume service list`` command. Note that the + ``Cluster`` parameter is available since microversion 3.7 + and ``Backend State`` parameter is available since + microversion 3.49. 
diff --git a/releasenotes/notes/add-flavor-id-to-router-create-76e916e129b5b80c.yaml b/releasenotes/notes/add-flavor-id-to-router-create-76e916e129b5b80c.yaml new file mode 100644 index 0000000000..56ccf6d511 --- /dev/null +++ b/releasenotes/notes/add-flavor-id-to-router-create-76e916e129b5b80c.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add the ``--flavor-id`` option to the ``router create`` command. diff --git a/releasenotes/notes/add-image-member-get-25e913ef2b861bf3.yaml b/releasenotes/notes/add-image-member-get-25e913ef2b861bf3.yaml new file mode 100644 index 0000000000..9293f3c6ec --- /dev/null +++ b/releasenotes/notes/add-image-member-get-25e913ef2b861bf3.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add ``image member get`` command which accepts an + image_id and member_id and displays the details of + the particular member associated with the image. diff --git a/releasenotes/notes/add-image-metadef-namespace-object-delete-b6b2de24fc66e602.yaml b/releasenotes/notes/add-image-metadef-namespace-object-delete-b6b2de24fc66e602.yaml new file mode 100644 index 0000000000..7861a0c7a0 --- /dev/null +++ b/releasenotes/notes/add-image-metadef-namespace-object-delete-b6b2de24fc66e602.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds an operation which deletes all metadef objects inside a namespace. diff --git a/releasenotes/notes/add-image-metadef-object-property-show-4ab2c957451ea230.yaml b/releasenotes/notes/add-image-metadef-object-property-show-4ab2c957451ea230.yaml new file mode 100644 index 0000000000..32b259c129 --- /dev/null +++ b/releasenotes/notes/add-image-metadef-object-property-show-4ab2c957451ea230.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef object property show`` command which + shows a particular property inside a metadef object. diff --git a/releasenotes/notes/add-image-metadef-object-update-f4880e423bf4faba.yaml b/releasenotes/notes/add-image-metadef-object-update-f4880e423bf4faba.yaml new file mode 100644 index 0000000000..6cb8efe052 --- /dev/null +++ b/releasenotes/notes/add-image-metadef-object-update-f4880e423bf4faba.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef object update`` command which + updates the attributes of an object. diff --git a/releasenotes/notes/add-image-metadef-property-delete-1e1bb8410130d901.yaml b/releasenotes/notes/add-image-metadef-property-delete-1e1bb8410130d901.yaml new file mode 100644 index 0000000000..09aff5905c --- /dev/null +++ b/releasenotes/notes/add-image-metadef-property-delete-1e1bb8410130d901.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``image property delete`` command will now delete all properties in + the provided namespace if no property is provided. diff --git a/releasenotes/notes/add-image-metadef-resource-type-association-commands-4d373d7d8eca5d55.yaml b/releasenotes/notes/add-image-metadef-resource-type-association-commands-4d373d7d8eca5d55.yaml new file mode 100644 index 0000000000..eb03566fab --- /dev/null +++ b/releasenotes/notes/add-image-metadef-resource-type-association-commands-4d373d7d8eca5d55.yaml @@ -0,0 +1,17 @@ +--- +features: + - | + Added ``image metadef resource type association list`` + to list resource type associations for the image service. + This is equivalent to the + ``md-namespace-resource-type-list`` command in glance. + - | + Added ``image metadef resource type association create`` + to create a resource type association for the image service. + This is equivalent to the + ``md-resource-type-associate`` command in glance. 
+ - | + Added ``image metadef resource type association delete`` + to delete a resource type association for the image service. + This is equivalent to the + ``md-resource-type-deassociate`` command in glance. diff --git a/releasenotes/notes/add-image-metadef-resource-type-list-command-020adcaa2ad14e07.yaml b/releasenotes/notes/add-image-metadef-resource-type-list-command-020adcaa2ad14e07.yaml new file mode 100644 index 0000000000..01857b1c70 --- /dev/null +++ b/releasenotes/notes/add-image-metadef-resource-type-list-command-020adcaa2ad14e07.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added ``image metadef resource type list`` command. This is equivalent to + the ``md-namespace-resource-type-list`` command in glanceclient. diff --git a/releasenotes/notes/add-image-options-dcbc4ead7822c495.yaml b/releasenotes/notes/add-image-options-dcbc4ead7822c495.yaml new file mode 100644 index 0000000000..7a528d7c9d --- /dev/null +++ b/releasenotes/notes/add-image-options-dcbc4ead7822c495.yaml @@ -0,0 +1,4 @@ +--- +features: + - The ``os_hash_algo`` and ``os_hash_value`` image attributes are now shown + in the ``image list --long`` output. diff --git a/releasenotes/notes/add-import-info-stores-delete-c50b5222c21e1077.yaml b/releasenotes/notes/add-import-info-stores-delete-c50b5222c21e1077.yaml new file mode 100644 index 0000000000..f6038c3c5e --- /dev/null +++ b/releasenotes/notes/add-import-info-stores-delete-c50b5222c21e1077.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add ``image import info`` command, allowing users to discover available import + methods, and a ``--store`` option to ``image delete``, allowing users to delete + an image from a particular store. diff --git a/releasenotes/notes/add-metadef-object-create-3939ee1453585484.yaml b/releasenotes/notes/add-metadef-object-create-3939ee1453585484.yaml new file mode 100644 index 0000000000..250a1452a8 --- /dev/null +++ b/releasenotes/notes/add-metadef-object-create-3939ee1453585484.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef object create`` command to create the + metadata definitions objects inside a specific namespace diff --git a/releasenotes/notes/add-metadef-object-list-c8831e73c696b9d9.yaml b/releasenotes/notes/add-metadef-object-list-c8831e73c696b9d9.yaml new file mode 100644 index 0000000000..bfa89036a5 --- /dev/null +++ b/releasenotes/notes/add-metadef-object-list-c8831e73c696b9d9.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef object list`` command to list the + metadata definitions objects inside a specific namespace diff --git a/releasenotes/notes/add-metadef-object-show-1b05dd33ecf42210.yaml b/releasenotes/notes/add-metadef-object-show-1b05dd33ecf42210.yaml new file mode 100644 index 0000000000..d3df6f2aaa --- /dev/null +++ b/releasenotes/notes/add-metadef-object-show-1b05dd33ecf42210.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef object show`` command to show the + metadata definitions objects inside a specific namespace diff --git a/releasenotes/notes/add-metadef-property-create-c9a4ec2bced892af.yaml b/releasenotes/notes/add-metadef-property-create-c9a4ec2bced892af.yaml new file mode 100644 index 0000000000..11862e1671 --- /dev/null +++ b/releasenotes/notes/add-metadef-property-create-c9a4ec2bced892af.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef property create`` command to create a new + metadef property inside a specific namespace. 
diff --git a/releasenotes/notes/add-metadef-property-delete-ebb999d92a588ad4.yaml b/releasenotes/notes/add-metadef-property-delete-ebb999d92a588ad4.yaml new file mode 100644 index 0000000000..69e43dd00a --- /dev/null +++ b/releasenotes/notes/add-metadef-property-delete-ebb999d92a588ad4.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef property delete`` command to delete a + metadef property inside a specific namespace. diff --git a/releasenotes/notes/add-metadef-property-list-fe89ae8ff9780002.yaml b/releasenotes/notes/add-metadef-property-list-fe89ae8ff9780002.yaml new file mode 100644 index 0000000000..643c2c5e32 --- /dev/null +++ b/releasenotes/notes/add-metadef-property-list-fe89ae8ff9780002.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef property list`` command to list the + metadata definitions properties inside a specific namespace. diff --git a/releasenotes/notes/add-metadef-property-set-ab9cdcb73adf6397.yaml b/releasenotes/notes/add-metadef-property-set-ab9cdcb73adf6397.yaml new file mode 100644 index 0000000000..cf07d527de --- /dev/null +++ b/releasenotes/notes/add-metadef-property-set-ab9cdcb73adf6397.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef property set`` command to update a + metadef property inside a specific namespace. diff --git a/releasenotes/notes/add-metadef-property-show-8bf2ec421f74cb2d.yaml b/releasenotes/notes/add-metadef-property-show-8bf2ec421f74cb2d.yaml new file mode 100644 index 0000000000..9411a533c2 --- /dev/null +++ b/releasenotes/notes/add-metadef-property-show-8bf2ec421f74cb2d.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``image metadef property show`` command to show details + about a metadef property inside a specific namespace. diff --git a/releasenotes/notes/add-port-hardware-offload-type-011c98ab748357d7.yaml b/releasenotes/notes/add-port-hardware-offload-type-011c98ab748357d7.yaml new file mode 100644 index 0000000000..87a72cdbd5 --- /dev/null +++ b/releasenotes/notes/add-port-hardware-offload-type-011c98ab748357d7.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add the port hardware offload attribute to the ``port create`` command. + Once defined, the value cannot be modified. diff --git a/releasenotes/notes/add-port-hints-attribute-be1779e640a47d0d.yaml b/releasenotes/notes/add-port-hints-attribute-be1779e640a47d0d.yaml new file mode 100644 index 0000000000..5a52f3c13b --- /dev/null +++ b/releasenotes/notes/add-port-hints-attribute-be1779e640a47d0d.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Enable management of Neutron port hints: ``port create --hint HINT``, + ``port set --hint HINT`` and ``port unset --hint``. Port hints allow + passing backend-specific hints to Neutron, mainly to tune backend + performance. The first hint controls Open vSwitch Tx steering. diff --git a/releasenotes/notes/add-port-list-status-option-f51da0aed0528a5d.yaml b/releasenotes/notes/add-port-list-status-option-f51da0aed0528a5d.yaml new file mode 100644 index 0000000000..762ea6dcaa --- /dev/null +++ b/releasenotes/notes/add-port-list-status-option-f51da0aed0528a5d.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``--status`` option to ``port list`` command. 
+ [Bug `1672680 `_] diff --git a/releasenotes/notes/add-port-numa-affinity-policy-socket-5a986b14033e0f6e.yaml b/releasenotes/notes/add-port-numa-affinity-policy-socket-5a986b14033e0f6e.yaml new file mode 100644 index 0000000000..d464992eca --- /dev/null +++ b/releasenotes/notes/add-port-numa-affinity-policy-socket-5a986b14033e0f6e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add a new NUMA affinity policy option: "socket". That applies to any new + port (using ``port create``) or any existing port (using ``port set``). diff --git a/releasenotes/notes/add-remove-multiple-security-groups-2c0b2d599124c9c9.yaml b/releasenotes/notes/add-remove-multiple-security-groups-2c0b2d599124c9c9.yaml new file mode 100644 index 0000000000..1938f7121c --- /dev/null +++ b/releasenotes/notes/add-remove-multiple-security-groups-2c0b2d599124c9c9.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``server add security group`` and ``server remove security group`` + commands now accept multiple security groups. diff --git a/releasenotes/notes/add-snapshot-unmanage-command-d4c0c8fd8b638d48.yaml b/releasenotes/notes/add-snapshot-unmanage-command-d4c0c8fd8b638d48.yaml new file mode 100644 index 0000000000..a0abd5e582 --- /dev/null +++ b/releasenotes/notes/add-snapshot-unmanage-command-d4c0c8fd8b638d48.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for unmanaging snapshots with the + ``openstack snapshot delete --remote`` command. diff --git a/releasenotes/notes/add-stores-info-9f1488dd29013767.yaml b/releasenotes/notes/add-stores-info-9f1488dd29013767.yaml new file mode 100644 index 0000000000..36eef8b4c8 --- /dev/null +++ b/releasenotes/notes/add-stores-info-9f1488dd29013767.yaml @@ -0,0 +1,3 @@ +features: + - | + Add ``image stores info`` command, allowing users to know available backends. diff --git a/releasenotes/notes/add-user-project-enabled-filters-9f2090cdcc97b667.yaml b/releasenotes/notes/add-user-project-enabled-filters-9f2090cdcc97b667.yaml new file mode 100644 index 0000000000..a812ca6b73 --- /dev/null +++ b/releasenotes/notes/add-user-project-enabled-filters-9f2090cdcc97b667.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add filters to search for enabled and disabled users and projects. diff --git a/releasenotes/notes/add-vlan_qinq-to-the-network-3556c094aeedc0de.yaml b/releasenotes/notes/add-vlan_qinq-to-the-network-3556c094aeedc0de.yaml new file mode 100644 index 0000000000..f89575b40c --- /dev/null +++ b/releasenotes/notes/add-vlan_qinq-to-the-network-3556c094aeedc0de.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add ``qinq-vlan`` and ``no-qinq-vlan`` arguments to the ``network create`` + command. It will enable/disable QinQ feature for the created network. + This new argument is mutually exclusive with the ``transparent-vlan`` - only + one of them can be set to ``True`` for the network. diff --git a/releasenotes/notes/add-volume-backup-project-filter-6c09b2c8aba83341.yaml b/releasenotes/notes/add-volume-backup-project-filter-6c09b2c8aba83341.yaml new file mode 100644 index 0000000000..06bc7a4f02 --- /dev/null +++ b/releasenotes/notes/add-volume-backup-project-filter-6c09b2c8aba83341.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``--project`` option to ``volume backup list`` command, + to allow filtering for projects when listing volume backups. 
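As context for the ``server add security group`` / ``server remove security group`` note above, the sketch below shows roughly what accepting multiple security groups amounts to at the openstacksdk level, presumably one add call per group; the cloud name, server name and group names are placeholders, and the exact loop in OSC may differ.

.. code-block:: python

    # Hypothetical sketch: attach several security groups to one server via
    # openstacksdk. "devstack", "web-1" and the group names are placeholders.
    import openstack

    conn = openstack.connect(cloud="devstack")
    server = conn.compute.find_server("web-1", ignore_missing=False)
    for sg_name in ("default", "web", "ssh"):
        group = conn.network.find_security_group(sg_name, ignore_missing=False)
        conn.compute.add_security_group_to_server(server, group)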
diff --git a/releasenotes/notes/add-volume-list-property-option-62008dc24762663b.yaml b/releasenotes/notes/add-volume-list-property-option-62008dc24762663b.yaml new file mode 100644 index 0000000000..ab4b60025a --- /dev/null +++ b/releasenotes/notes/add-volume-list-property-option-62008dc24762663b.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add ``--property`` option to ``volume list`` command to filter volumes. diff --git a/releasenotes/notes/add-volume-manage-command-088890446d0e81c7.yaml b/releasenotes/notes/add-volume-manage-command-088890446d0e81c7.yaml new file mode 100644 index 0000000000..e30def1936 --- /dev/null +++ b/releasenotes/notes/add-volume-manage-command-088890446d0e81c7.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add support for managing volumes with + ``openstack volume create --remote-source + --host `` command. diff --git a/releasenotes/notes/add-volume-qos-set-no-property-option-348480dfc42a0a64.yaml b/releasenotes/notes/add-volume-qos-set-no-property-option-348480dfc42a0a64.yaml new file mode 100644 index 0000000000..51b2145068 --- /dev/null +++ b/releasenotes/notes/add-volume-qos-set-no-property-option-348480dfc42a0a64.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add ``--no-property`` option in ``volume qos set``. diff --git a/releasenotes/notes/add-volume-type-set-public-private-opts-891fc7ab5de9bb6a.yaml b/releasenotes/notes/add-volume-type-set-public-private-opts-891fc7ab5de9bb6a.yaml new file mode 100644 index 0000000000..219712efa2 --- /dev/null +++ b/releasenotes/notes/add-volume-type-set-public-private-opts-891fc7ab5de9bb6a.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``volume type set`` command now supports ``--public`` and ``--private`` + options. diff --git a/releasenotes/notes/add-volume-unmanage-support-9b7139e5e948de77.yaml b/releasenotes/notes/add-volume-unmanage-support-9b7139e5e948de77.yaml new file mode 100644 index 0000000000..cba1e60cd3 --- /dev/null +++ b/releasenotes/notes/add-volume-unmanage-support-9b7139e5e948de77.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for unmanaging volumes with + ``openstack volume delete --remote `` command. diff --git a/releasenotes/notes/aggregate-list-uuid-column-808a0d051006a5ef.yaml b/releasenotes/notes/aggregate-list-uuid-column-808a0d051006a5ef.yaml new file mode 100644 index 0000000000..49e9e557d4 --- /dev/null +++ b/releasenotes/notes/aggregate-list-uuid-column-808a0d051006a5ef.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``aggregate list`` command will now include the UUIDs of the aggregates + when the cloud supports it. diff --git a/releasenotes/notes/block-storage-x-manageable-list-long-option-a16a4641acfcf781.yaml b/releasenotes/notes/block-storage-x-manageable-list-long-option-a16a4641acfcf781.yaml new file mode 100644 index 0000000000..60566ba570 --- /dev/null +++ b/releasenotes/notes/block-storage-x-manageable-list-long-option-a16a4641acfcf781.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + The ``--detailed`` option of the ``block storage volume manageable list`` + and ``block storage snapshot manageable list`` commands has been deprecated + in favour of a ``--long`` option. These commands will no longer default to + detailed output by default. diff --git a/releasenotes/notes/bp-add-locked-reason-425efd2def1144f1.yaml b/releasenotes/notes/bp-add-locked-reason-425efd2def1144f1.yaml index e9f6cc6d9e..9ee0325389 100644 --- a/releasenotes/notes/bp-add-locked-reason-425efd2def1144f1.yaml +++ b/releasenotes/notes/bp-add-locked-reason-425efd2def1144f1.yaml @@ -10,4 +10,3 @@ features: servers. 
Requires ``–os-compute-api-version`` 2.73 or greater. [Blueprint `add-locked-reason `_] - \ No newline at end of file diff --git a/releasenotes/notes/bp-neutron-floating-ip-rate-limit-8387c040a6fb9acd.yaml b/releasenotes/notes/bp-neutron-floating-ip-rate-limit-8387c040a6fb9acd.yaml index d0aa9cd121..86f3da1048 100644 --- a/releasenotes/notes/bp-neutron-floating-ip-rate-limit-8387c040a6fb9acd.yaml +++ b/releasenotes/notes/bp-neutron-floating-ip-rate-limit-8387c040a6fb9acd.yaml @@ -2,7 +2,7 @@ features: - | Add support for attaching and removing qos policy to floating IPs. - + Add option ``--qos-policy`` to the ``floating ip create`` and ``floating ip set`` commands to add qos policy to a floating IP. diff --git a/releasenotes/notes/bp-project-tags-b544aef9672d415b.yaml b/releasenotes/notes/bp-project-tags-b544aef9672d415b.yaml index 0da35ac37d..2d3f519e2f 100644 --- a/releasenotes/notes/bp-project-tags-b544aef9672d415b.yaml +++ b/releasenotes/notes/bp-project-tags-b544aef9672d415b.yaml @@ -2,7 +2,7 @@ features: - | Add ``--tag`` option to ``project create`` command, ``--tag``, ``--clear-tags``, and - ``--remove-tag`` options to ``project set`` command. Add ``--tags``, ``--tags-any``, + ``--remove-tag`` options to ``project set`` command. Add ``--tags``, ``--tags-any``, ``--not-tags``, and ``--not-tags-any`` options to ``project list`` command to filter list results by different projects based on their tags. [`blueprint project-tags `_] diff --git a/releasenotes/notes/bug-1647406-c936581034a1b6e4.yaml b/releasenotes/notes/bug-1647406-c936581034a1b6e4.yaml index 2f327517de..29a4036a23 100644 --- a/releasenotes/notes/bug-1647406-c936581034a1b6e4.yaml +++ b/releasenotes/notes/bug-1647406-c936581034a1b6e4.yaml @@ -9,7 +9,7 @@ fixes: The device name of the boot volume specificed in the ``--volume`` option is no longer assumed to be *'vda'* but now uses the hypervisor's boot index to obtain the device name. This maintains the status quo for - **QEMU/KVM** hypervisors but **XEN**, **parallels** and others + **QEMU/KVM** hypervisors but **XEN**, **parallels** and others *virt types* that have device naming is different from ``vd*`` should now also work correctly. [:lpbug:`1497845`] diff --git a/releasenotes/notes/bug-1648317-2d12dabc357c4d52.yaml b/releasenotes/notes/bug-1648317-2d12dabc357c4d52.yaml new file mode 100644 index 0000000000..ffd1ff90b6 --- /dev/null +++ b/releasenotes/notes/bug-1648317-2d12dabc357c4d52.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Add ``--project`` and ``--project-domain`` options to the following network + commands: + + - ``openstack security group rule list`` + + [Bug `1648317 `_] + diff --git a/releasenotes/notes/bug-2084580-cb1e8c47501e730c.yaml b/releasenotes/notes/bug-2084580-cb1e8c47501e730c.yaml new file mode 100644 index 0000000000..5ac30b3733 --- /dev/null +++ b/releasenotes/notes/bug-2084580-cb1e8c47501e730c.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + The ``quota set`` and ``limits show`` commands will now check for the + ``block-storage`` and ``block-store`` service types along with ``volume``, + ``volumev2`` and ``volumev3``. + + [Bug `2084580 `_] diff --git a/releasenotes/notes/bug-2126565-a119ac242d9ac795.yaml b/releasenotes/notes/bug-2126565-a119ac242d9ac795.yaml new file mode 100644 index 0000000000..87fe689b18 --- /dev/null +++ b/releasenotes/notes/bug-2126565-a119ac242d9ac795.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Running ``openstack application credential show`` on + a non-existent application credential does not + raise an exception. 
+ + [Bug `2126565 `_] diff --git a/releasenotes/notes/bug-2137636-fix-quota-usage-display-2d8f07dccc21f79c.yaml b/releasenotes/notes/bug-2137636-fix-quota-usage-display-2d8f07dccc21f79c.yaml new file mode 100644 index 0000000000..818e29ae82 --- /dev/null +++ b/releasenotes/notes/bug-2137636-fix-quota-usage-display-2d8f07dccc21f79c.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix ``openstack quota show --usage`` to correctly display resource usage + and reservation. diff --git a/releasenotes/notes/compute-add-validate-console-auth-token-1eda2bd62060ccfa.yaml b/releasenotes/notes/compute-add-validate-console-auth-token-1eda2bd62060ccfa.yaml new file mode 100644 index 0000000000..5b03061efb --- /dev/null +++ b/releasenotes/notes/compute-add-validate-console-auth-token-1eda2bd62060ccfa.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add support for the new ``spice-direct`` console type, as well as + exposing the ability for admins to look up console connection information + via the new ``console connection show`` command. diff --git a/releasenotes/notes/confirm-reset-state-24497c8b24990aa7.yaml b/releasenotes/notes/confirm-reset-state-24497c8b24990aa7.yaml new file mode 100644 index 0000000000..b381e5a821 --- /dev/null +++ b/releasenotes/notes/confirm-reset-state-24497c8b24990aa7.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + The ``openstack server set`` command has been extended with a new + parameter ``--auto-approve`` and the existing ``--state`` parameter + has been modified to require confirmation before resetting the state. diff --git a/releasenotes/notes/drop-python-38-9dcbd2b2b51f24f2.yaml b/releasenotes/notes/drop-python-38-9dcbd2b2b51f24f2.yaml new file mode 100644 index 0000000000..6d61b9e7aa --- /dev/null +++ b/releasenotes/notes/drop-python-38-9dcbd2b2b51f24f2.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Support for Python 3.8 has been dropped. diff --git a/releasenotes/notes/drop-python-39-fc95c2d17a862e3e.yaml b/releasenotes/notes/drop-python-39-fc95c2d17a862e3e.yaml new file mode 100644 index 0000000000..7d8ca21ffc --- /dev/null +++ b/releasenotes/notes/drop-python-39-fc95c2d17a862e3e.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Support for Python 3.9 has been dropped. diff --git a/releasenotes/notes/fip-filter-opts-a847f8743fef467f.yaml b/releasenotes/notes/fip-filter-opts-a847f8743fef467f.yaml new file mode 100644 index 0000000000..997075c272 --- /dev/null +++ b/releasenotes/notes/fip-filter-opts-a847f8743fef467f.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``--network``, ``--port``, and ``--router`` options of the ``floating + ip list`` command can now be specified multiple times. diff --git a/releasenotes/notes/fix-backup-incremental-d1c1e6886cf32256.yaml b/releasenotes/notes/fix-backup-incremental-d1c1e6886cf32256.yaml new file mode 100644 index 0000000000..7942a6f293 --- /dev/null +++ b/releasenotes/notes/fix-backup-incremental-d1c1e6886cf32256.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed an issue with creating incremental volume backups. + Previously, the ``incremental`` value was not passed in the + API request; it is now included in the backup + create request. 
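To make the incremental-backup fix above concrete, the snippet below sketches the request body shape the Block Storage backup-create API expects, including the ``incremental`` flag that was previously being dropped by the client; the volume ID and name are placeholders and the snippet only builds the JSON body rather than calling a real cloud.

.. code-block:: python

    # Hypothetical body for POST /v3/{project_id}/backups: the fix above is
    # about making sure "incremental" actually ends up in this payload.
    import json

    body = {
        "backup": {
            "volume_id": "VOLUME_ID",  # placeholder
            "name": "nightly-backup",  # placeholder
            "incremental": True,       # previously omitted by the client
        }
    }
    print(json.dumps(body, indent=2))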
diff --git a/releasenotes/notes/fix-image-set-project-accept-owner-bug-2136795.yaml b/releasenotes/notes/fix-image-set-project-accept-owner-bug-2136795.yaml new file mode 100644 index 0000000000..4cd755b3fe --- /dev/null +++ b/releasenotes/notes/fix-image-set-project-accept-owner-bug-2136795.yaml @@ -0,0 +1,10 @@ +--- +fixes: + - | + Fix a bug where using ``openstack image set --project + --accept `` incorrectly changed the image owner to the specified + project instead of only updating the member status. The ``--project`` + parameter when used with ``--accept``, ``--reject``, or ``--pending`` + should only identify which member's status to update, not change the + image ownership. + [Bug `2136795 `_] diff --git a/releasenotes/notes/fix-restore-resp-e664a643a723cd2e.yaml b/releasenotes/notes/fix-restore-resp-e664a643a723cd2e.yaml new file mode 100644 index 0000000000..2ee8f216d0 --- /dev/null +++ b/releasenotes/notes/fix-restore-resp-e664a643a723cd2e.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed the output of ``volume backup restore`` command. diff --git a/releasenotes/notes/fix-show-backup-by-name-0759c55396be77a3.yaml b/releasenotes/notes/fix-show-backup-by-name-0759c55396be77a3.yaml new file mode 100644 index 0000000000..7ed9d72cc8 --- /dev/null +++ b/releasenotes/notes/fix-show-backup-by-name-0759c55396be77a3.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed the ``openstack volume backup show`` command + to show a backup by name. diff --git a/releasenotes/notes/fix-story-2010775-953dbdf03b2b6746.yaml b/releasenotes/notes/fix-story-2010775-953dbdf03b2b6746.yaml new file mode 100644 index 0000000000..e4c98b7443 --- /dev/null +++ b/releasenotes/notes/fix-story-2010775-953dbdf03b2b6746.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed a bug in "access rule" subcommands where the client logic incorrectly + assumed that access rules have a "name" property which resulted in + unpredictable behaviors. e.g. "access rule delete {non-existent-id}" now + results in a not-found error instead of sometimes deleting an unrelated + rule. diff --git a/releasenotes/notes/flavor-id-auto-e21157f97dc1d7f2.yaml b/releasenotes/notes/flavor-id-auto-e21157f97dc1d7f2.yaml new file mode 100644 index 0000000000..96b56b8df6 --- /dev/null +++ b/releasenotes/notes/flavor-id-auto-e21157f97dc1d7f2.yaml @@ -0,0 +1,6 @@ +--- +deprecations: + - | + The ``--id auto`` alias for the ``flavor create`` command is deprecated + for removal. Omit the option entirely to ensure the server creates the + ID for you. diff --git a/releasenotes/notes/floatingip_dns_integration-f26c7575694d098d.yaml b/releasenotes/notes/floatingip_dns_integration-f26c7575694d098d.yaml index 9c1d4cf5d2..b730658ad4 100644 --- a/releasenotes/notes/floatingip_dns_integration-f26c7575694d098d.yaml +++ b/releasenotes/notes/floatingip_dns_integration-f26c7575694d098d.yaml @@ -1,7 +1,7 @@ --- features: - | - Add ``--dns-domain`` and ``--dns-name`` options to the + Add ``--dns-domain`` and ``--dns-name`` options to the ``floating ip create`` commands. These options set the DNS domain and name for the floating IP. 
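The image membership fix above rests on the difference between updating a member's status and updating the image itself. A hedged illustration against the Image (Glance) v2 API follows; the endpoint, token and IDs are placeholders, and this is not the code path OSC uses internally.

.. code-block:: python

    # Accepting a shared image should update the membership status only;
    # it must not rewrite the image's "owner" field.
    import requests

    base = "https://image.example.test/v2"   # placeholder endpoint
    headers = {"X-Auth-Token": "TOKEN"}      # placeholder token
    image_id, member_id = "IMAGE_ID", "PROJECT_ID"

    # Correct behaviour: update only the member status.
    requests.put(
        f"{base}/images/{image_id}/members/{member_id}",
        json={"status": "accepted"},
        headers=headers,
    )
    # The old, buggy behaviour effectively re-owned the image instead
    # (patching the image's "owner" attribute), which the fix prevents.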
diff --git a/releasenotes/notes/keypair-create-client-side-generation-73d8dd36192f70c9.yaml b/releasenotes/notes/keypair-create-client-side-generation-73d8dd36192f70c9.yaml new file mode 100644 index 0000000000..bf5fd5b759 --- /dev/null +++ b/releasenotes/notes/keypair-create-client-side-generation-73d8dd36192f70c9.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + The ``openstack keypair create`` command will now generate keypairs on the + client side in ssh-ed25519 format. The Compute service no longer supports + server-side key generation starting with ``--os-compute-api-version 2.92`` + while the use of ssh-ed25519 is necessary as support for ssh-rsa has been + disabled by default starting in OpenSSH 8.8, which prevents its use in + guests using this version of OpenSSH in the default configuration. + ssh-ed25519 support is widespread and is supported by OpenSSH 6.5 or later + and Dropbear 2020.79 or later. diff --git a/releasenotes/notes/keystone-create-user-no-password-619bcddcd046dda8.yaml b/releasenotes/notes/keystone-create-user-no-password-619bcddcd046dda8.yaml new file mode 100644 index 0000000000..7cd6acba0e --- /dev/null +++ b/releasenotes/notes/keystone-create-user-no-password-619bcddcd046dda8.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + [Bug `2136148 `_] Keystone allows + users to be created with no password but no value should be submitted for + the password instead of a ``null`` value. diff --git a/releasenotes/notes/migrate-access-rule-to-sdk-923682b4c71fea8a.yaml b/releasenotes/notes/migrate-access-rule-to-sdk-923682b4c71fea8a.yaml new file mode 100644 index 0000000000..9f6deb8d4e --- /dev/null +++ b/releasenotes/notes/migrate-access-rule-to-sdk-923682b4c71fea8a.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + The following commands have been migrated to SDK: + + - ``access rule list`` + - ``access rule delete`` + - ``access rule show`` diff --git a/releasenotes/notes/migrate-agent-commands-1c50ffcb75f91418.yaml b/releasenotes/notes/migrate-agent-commands-1c50ffcb75f91418.yaml new file mode 100644 index 0000000000..7f55cfc302 --- /dev/null +++ b/releasenotes/notes/migrate-agent-commands-1c50ffcb75f91418.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The ``compute agent *`` commands have been migrated to SDK. 
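The keypair note above mentions client-side ssh-ed25519 generation; the sketch below shows how such a key pair can be produced with the ``cryptography`` package. The choice of library and the exact serialization calls are assumptions for illustration and may not match the implementation in OSC.

.. code-block:: python

    # Minimal sketch, assuming the "cryptography" package: generate an
    # ssh-ed25519 key pair in OpenSSH format on the client side.
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

    private_key = Ed25519PrivateKey.generate()
    private_openssh = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.OpenSSH,
        encryption_algorithm=serialization.NoEncryption(),
    )
    public_openssh = private_key.public_key().public_bytes(
        encoding=serialization.Encoding.OpenSSH,
        format=serialization.PublicFormat.OpenSSH,
    )
    print(public_openssh.decode())  # "ssh-ed25519 AAAA..."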
diff --git a/releasenotes/notes/migrate-application-credential-to-sdk-c79d8dfc3c8e1d9f.yaml b/releasenotes/notes/migrate-application-credential-to-sdk-c79d8dfc3c8e1d9f.yaml new file mode 100644 index 0000000000..22b7a2971f --- /dev/null +++ b/releasenotes/notes/migrate-application-credential-to-sdk-c79d8dfc3c8e1d9f.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + The following commands have been migrated to SDK: + + - ``application credential create`` + - ``application credential delete`` + - ``application credential list`` + - ``application credential show`` diff --git a/releasenotes/notes/migrate-backup-commands-0becc8f18cf9737b.yaml b/releasenotes/notes/migrate-backup-commands-0becc8f18cf9737b.yaml new file mode 100644 index 0000000000..290a8d7869 --- /dev/null +++ b/releasenotes/notes/migrate-backup-commands-0becc8f18cf9737b.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Migrated the following backup commands to SDK: + + * Create Backup + * Show Backup + * List Backup + * Restore Backup + * Delete Backup diff --git a/releasenotes/notes/migrate-credential-to-sdk-33a841847fe7d568.yaml b/releasenotes/notes/migrate-credential-to-sdk-33a841847fe7d568.yaml new file mode 100644 index 0000000000..e83001273f --- /dev/null +++ b/releasenotes/notes/migrate-credential-to-sdk-33a841847fe7d568.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The following commands have been migrated to SDK: + + - ``credential create`` + - ``credential delete`` + - ``credential list`` + - ``credential set`` + - ``credential show`` diff --git a/releasenotes/notes/migrate-domain-to-sdk-da6ec38221e79a37.yaml b/releasenotes/notes/migrate-domain-to-sdk-da6ec38221e79a37.yaml new file mode 100644 index 0000000000..ce26909b4a --- /dev/null +++ b/releasenotes/notes/migrate-domain-to-sdk-da6ec38221e79a37.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The following commands have been migrated to SDK: + + - ``domain create`` + - ``domain delete`` + - ``domain list`` + - ``domain set`` + - ``domain show`` diff --git a/releasenotes/notes/migrate-endpoint-to-sdk-8ca5a34794b6bd7e.yaml b/releasenotes/notes/migrate-endpoint-to-sdk-8ca5a34794b6bd7e.yaml new file mode 100644 index 0000000000..ab715d1643 --- /dev/null +++ b/releasenotes/notes/migrate-endpoint-to-sdk-8ca5a34794b6bd7e.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The following commands have been migrated to SDK: + + - ``endpoint create`` + - ``endpoint delete`` + - ``endpoint list`` + - ``endpoint show`` + - ``endpoint set`` diff --git a/releasenotes/notes/migrate-group-to-sdk-59beef31a7c40bbb.yaml b/releasenotes/notes/migrate-group-to-sdk-59beef31a7c40bbb.yaml new file mode 100644 index 0000000000..3cbb9b3dd1 --- /dev/null +++ b/releasenotes/notes/migrate-group-to-sdk-59beef31a7c40bbb.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Migrate ``group`` commands from keystoneclient to SDK. diff --git a/releasenotes/notes/migrate-host-set-438997eb6f81f2b1.yaml b/releasenotes/notes/migrate-host-set-438997eb6f81f2b1.yaml new file mode 100644 index 0000000000..f935660919 --- /dev/null +++ b/releasenotes/notes/migrate-host-set-438997eb6f81f2b1.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The ``host set`` command has been migrated to SDK. diff --git a/releasenotes/notes/migrate-limits-show-f586c9762dfd7d0c.yaml b/releasenotes/notes/migrate-limits-show-f586c9762dfd7d0c.yaml new file mode 100644 index 0000000000..81a5119ada --- /dev/null +++ b/releasenotes/notes/migrate-limits-show-f586c9762dfd7d0c.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The ``limits show`` command has been migrated to SDK. 
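Several of the notes above and below describe commands being "migrated to SDK": in practice the command plugins now call openstacksdk proxies instead of the legacy per-service clients. A rough sketch of that call path, using the identity proxy behind commands such as ``domain list`` as an example; the cloud name is a placeholder, and the real commands add option parsing and output formatting on top.

.. code-block:: python

    # Rough sketch of the SDK call path behind a migrated command such as
    # "domain list"; "devstack" is a placeholder clouds.yaml entry.
    import openstack

    conn = openstack.connect(cloud="devstack")
    for domain in conn.identity.domains():
        print(domain.id, domain.name)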
diff --git a/releasenotes/notes/migrate-region-to-sdk-fbd27bceaa1db9dc.yaml b/releasenotes/notes/migrate-region-to-sdk-fbd27bceaa1db9dc.yaml new file mode 100644 index 0000000000..31a5e6d3e2 --- /dev/null +++ b/releasenotes/notes/migrate-region-to-sdk-fbd27bceaa1db9dc.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The following commands have been migrated to SDK: + + - ``region create`` + - ``region list`` + - ``region delete`` + - ``region set`` + - ``region show`` diff --git a/releasenotes/notes/migrate-resource-filter-commands-2a353edb965723d1.yaml b/releasenotes/notes/migrate-resource-filter-commands-2a353edb965723d1.yaml new file mode 100644 index 0000000000..bf5ce3b60f --- /dev/null +++ b/releasenotes/notes/migrate-resource-filter-commands-2a353edb965723d1.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Migrated ``block storage resource filters list`` and + ``block storage resource filters show`` commands to SDK. diff --git a/releasenotes/notes/migrate-role-assignment-to-sdk-e6e52bef467b4e4c.yaml b/releasenotes/notes/migrate-role-assignment-to-sdk-e6e52bef467b4e4c.yaml new file mode 100644 index 0000000000..faf59e9371 --- /dev/null +++ b/releasenotes/notes/migrate-role-assignment-to-sdk-e6e52bef467b4e4c.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Migrate ``role assignment`` commands from keystoneclient to SDK. diff --git a/releasenotes/notes/migrate-server-evacuate-to-sdk-a0415988ef5451b2.yaml b/releasenotes/notes/migrate-server-evacuate-to-sdk-a0415988ef5451b2.yaml new file mode 100644 index 0000000000..0b334b4f9b --- /dev/null +++ b/releasenotes/notes/migrate-server-evacuate-to-sdk-a0415988ef5451b2.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The ``server evacuate`` command has been migrated to SDK. diff --git a/releasenotes/notes/migrate-server-events-to-sdk-6a1f5dce582df245.yaml b/releasenotes/notes/migrate-server-events-to-sdk-6a1f5dce582df245.yaml new file mode 100644 index 0000000000..b62e5dab32 --- /dev/null +++ b/releasenotes/notes/migrate-server-events-to-sdk-6a1f5dce582df245.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``server event list`` and ``server event show`` commands have been + migrated to SDK. diff --git a/releasenotes/notes/migrate-server-reboot-to-sdk-a49822810def4c8a.yaml b/releasenotes/notes/migrate-server-reboot-to-sdk-a49822810def4c8a.yaml new file mode 100644 index 0000000000..e4c8c3ca25 --- /dev/null +++ b/releasenotes/notes/migrate-server-reboot-to-sdk-a49822810def4c8a.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Migrate ``server reboot`` command from novaclient to SDK. diff --git a/releasenotes/notes/migrate-server-restore-to-sdk-4540f26753031779.yaml b/releasenotes/notes/migrate-server-restore-to-sdk-4540f26753031779.yaml new file mode 100644 index 0000000000..1adc40beae --- /dev/null +++ b/releasenotes/notes/migrate-server-restore-to-sdk-4540f26753031779.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + The ``server restore`` command has been migrated to SDK. diff --git a/releasenotes/notes/migrate-server-set-unset-to-sdk-ae32ebcced845b06.yaml b/releasenotes/notes/migrate-server-set-unset-to-sdk-ae32ebcced845b06.yaml new file mode 100644 index 0000000000..d01c09a909 --- /dev/null +++ b/releasenotes/notes/migrate-server-set-unset-to-sdk-ae32ebcced845b06.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + The ``server set`` and ``server unset`` commands have been migrated to SDK. 
diff --git a/releasenotes/notes/migrate-server-shelve-unshelve-to-sdk-8fce77586aa68a51.yaml b/releasenotes/notes/migrate-server-shelve-unshelve-to-sdk-8fce77586aa68a51.yaml new file mode 100644 index 0000000000..f0969fc0ae --- /dev/null +++ b/releasenotes/notes/migrate-server-shelve-unshelve-to-sdk-8fce77586aa68a51.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``server shelve`` and ``server unshelve`` commands have been migrated + to SDK. diff --git a/releasenotes/notes/migrate-server-start-stop-to-sdk-55edd4e1ff5e6ac7.yaml b/releasenotes/notes/migrate-server-start-stop-to-sdk-55edd4e1ff5e6ac7.yaml new file mode 100644 index 0000000000..afcca53ddb --- /dev/null +++ b/releasenotes/notes/migrate-server-start-stop-to-sdk-55edd4e1ff5e6ac7.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Migrate ``server start`` and ``server stop`` commands from novaclient to + sdk. diff --git a/releasenotes/notes/migrate-service-provider-to-sdk-74dc48b227f21a05.yaml b/releasenotes/notes/migrate-service-provider-to-sdk-74dc48b227f21a05.yaml new file mode 100644 index 0000000000..d9914cf972 --- /dev/null +++ b/releasenotes/notes/migrate-service-provider-to-sdk-74dc48b227f21a05.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The following commands have been migrated to SDK: + + - ``service provider create`` + - ``service provider delete`` + - ``service provider set`` + - ``service provider list`` + - ``service provider show`` diff --git a/releasenotes/notes/migrate-service-to-sdk-6ff62ebf7e41db7c.yaml b/releasenotes/notes/migrate-service-to-sdk-6ff62ebf7e41db7c.yaml new file mode 100644 index 0000000000..90a55fcaab --- /dev/null +++ b/releasenotes/notes/migrate-service-to-sdk-6ff62ebf7e41db7c.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + The following commands have been migrated to SDK: + + - ``service create`` + - ``service delete`` + - ``service set`` + - ``service list`` + - ``service show`` diff --git a/releasenotes/notes/migrate-trust-to-sdk-9397c9cfddcb636a.yaml b/releasenotes/notes/migrate-trust-to-sdk-9397c9cfddcb636a.yaml new file mode 100644 index 0000000000..e45218f312 --- /dev/null +++ b/releasenotes/notes/migrate-trust-to-sdk-9397c9cfddcb636a.yaml @@ -0,0 +1,9 @@ +--- +upgrade: + - | + The following commands have been migrated to SDK: + + - ``trust create`` + - ``trust list`` + - ``trust delete`` + - ``trust show`` diff --git a/releasenotes/notes/migrate-volume-attachment-commands-4309409bca1ca5d4.yaml b/releasenotes/notes/migrate-volume-attachment-commands-4309409bca1ca5d4.yaml new file mode 100644 index 0000000000..b02abfd8cd --- /dev/null +++ b/releasenotes/notes/migrate-volume-attachment-commands-4309409bca1ca5d4.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Migrated volume attachment commands to SDK. diff --git a/releasenotes/notes/migrate-volume-backend-commands-259e553e213c71b0.yaml b/releasenotes/notes/migrate-volume-backend-commands-259e553e213c71b0.yaml new file mode 100644 index 0000000000..266f69565f --- /dev/null +++ b/releasenotes/notes/migrate-volume-backend-commands-259e553e213c71b0.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Migrated following volume backends commands to SDK. 
+ + * Capability Show + * Pool List diff --git a/releasenotes/notes/migrate-volume-revert-to-sdk-1e399853d80ba5f8.yaml b/releasenotes/notes/migrate-volume-revert-to-sdk-1e399853d80ba5f8.yaml new file mode 100644 index 0000000000..30f12e80f5 --- /dev/null +++ b/releasenotes/notes/migrate-volume-revert-to-sdk-1e399853d80ba5f8.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + The ``volume revert`` command has been migrated to SDK. diff --git a/releasenotes/notes/migrate-volume-summary-to-sdk-96ff58f653e0feaa.yaml b/releasenotes/notes/migrate-volume-summary-to-sdk-96ff58f653e0feaa.yaml new file mode 100644 index 0000000000..9c495f8289 --- /dev/null +++ b/releasenotes/notes/migrate-volume-summary-to-sdk-96ff58f653e0feaa.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + The ``volume summary`` command has been migrated to SDK. diff --git a/releasenotes/notes/network-ovn-agents-bdfced3a6d25e7d2.yaml b/releasenotes/notes/network-ovn-agents-bdfced3a6d25e7d2.yaml new file mode 100644 index 0000000000..7d9909fb0b --- /dev/null +++ b/releasenotes/notes/network-ovn-agents-bdfced3a6d25e7d2.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added four new network agent types to the list method filter: + ``ovn-controller``, ``ovn-controller-gateway``, ``ovn-metadata`` and + ``ovn-agent``. diff --git a/releasenotes/notes/network-quota-no-force-default-0975bdf15655070c.yaml b/releasenotes/notes/network-quota-no-force-default-0975bdf15655070c.yaml new file mode 100644 index 0000000000..e1a1359053 --- /dev/null +++ b/releasenotes/notes/network-quota-no-force-default-0975bdf15655070c.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - The ``openstack quota set`` command previously defaulted to ``--force`` + behavior for network quotas. This behavior has now changed and the command + now defaults to ``--no-force`` behavior. Users should specify the + ``--force`` option if they wish to retain previous behavior. diff --git a/releasenotes/notes/osc4-identity-6564257c67d43106.yaml b/releasenotes/notes/osc4-identity-6564257c67d43106.yaml index a5105c801c..b352ee3ef3 100644 --- a/releasenotes/notes/osc4-identity-6564257c67d43106.yaml +++ b/releasenotes/notes/osc4-identity-6564257c67d43106.yaml @@ -7,6 +7,6 @@ upgrade: Remove deprecated ``user role list`` command. Use ``role assignment list`` options ``--project`` and ``--user`` instead. - | - Remove deprecated ``service create`` option ``--type``. + Remove deprecated ``service create`` option ``--type``. The type is supplied as a positional argument in The ``service create --name type`` command. diff --git a/releasenotes/notes/port-unset-device-id-and-owner-9fce242155c82992.yaml b/releasenotes/notes/port-unset-device-id-and-owner-9fce242155c82992.yaml new file mode 100644 index 0000000000..e14baacdd3 --- /dev/null +++ b/releasenotes/notes/port-unset-device-id-and-owner-9fce242155c82992.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added ``--device`` and ``--device-owner`` parameter to the + ``port unset`` command. diff --git a/releasenotes/notes/port_uplink_status_propagation_updatable-d1e155c19247b666.yaml b/releasenotes/notes/port_uplink_status_propagation_updatable-d1e155c19247b666.yaml new file mode 100644 index 0000000000..0e7b776207 --- /dev/null +++ b/releasenotes/notes/port_uplink_status_propagation_updatable-d1e155c19247b666.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``--enable-uplink-status-propagation`` option and + ``--disable-uplink-status-propagation`` option to ``port update`` command. 
diff --git a/releasenotes/notes/project-cleanup-skip-resource-option-4f80db0d8cf36fdb.yaml b/releasenotes/notes/project-cleanup-skip-resource-option-4f80db0d8cf36fdb.yaml new file mode 100644 index 0000000000..ee1ef89dea --- /dev/null +++ b/releasenotes/notes/project-cleanup-skip-resource-option-4f80db0d8cf36fdb.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + A new option ``--skip-resource`` has been added to the + ``project cleanup`` command. This allows excluding + certain resources from project cleanup, e.g. + ``--skip-resource "block_storage.backup"`` to keep + Cinder backups. diff --git a/releasenotes/notes/quota-set-default-option-bc26d37dc150533b.yaml b/releasenotes/notes/quota-set-default-option-bc26d37dc150533b.yaml new file mode 100644 index 0000000000..d3ba34d652 --- /dev/null +++ b/releasenotes/notes/quota-set-default-option-bc26d37dc150533b.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The ``quota set`` command now supports a ``--default`` option. When + provided, this allows you to set quotas for the default quota class, + which is the only quota class supported by the Compute and Block Storage + services. This replaces the deprecated ``quota set --class`` option. diff --git a/releasenotes/notes/remove-deprecated-quota-show-class-option-2109a6ff7ac18e80.yaml b/releasenotes/notes/remove-deprecated-quota-show-class-option-2109a6ff7ac18e80.yaml new file mode 100644 index 0000000000..ab1ce58639 --- /dev/null +++ b/releasenotes/notes/remove-deprecated-quota-show-class-option-2109a6ff7ac18e80.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + The ``--class`` option of the ``quota show`` command, which was deprecated + in 6.1.0 (Antelope), has now been removed in favour of the ``--default`` + option. Quota classes were never fully implemented and the compute and + volume services only support a single ``default`` quota class while the + network service does not support quota classes at all. diff --git a/releasenotes/notes/remove-project-purge-d372374b1a7c4641.yaml b/releasenotes/notes/remove-project-purge-d372374b1a7c4641.yaml new file mode 100644 index 0000000000..049ed26b2e --- /dev/null +++ b/releasenotes/notes/remove-project-purge-d372374b1a7c4641.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + The ``project purge`` command has been removed. It was superseded by the + ``project cleanup`` command, was not tested, and had not been functional + for some time, hence its removal without a deprecation period. Its + replacement, ``project cleanup``, is more powerful and more + flexible. diff --git a/releasenotes/notes/remove-volume-v1-commands-bfa14e9cae54929f.yaml b/releasenotes/notes/remove-volume-v1-commands-bfa14e9cae54929f.yaml new file mode 100644 index 0000000000..0aa6929b07 --- /dev/null +++ b/releasenotes/notes/remove-volume-v1-commands-bfa14e9cae54929f.yaml @@ -0,0 +1,35 @@ +--- +upgrade: + - | + Support for the Block Storage (Cinder) v1 API has been officially removed + as it had been broken for some time. If you haven't noticed, then you likely + don't need to do anything. However, in the unlikely event that your cloud + is using the Block Storage v1 API - or incorrectly advertises the Block + Storage v1 API - consider overriding the API version to use v2 as this + behaves very similarly. It may also be necessary to set an endpoint + override for the Block Storage API if your cloud's service catalog is not + configured correctly. For example: + + .. code-block:: yaml
+ + example: + regions: + - name: regionOne + values: + block_storage_endpoint_override: 'https://blockstorage.api.cloud.example/' + volume_api_version: 2 + + If using a public cloud provider, there may also be a profile already + published that sets these. These are listed in the `Vendor Support`__ + doc. For example: + + .. code-block:: yaml + + example: + profile: rackspace + + Alternatively, consider using versions of OSC < 3.19 and python-cinderclient + < 5.0 (both Stein), since these were the last versions to fully support + Cinder v1. + + .. __: https://docs.openstack.org/openstacksdk/latest/user/config/vendor-support.html diff --git a/releasenotes/notes/rename-volume-set-retype-policy-6bacb7dd92f1ad82.yaml b/releasenotes/notes/rename-volume-set-retype-policy-6bacb7dd92f1ad82.yaml new file mode 100644 index 0000000000..93380c1c23 --- /dev/null +++ b/releasenotes/notes/rename-volume-set-retype-policy-6bacb7dd92f1ad82.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + The ``volume set --retype-policy`` parameter has been renamed to + ``--migration-policy`` to better convey the option's meaning. The + migration policy determines whether a migration is performed as part of + the retype operation; it is not related to the retype itself, which simply + changes the volume type of the volume. diff --git a/releasenotes/notes/router-create-with-qos-policy-b94967a35351cddd.yaml b/releasenotes/notes/router-create-with-qos-policy-b94967a35351cddd.yaml new file mode 100644 index 0000000000..67150ece9d --- /dev/null +++ b/releasenotes/notes/router-create-with-qos-policy-b94967a35351cddd.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The router creation command now has a ``--qos-policy`` parameter, which + allows setting a QoS policy for the provided external gateways (one or + many). It is mandatory to define an external gateway if a QoS policy is + set. diff --git a/releasenotes/notes/server-create-no-security-group-option-627697bddae429b1.yaml b/releasenotes/notes/server-create-no-security-group-option-627697bddae429b1.yaml new file mode 100644 index 0000000000..b2169d36e3 --- /dev/null +++ b/releasenotes/notes/server-create-no-security-group-option-627697bddae429b1.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The ``server create`` command now supports a ``--no-security-group`` + option. When provided, no security groups will be associated with ports + created and attached to the server during server creation. This does not + affect pre-created ports. diff --git a/releasenotes/notes/server-create-server-group-a5b630f2a64de28d.yaml b/releasenotes/notes/server-create-server-group-a5b630f2a64de28d.yaml new file mode 100644 index 0000000000..f9aaf4b46d --- /dev/null +++ b/releasenotes/notes/server-create-server-group-a5b630f2a64de28d.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``server create`` command now accepts a new option, ``--server-group``, + which is a shortcut for configuring the ``group`` scheduler hint. diff --git a/releasenotes/notes/story-2010751-server-rebuild-wait-shutoff-c84cddcd3f15e9ce.yaml b/releasenotes/notes/story-2010751-server-rebuild-wait-shutoff-c84cddcd3f15e9ce.yaml new file mode 100644 index 0000000000..58c67e277b --- /dev/null +++ b/releasenotes/notes/story-2010751-server-rebuild-wait-shutoff-c84cddcd3f15e9ce.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + The ``openstack server rebuild`` command now fails early if the server is + not in a state supported for rebuild - either ``ACTIVE``, ``ERROR`` or + ``SHUTOFF``. 
diff --git a/releasenotes/notes/story-2010751-server-rebuild-wait-shutoff-c84cddcd3f15e9ce.yaml b/releasenotes/notes/story-2010751-server-rebuild-wait-shutoff-c84cddcd3f15e9ce.yaml new file mode 100644 index 0000000000..58c67e277b --- /dev/null +++ b/releasenotes/notes/story-2010751-server-rebuild-wait-shutoff-c84cddcd3f15e9ce.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + The ``openstack server rebuild`` command now fails early if the server is + not in a state supported for rebuild - either ``ACTIVE``, ``ERROR``, or + ``SHUTOFF``. + See `OpenStack Compute API reference for server rebuild action + `_. +fixes: + - | + ``openstack server rebuild --wait`` now works properly for servers in + ``SHUTOFF`` state without hanging. + [Story `2010751 `_] diff --git a/releasenotes/notes/switch-server-lock-to-sdk-d5dd17e4987233a5.yaml b/releasenotes/notes/switch-server-lock-to-sdk-d5dd17e4987233a5.yaml new file mode 100644 index 0000000000..8e9a519af4 --- /dev/null +++ b/releasenotes/notes/switch-server-lock-to-sdk-d5dd17e4987233a5.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + The ``server lock`` and ``server unlock`` commands now use the SDK. diff --git a/releasenotes/notes/volume-backup-created-at-list-v3-47400b31be5143bc.yaml b/releasenotes/notes/volume-backup-created-at-list-v3-47400b31be5143bc.yaml new file mode 100644 index 0000000000..88aa7143fa --- /dev/null +++ b/releasenotes/notes/volume-backup-created-at-list-v3-47400b31be5143bc.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Listing volume backups now shows the ``created_at`` column when the + volume v3 API is used. diff --git a/releasenotes/notes/volume-service-set-fix-345a8bc84267f743.yaml b/releasenotes/notes/volume-service-set-fix-345a8bc84267f743.yaml new file mode 100644 index 0000000000..e21e12dbbc --- /dev/null +++ b/releasenotes/notes/volume-service-set-fix-345a8bc84267f743.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + The ``volume service set`` command did not work due to a bad API call. + [Bug `2116969 `_] diff --git a/releasenotes/notes/volume-type-extra-specs-22a22fcb6e269832.yaml b/releasenotes/notes/volume-type-extra-specs-22a22fcb6e269832.yaml new file mode 100644 index 0000000000..aab21d00a3 --- /dev/null +++ b/releasenotes/notes/volume-type-extra-specs-22a22fcb6e269832.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The ``volume type create``, ``volume type set``, ``volume type list`` + commands now accept four new options - ``--multiattach``, ``--cacheable``, + ``--replicated``, and ``--availability-zone`` - which are shortcuts for + setting or filtering on the relevant properties on the volume type. diff --git a/releasenotes/notes/volume-type-list-properties-filter-8532f96d16733915.yaml b/releasenotes/notes/volume-type-list-properties-filter-8532f96d16733915.yaml new file mode 100644 index 0000000000..178622c475 --- /dev/null +++ b/releasenotes/notes/volume-type-list-properties-filter-8532f96d16733915.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The ``volume type list`` command now accepts a ``--property <key>=<value>`` + option, allowing users to filter volume types by their extra spec + properties. diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst index d1238479ba..2c9a36fae4 100644 --- a/releasenotes/source/2023.1.rst +++ b/releasenotes/source/2023.1.rst @@ -3,4 +3,4 @@ =========================== .. release-notes:: - :branch: stable/2023.1 + :branch: unmaintained/2023.1 diff --git a/releasenotes/source/2023.2.rst b/releasenotes/source/2023.2.rst new file mode 100644 index 0000000000..a4838d7d0e --- /dev/null +++ b/releasenotes/source/2023.2.rst @@ -0,0 +1,6 @@ +=========================== +2023.2 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2023.2 diff --git a/releasenotes/source/2024.1.rst b/releasenotes/source/2024.1.rst new file mode 100644 index 0000000000..6896656be6 --- /dev/null +++ b/releasenotes/source/2024.1.rst @@ -0,0 +1,6 @@ +=========================== +2024.1 Series Release Notes +=========================== + +.. 
release-notes:: + :branch: unmaintained/2024.1 diff --git a/releasenotes/source/2024.2.rst b/releasenotes/source/2024.2.rst new file mode 100644 index 0000000000..aaebcbc8c3 --- /dev/null +++ b/releasenotes/source/2024.2.rst @@ -0,0 +1,6 @@ +=========================== +2024.2 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2024.2 diff --git a/releasenotes/source/2025.1.rst b/releasenotes/source/2025.1.rst new file mode 100644 index 0000000000..3add0e53aa --- /dev/null +++ b/releasenotes/source/2025.1.rst @@ -0,0 +1,6 @@ +=========================== +2025.1 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2025.1 diff --git a/releasenotes/source/2025.2.rst b/releasenotes/source/2025.2.rst new file mode 100644 index 0000000000..4dae18d869 --- /dev/null +++ b/releasenotes/source/2025.2.rst @@ -0,0 +1,6 @@ +=========================== +2025.2 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2025.2 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py index 35f061afa2..fa48392f25 100644 --- a/releasenotes/source/conf.py +++ b/releasenotes/source/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -43,7 +42,7 @@ # https://github.com/sphinx-doc/sphinx/issues/10112 this may be applied as a # dirty hack until the issue with replacing extlinks is resolved linklogger = logging.getLogger('sphinx.ext.extlinks') -linklogger.setLevel(40) # Ignore messages less severe than ERROR +linklogger.setLevel(40) # Ignore messages less severe than ERROR extensions = [ 'openstackdocstheme', @@ -53,7 +52,7 @@ # openstackdocstheme options openstackdocs_repo_name = 'openstack/python-openstackclient' -openstackdocs_use_storyboard = True +openstackdocs_use_storyboard = False openstackdocs_auto_name = False # Set aliases for extlinks @@ -63,15 +62,15 @@ extlinks = { 'lpbug': ( 'https://bugs.launchpad.net/bugs/%s', - 'Bug ', + 'Bug %s', ), 'oscbp': ( 'https://blueprints.launchpad.net/python-openstackclient/+spec/%s', - '', + None, ), 'oscdoc': ( 'https://docs.openstack.org/python-openstackclient/latest/%s.html', - '', + None, ), } @@ -224,10 +223,8 @@ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # 'preamble': '', } @@ -235,13 +232,15 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). -latex_documents = [( - 'index', - 'OpenStackClientReleaseNotes.tex', - 'OpenStackClient Release Notes Documentation', - 'OpenStackClient Developers', - 'manual', -)] +latex_documents = [ + ( + 'index', + 'OpenStackClientReleaseNotes.tex', + 'OpenStackClient Release Notes Documentation', + 'OpenStackClient Developers', + 'manual', + ) +] # The name of an image file (relative to this directory) to place at the top of # the title page. @@ -268,13 +267,15 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
-man_pages = [( - 'index', - 'openstackclientreleasenotes', - 'OpenStackClient Release Notes Documentation', - ['OpenStackClient Developers'], - 1, -)] +man_pages = [ + ( + 'index', + 'openstackclientreleasenotes', + 'OpenStackClient Release Notes Documentation', + ['OpenStackClient Developers'], + 1, + ) +] # If true, show URL addresses after external links. # man_show_urls = False @@ -285,15 +286,17 @@ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) -texinfo_documents = [( - 'index', - 'OpenStackClientReleaseNotes', - 'OpenStackclient Release Notes Documentation', - 'OpenStackclient Developers', - 'OpenStackClientReleaseNotes', - 'A unified command-line client for OpenStack.', - 'Miscellaneous', -)] +texinfo_documents = [ + ( + 'index', + 'OpenStackClientReleaseNotes', + 'OpenStackclient Release Notes Documentation', + 'OpenStackclient Developers', + 'OpenStackClientReleaseNotes', + 'A unified command-line client for OpenStack.', + 'Miscellaneous', + ) +] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst index de09e52e2a..2b28cfb92e 100644 --- a/releasenotes/source/index.rst +++ b/releasenotes/source/index.rst @@ -6,6 +6,11 @@ OpenStackClient Release Notes :maxdepth: 1 unreleased + 2025.2 + 2025.1 + 2024.2 + 2024.1 + 2023.2 2023.1 zed yoga @@ -27,14 +32,22 @@ OpenStackClient Release Notes OpenStack Releases ------------------ -OpenStackClient is compatible with all currently supported OpenStack releases, -it does not require maintaining a 'Mitaka' version to match to a Mitala-release -cloud. The OpenStackClient release that was current when the corresponding -OpenStack release was made is shown below: +OpenStackClient is intended to be backwards compatible with all currently +supported OpenStack releases, so it does not require using a particular +release of the tool to work with a cloud running the corresponding OpenStack +release. The OpenStackClient release that was current when the corresponding +OpenStack release was made is shown below: ================= ======================= OpenStack Release OpenStackClient Release ================= ======================= +Antelope/2023.1 6.1.0 +Zed 6.0.0 +Yoga 5.7.0 +Xena 5.6.0 +Wallaby 5.5.0 +Victoria 5.3.0 +Ussuri 5.0.0 Train 4.0.0 Stein 3.18.0 Rocky 3.16.0 diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst index 220d13c2cd..77156c35bb 100644 --- a/releasenotes/source/unreleased.rst +++ b/releasenotes/source/unreleased.rst @@ -3,4 +3,4 @@ Current Release Notes ===================== .. release-notes:: - :earliest-version: 3.0.0 + :earliest-version: 6.1.0 diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst index 4efc7b6f3b..8ce9334198 100644 --- a/releasenotes/source/victoria.rst +++ b/releasenotes/source/victoria.rst @@ -3,4 +3,4 @@ Victoria Series Release Notes ============================= .. release-notes:: - :branch: stable/victoria + :branch: unmaintained/victoria diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst index d77b565995..bcf35c5f80 100644 --- a/releasenotes/source/wallaby.rst +++ b/releasenotes/source/wallaby.rst @@ -3,4 +3,4 @@ Wallaby Series Release Notes ============================ .. 
release-notes:: - :branch: stable/wallaby + :branch: unmaintained/wallaby diff --git a/releasenotes/source/xena.rst b/releasenotes/source/xena.rst index 1be85be3eb..d19eda4886 100644 --- a/releasenotes/source/xena.rst +++ b/releasenotes/source/xena.rst @@ -3,4 +3,4 @@ Xena Series Release Notes ========================= .. release-notes:: - :branch: stable/xena + :branch: unmaintained/xena diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst index 7cd5e908a7..43cafdea89 100644 --- a/releasenotes/source/yoga.rst +++ b/releasenotes/source/yoga.rst @@ -3,4 +3,4 @@ Yoga Series Release Notes ========================= .. release-notes:: - :branch: stable/yoga + :branch: unmaintained/yoga diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst index 9608c05e45..6cc2b1554c 100644 --- a/releasenotes/source/zed.rst +++ b/releasenotes/source/zed.rst @@ -3,4 +3,4 @@ Zed Series Release Notes ======================== .. release-notes:: - :branch: stable/zed + :branch: unmaintained/zed diff --git a/requirements.txt b/requirements.txt index 1ae8cec422..fc31d78206 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,13 +4,13 @@ pbr!=2.1.0,>=2.0.0 # Apache-2.0 -cliff>=3.5.0 # Apache-2.0 +cryptography>=2.7 # BSD/Apache-2.0 +cliff>=4.13.0 # Apache-2.0 iso8601>=0.1.11 # MIT -openstacksdk>=0.103.0 # Apache-2.0 +openstacksdk>=4.6.0 # Apache-2.0 osc-lib>=2.3.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 -oslo.utils>=3.33.0 # Apache-2.0 python-keystoneclient>=3.22.0 # Apache-2.0 -python-novaclient>=18.1.0 # Apache-2.0 python-cinderclient>=3.3.0 # Apache-2.0 +requests>=2.27.0 # Apache-2.0 stevedore>=2.0.1 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg index aee5e99b83..28a5b7809f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,830 +1,2 @@ [metadata] name = python-openstackclient -summary = OpenStack Command-line Client -description_file = - README.rst -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/python-openstackclient/latest/ -python_requires = >=3.8 -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - -[files] -packages = - openstackclient - -[entry_points] -console_scripts = - openstack = openstackclient.shell:main - -openstack.cli = - command_list = openstackclient.common.module:ListCommand - module_list = openstackclient.common.module:ListModule - -openstack.cli.base = - compute = openstackclient.compute.client - identity = openstackclient.identity.client - image = openstackclient.image.client - network = openstackclient.network.client - object_store = openstackclient.object.client - volume = openstackclient.volume.client - -openstack.common = - availability_zone_list = openstackclient.common.availability_zone:ListAvailabilityZone - configuration_show = openstackclient.common.configuration:ShowConfiguration - extension_list = openstackclient.common.extension:ListExtension - extension_show = openstackclient.common.extension:ShowExtension - limits_show = openstackclient.common.limits:ShowLimits - project_cleanup = openstackclient.common.project_cleanup:ProjectCleanup - project_purge = openstackclient.common.project_purge:ProjectPurge - quota_list = openstackclient.common.quota:ListQuota - 
quota_set = openstackclient.common.quota:SetQuota - quota_show = openstackclient.common.quota:ShowQuota - quota_delete = openstackclient.common.quota:DeleteQuota - versions_show = openstackclient.common.versions:ShowVersions - -openstack.compute.v2 = - compute_agent_create = openstackclient.compute.v2.agent:CreateAgent - compute_agent_delete = openstackclient.compute.v2.agent:DeleteAgent - compute_agent_list = openstackclient.compute.v2.agent:ListAgent - compute_agent_set = openstackclient.compute.v2.agent:SetAgent - - aggregate_add_host = openstackclient.compute.v2.aggregate:AddAggregateHost - aggregate_create = openstackclient.compute.v2.aggregate:CreateAggregate - aggregate_delete = openstackclient.compute.v2.aggregate:DeleteAggregate - aggregate_list = openstackclient.compute.v2.aggregate:ListAggregate - aggregate_remove_host = openstackclient.compute.v2.aggregate:RemoveAggregateHost - aggregate_set = openstackclient.compute.v2.aggregate:SetAggregate - aggregate_show = openstackclient.compute.v2.aggregate:ShowAggregate - aggregate_unset = openstackclient.compute.v2.aggregate:UnsetAggregate - aggregate_cache_image = openstackclient.compute.v2.aggregate:CacheImageForAggregate - - compute_service_delete = openstackclient.compute.v2.service:DeleteService - compute_service_list = openstackclient.compute.v2.service:ListService - compute_service_set = openstackclient.compute.v2.service:SetService - - console_log_show = openstackclient.compute.v2.console:ShowConsoleLog - console_url_show = openstackclient.compute.v2.console:ShowConsoleURL - - flavor_create = openstackclient.compute.v2.flavor:CreateFlavor - flavor_delete = openstackclient.compute.v2.flavor:DeleteFlavor - flavor_list = openstackclient.compute.v2.flavor:ListFlavor - flavor_show = openstackclient.compute.v2.flavor:ShowFlavor - flavor_set = openstackclient.compute.v2.flavor:SetFlavor - flavor_unset = openstackclient.compute.v2.flavor:UnsetFlavor - - host_list = openstackclient.compute.v2.host:ListHost - host_set = openstackclient.compute.v2.host:SetHost - host_show = openstackclient.compute.v2.host:ShowHost - - hypervisor_list = openstackclient.compute.v2.hypervisor:ListHypervisor - hypervisor_show = openstackclient.compute.v2.hypervisor:ShowHypervisor - - hypervisor_stats_show = openstackclient.compute.v2.hypervisor_stats:ShowHypervisorStats - - keypair_create = openstackclient.compute.v2.keypair:CreateKeypair - keypair_delete = openstackclient.compute.v2.keypair:DeleteKeypair - keypair_list = openstackclient.compute.v2.keypair:ListKeypair - keypair_show = openstackclient.compute.v2.keypair:ShowKeypair - - server_add_fixed_ip = openstackclient.compute.v2.server:AddFixedIP - server_add_floating_ip = openstackclient.compute.v2.server:AddFloatingIP - server_add_port = openstackclient.compute.v2.server:AddPort - server_add_network = openstackclient.compute.v2.server:AddNetwork - server_add_security_group = openstackclient.compute.v2.server:AddServerSecurityGroup - server_add_volume = openstackclient.compute.v2.server:AddServerVolume - server_create = openstackclient.compute.v2.server:CreateServer - server_delete = openstackclient.compute.v2.server:DeleteServer - server_dump_create = openstackclient.compute.v2.server:CreateServerDump - server_evacuate = openstackclient.compute.v2.server:EvacuateServer - server_list = openstackclient.compute.v2.server:ListServer - server_lock = openstackclient.compute.v2.server:LockServer - server_migrate = openstackclient.compute.v2.server:MigrateServer - server_migrate_confirm = 
openstackclient.compute.v2.server:MigrateConfirm - server_migrate_revert = openstackclient.compute.v2.server:MigrateRevert - server_migration_confirm = openstackclient.compute.v2.server:ConfirmMigration - server_migration_revert = openstackclient.compute.v2.server:RevertMigration - server_pause = openstackclient.compute.v2.server:PauseServer - server_reboot = openstackclient.compute.v2.server:RebootServer - server_rebuild = openstackclient.compute.v2.server:RebuildServer - server_remove_fixed_ip = openstackclient.compute.v2.server:RemoveFixedIP - server_remove_floating_ip = openstackclient.compute.v2.server:RemoveFloatingIP - server_remove_port = openstackclient.compute.v2.server:RemovePort - server_remove_network = openstackclient.compute.v2.server:RemoveNetwork - server_remove_security_group = openstackclient.compute.v2.server:RemoveServerSecurityGroup - server_remove_volume = openstackclient.compute.v2.server:RemoveServerVolume - server_rescue = openstackclient.compute.v2.server:RescueServer - server_resize = openstackclient.compute.v2.server:ResizeServer - server_resize_confirm = openstackclient.compute.v2.server:ResizeConfirm - server_resize_revert = openstackclient.compute.v2.server:ResizeRevert - server_restore = openstackclient.compute.v2.server:RestoreServer - server_resume = openstackclient.compute.v2.server:ResumeServer - server_set = openstackclient.compute.v2.server:SetServer - server_shelve = openstackclient.compute.v2.server:ShelveServer - server_show = openstackclient.compute.v2.server:ShowServer - server_ssh = openstackclient.compute.v2.server:SshServer - server_start = openstackclient.compute.v2.server:StartServer - server_stop = openstackclient.compute.v2.server:StopServer - server_suspend = openstackclient.compute.v2.server:SuspendServer - server_unlock = openstackclient.compute.v2.server:UnlockServer - server_unpause = openstackclient.compute.v2.server:UnpauseServer - server_unrescue = openstackclient.compute.v2.server:UnrescueServer - server_unset = openstackclient.compute.v2.server:UnsetServer - server_unshelve = openstackclient.compute.v2.server:UnshelveServer - - server_backup_create = openstackclient.compute.v2.server_backup:CreateServerBackup - - server_event_list = openstackclient.compute.v2.server_event:ListServerEvent - server_event_show = openstackclient.compute.v2.server_event:ShowServerEvent - - server_group_create = openstackclient.compute.v2.server_group:CreateServerGroup - server_group_delete = openstackclient.compute.v2.server_group:DeleteServerGroup - server_group_list = openstackclient.compute.v2.server_group:ListServerGroup - server_group_show = openstackclient.compute.v2.server_group:ShowServerGroup - - server_image_create = openstackclient.compute.v2.server_image:CreateServerImage - - server_migration_abort = openstackclient.compute.v2.server_migration:AbortMigration - server_migration_force_complete = openstackclient.compute.v2.server_migration:ForceCompleteMigration - server_migration_list = openstackclient.compute.v2.server_migration:ListMigration - server_migration_show = openstackclient.compute.v2.server_migration:ShowMigration - - server_volume_list = openstackclient.compute.v2.server_volume:ListServerVolume - server_volume_set = openstackclient.compute.v2.server_volume:SetServerVolume - server_volume_update = openstackclient.compute.v2.server_volume:UpdateServerVolume - - usage_list = openstackclient.compute.v2.usage:ListUsage - usage_show = openstackclient.compute.v2.usage:ShowUsage - -openstack.identity.v2 = - catalog_list = 
openstackclient.identity.v2_0.catalog:ListCatalog - catalog_show = openstackclient.identity.v2_0.catalog:ShowCatalog - - ec2_credentials_create = openstackclient.identity.v2_0.ec2creds:CreateEC2Creds - ec2_credentials_delete = openstackclient.identity.v2_0.ec2creds:DeleteEC2Creds - ec2_credentials_list = openstackclient.identity.v2_0.ec2creds:ListEC2Creds - ec2_credentials_show = openstackclient.identity.v2_0.ec2creds:ShowEC2Creds - - endpoint_create = openstackclient.identity.v2_0.endpoint:CreateEndpoint - endpoint_delete = openstackclient.identity.v2_0.endpoint:DeleteEndpoint - endpoint_list = openstackclient.identity.v2_0.endpoint:ListEndpoint - endpoint_show = openstackclient.identity.v2_0.endpoint:ShowEndpoint - - project_create = openstackclient.identity.v2_0.project:CreateProject - project_delete = openstackclient.identity.v2_0.project:DeleteProject - project_list = openstackclient.identity.v2_0.project:ListProject - project_set = openstackclient.identity.v2_0.project:SetProject - project_show = openstackclient.identity.v2_0.project:ShowProject - project_unset = openstackclient.identity.v2_0.project:UnsetProject - - role_add = openstackclient.identity.v2_0.role:AddRole - role_create = openstackclient.identity.v2_0.role:CreateRole - role_delete = openstackclient.identity.v2_0.role:DeleteRole - role_list = openstackclient.identity.v2_0.role:ListRole - role_remove = openstackclient.identity.v2_0.role:RemoveRole - role_show = openstackclient.identity.v2_0.role:ShowRole - role_assignment_list = openstackclient.identity.v2_0.role_assignment:ListRoleAssignment - - service_create = openstackclient.identity.v2_0.service:CreateService - service_delete = openstackclient.identity.v2_0.service:DeleteService - service_list = openstackclient.identity.v2_0.service:ListService - service_show = openstackclient.identity.v2_0.service:ShowService - - token_issue = openstackclient.identity.v2_0.token:IssueToken - token_revoke = openstackclient.identity.v2_0.token:RevokeToken - - user_create = openstackclient.identity.v2_0.user:CreateUser - user_delete = openstackclient.identity.v2_0.user:DeleteUser - user_list = openstackclient.identity.v2_0.user:ListUser - user_set = openstackclient.identity.v2_0.user:SetUser - user_show = openstackclient.identity.v2_0.user:ShowUser - -openstack.identity.v3 = - access_token_create = openstackclient.identity.v3.token:CreateAccessToken - - access_rule_delete = openstackclient.identity.v3.access_rule:DeleteAccessRule - access_rule_list = openstackclient.identity.v3.access_rule:ListAccessRule - access_rule_show = openstackclient.identity.v3.access_rule:ShowAccessRule - - application_credential_create = openstackclient.identity.v3.application_credential:CreateApplicationCredential - application_credential_delete = openstackclient.identity.v3.application_credential:DeleteApplicationCredential - application_credential_list = openstackclient.identity.v3.application_credential:ListApplicationCredential - application_credential_show = openstackclient.identity.v3.application_credential:ShowApplicationCredential - - catalog_list = openstackclient.identity.v3.catalog:ListCatalog - catalog_show = openstackclient.identity.v3.catalog:ShowCatalog - - consumer_create = openstackclient.identity.v3.consumer:CreateConsumer - consumer_delete = openstackclient.identity.v3.consumer:DeleteConsumer - consumer_list = openstackclient.identity.v3.consumer:ListConsumer - consumer_set = openstackclient.identity.v3.consumer:SetConsumer - consumer_show = 
openstackclient.identity.v3.consumer:ShowConsumer - - credential_create = openstackclient.identity.v3.credential:CreateCredential - credential_delete = openstackclient.identity.v3.credential:DeleteCredential - credential_list = openstackclient.identity.v3.credential:ListCredential - credential_set = openstackclient.identity.v3.credential:SetCredential - credential_show = openstackclient.identity.v3.credential:ShowCredential - - domain_create = openstackclient.identity.v3.domain:CreateDomain - domain_delete = openstackclient.identity.v3.domain:DeleteDomain - domain_list = openstackclient.identity.v3.domain:ListDomain - domain_set = openstackclient.identity.v3.domain:SetDomain - domain_show = openstackclient.identity.v3.domain:ShowDomain - - ec2_credentials_create = openstackclient.identity.v3.ec2creds:CreateEC2Creds - ec2_credentials_delete = openstackclient.identity.v3.ec2creds:DeleteEC2Creds - ec2_credentials_list = openstackclient.identity.v3.ec2creds:ListEC2Creds - ec2_credentials_show = openstackclient.identity.v3.ec2creds:ShowEC2Creds - - endpoint_add_project = openstackclient.identity.v3.endpoint:AddProjectToEndpoint - endpoint_create = openstackclient.identity.v3.endpoint:CreateEndpoint - endpoint_delete = openstackclient.identity.v3.endpoint:DeleteEndpoint - endpoint_list = openstackclient.identity.v3.endpoint:ListEndpoint - endpoint_remove_project = openstackclient.identity.v3.endpoint:RemoveProjectFromEndpoint - endpoint_set = openstackclient.identity.v3.endpoint:SetEndpoint - endpoint_show = openstackclient.identity.v3.endpoint:ShowEndpoint - - endpoint_group_add_project = openstackclient.identity.v3.endpoint_group:AddProjectToEndpointGroup - endpoint_group_create = openstackclient.identity.v3.endpoint_group:CreateEndpointGroup - endpoint_group_delete = openstackclient.identity.v3.endpoint_group:DeleteEndpointGroup - endpoint_group_list = openstackclient.identity.v3.endpoint_group:ListEndpointGroup - endpoint_group_remove_project = openstackclient.identity.v3.endpoint_group:RemoveProjectFromEndpointGroup - endpoint_group_set = openstackclient.identity.v3.endpoint_group:SetEndpointGroup - endpoint_group_show = openstackclient.identity.v3.endpoint_group:ShowEndpointGroup - - federation_domain_list = openstackclient.identity.v3.unscoped_saml:ListAccessibleDomains - federation_project_list = openstackclient.identity.v3.unscoped_saml:ListAccessibleProjects - - federation_protocol_create = openstackclient.identity.v3.federation_protocol:CreateProtocol - federation_protocol_delete = openstackclient.identity.v3.federation_protocol:DeleteProtocol - federation_protocol_list = openstackclient.identity.v3.federation_protocol:ListProtocols - federation_protocol_set = openstackclient.identity.v3.federation_protocol:SetProtocol - federation_protocol_show = openstackclient.identity.v3.federation_protocol:ShowProtocol - - group_add_user = openstackclient.identity.v3.group:AddUserToGroup - group_contains_user = openstackclient.identity.v3.group:CheckUserInGroup - group_create = openstackclient.identity.v3.group:CreateGroup - group_delete = openstackclient.identity.v3.group:DeleteGroup - group_list = openstackclient.identity.v3.group:ListGroup - group_remove_user = openstackclient.identity.v3.group:RemoveUserFromGroup - group_set = openstackclient.identity.v3.group:SetGroup - group_show = openstackclient.identity.v3.group:ShowGroup - - identity_provider_create = openstackclient.identity.v3.identity_provider:CreateIdentityProvider - identity_provider_delete = 
openstackclient.identity.v3.identity_provider:DeleteIdentityProvider - identity_provider_list = openstackclient.identity.v3.identity_provider:ListIdentityProvider - identity_provider_set = openstackclient.identity.v3.identity_provider:SetIdentityProvider - identity_provider_show = openstackclient.identity.v3.identity_provider:ShowIdentityProvider - - implied_role_create = openstackclient.identity.v3.implied_role:CreateImpliedRole - implied_role_delete = openstackclient.identity.v3.implied_role:DeleteImpliedRole - implied_role_list = openstackclient.identity.v3.implied_role:ListImpliedRole - - limit_create = openstackclient.identity.v3.limit:CreateLimit - limit_delete = openstackclient.identity.v3.limit:DeleteLimit - limit_list = openstackclient.identity.v3.limit:ListLimit - limit_set = openstackclient.identity.v3.limit:SetLimit - limit_show = openstackclient.identity.v3.limit:ShowLimit - - mapping_create = openstackclient.identity.v3.mapping:CreateMapping - mapping_delete = openstackclient.identity.v3.mapping:DeleteMapping - mapping_list = openstackclient.identity.v3.mapping:ListMapping - mapping_set = openstackclient.identity.v3.mapping:SetMapping - mapping_show = openstackclient.identity.v3.mapping:ShowMapping - - policy_create = openstackclient.identity.v3.policy:CreatePolicy - policy_delete = openstackclient.identity.v3.policy:DeletePolicy - policy_list = openstackclient.identity.v3.policy:ListPolicy - policy_set = openstackclient.identity.v3.policy:SetPolicy - policy_show = openstackclient.identity.v3.policy:ShowPolicy - - project_create = openstackclient.identity.v3.project:CreateProject - project_delete = openstackclient.identity.v3.project:DeleteProject - project_list = openstackclient.identity.v3.project:ListProject - project_set = openstackclient.identity.v3.project:SetProject - project_show = openstackclient.identity.v3.project:ShowProject - - region_create = openstackclient.identity.v3.region:CreateRegion - region_delete = openstackclient.identity.v3.region:DeleteRegion - region_list = openstackclient.identity.v3.region:ListRegion - region_set = openstackclient.identity.v3.region:SetRegion - region_show = openstackclient.identity.v3.region:ShowRegion - - registered_limit_create = openstackclient.identity.v3.registered_limit:CreateRegisteredLimit - registered_limit_delete = openstackclient.identity.v3.registered_limit:DeleteRegisteredLimit - registered_limit_list = openstackclient.identity.v3.registered_limit:ListRegisteredLimit - registered_limit_set = openstackclient.identity.v3.registered_limit:SetRegisteredLimit - registered_limit_show = openstackclient.identity.v3.registered_limit:ShowRegisteredLimit - - request_token_authorize = openstackclient.identity.v3.token:AuthorizeRequestToken - request_token_create = openstackclient.identity.v3.token:CreateRequestToken - - role_add = openstackclient.identity.v3.role:AddRole - role_create = openstackclient.identity.v3.role:CreateRole - role_delete = openstackclient.identity.v3.role:DeleteRole - role_list = openstackclient.identity.v3.role:ListRole - role_remove = openstackclient.identity.v3.role:RemoveRole - role_show = openstackclient.identity.v3.role:ShowRole - role_set = openstackclient.identity.v3.role:SetRole - role_assignment_list = openstackclient.identity.v3.role_assignment:ListRoleAssignment - - service_create = openstackclient.identity.v3.service:CreateService - service_delete = openstackclient.identity.v3.service:DeleteService - service_list = openstackclient.identity.v3.service:ListService - service_show = 
openstackclient.identity.v3.service:ShowService - service_set = openstackclient.identity.v3.service:SetService - - service_provider_create = openstackclient.identity.v3.service_provider:CreateServiceProvider - service_provider_delete = openstackclient.identity.v3.service_provider:DeleteServiceProvider - service_provider_list = openstackclient.identity.v3.service_provider:ListServiceProvider - service_provider_set = openstackclient.identity.v3.service_provider:SetServiceProvider - service_provider_show = openstackclient.identity.v3.service_provider:ShowServiceProvider - - token_issue = openstackclient.identity.v3.token:IssueToken - token_revoke = openstackclient.identity.v3.token:RevokeToken - - trust_create = openstackclient.identity.v3.trust:CreateTrust - trust_delete = openstackclient.identity.v3.trust:DeleteTrust - trust_list = openstackclient.identity.v3.trust:ListTrust - trust_show = openstackclient.identity.v3.trust:ShowTrust - - user_create = openstackclient.identity.v3.user:CreateUser - user_delete = openstackclient.identity.v3.user:DeleteUser - user_list = openstackclient.identity.v3.user:ListUser - user_set = openstackclient.identity.v3.user:SetUser - user_password_set = openstackclient.identity.v3.user:SetPasswordUser - user_show = openstackclient.identity.v3.user:ShowUser - -openstack.image.v1 = - image_create = openstackclient.image.v1.image:CreateImage - image_delete = openstackclient.image.v1.image:DeleteImage - image_list = openstackclient.image.v1.image:ListImage - image_save = openstackclient.image.v1.image:SaveImage - image_set = openstackclient.image.v1.image:SetImage - image_show = openstackclient.image.v1.image:ShowImage - -openstack.image.v2 = - image_add_project = openstackclient.image.v2.image:AddProjectToImage - image_create = openstackclient.image.v2.image:CreateImage - image_delete = openstackclient.image.v2.image:DeleteImage - image_list = openstackclient.image.v2.image:ListImage - image_member_list = openstackclient.image.v2.image:ListImageProjects - image_remove_project = openstackclient.image.v2.image:RemoveProjectImage - image_save = openstackclient.image.v2.image:SaveImage - image_show = openstackclient.image.v2.image:ShowImage - image_set = openstackclient.image.v2.image:SetImage - image_unset = openstackclient.image.v2.image:UnsetImage - image_stage = openstackclient.image.v2.image:StageImage - image_task_show = openstackclient.image.v2.task:ShowTask - image_task_list = openstackclient.image.v2.task:ListTask - - image_metadef_namespace_create = openstackclient.image.v2.metadef_namespaces:CreateMetadefNameSpace - image_metadef_namespace_delete = openstackclient.image.v2.metadef_namespaces:DeleteMetadefNameSpace - image_metadef_namespace_list = openstackclient.image.v2.metadef_namespaces:ListMetadefNameSpaces - image_metadef_namespace_set = openstackclient.image.v2.metadef_namespaces:SetMetadefNameSpace - image_metadef_namespace_show = openstackclient.image.v2.metadef_namespaces:ShowMetadefNameSpace - -openstack.network.v2 = - address_group_create = openstackclient.network.v2.address_group:CreateAddressGroup - address_group_delete = openstackclient.network.v2.address_group:DeleteAddressGroup - address_group_list = openstackclient.network.v2.address_group:ListAddressGroup - address_group_set = openstackclient.network.v2.address_group:SetAddressGroup - address_group_show = openstackclient.network.v2.address_group:ShowAddressGroup - address_group_unset = openstackclient.network.v2.address_group:UnsetAddressGroup - - address_scope_create = 
openstackclient.network.v2.address_scope:CreateAddressScope - address_scope_delete = openstackclient.network.v2.address_scope:DeleteAddressScope - address_scope_list = openstackclient.network.v2.address_scope:ListAddressScope - address_scope_set = openstackclient.network.v2.address_scope:SetAddressScope - address_scope_show = openstackclient.network.v2.address_scope:ShowAddressScope - - floating_ip_create = openstackclient.network.v2.floating_ip:CreateFloatingIP - floating_ip_delete = openstackclient.network.v2.floating_ip:DeleteFloatingIP - floating_ip_list = openstackclient.network.v2.floating_ip:ListFloatingIP - floating_ip_set = openstackclient.network.v2.floating_ip:SetFloatingIP - floating_ip_show = openstackclient.network.v2.floating_ip:ShowFloatingIP - floating_ip_unset = openstackclient.network.v2.floating_ip:UnsetFloatingIP - - floating_ip_pool_list = openstackclient.network.v2.floating_ip_pool:ListFloatingIPPool - - floating_ip_port_forwarding_create = openstackclient.network.v2.floating_ip_port_forwarding:CreateFloatingIPPortForwarding - floating_ip_port_forwarding_delete = openstackclient.network.v2.floating_ip_port_forwarding:DeleteFloatingIPPortForwarding - floating_ip_port_forwarding_list = openstackclient.network.v2.floating_ip_port_forwarding:ListFloatingIPPortForwarding - floating_ip_port_forwarding_set = openstackclient.network.v2.floating_ip_port_forwarding:SetFloatingIPPortForwarding - floating_ip_port_forwarding_show = openstackclient.network.v2.floating_ip_port_forwarding:ShowFloatingIPPortForwarding - - ip_availability_list = openstackclient.network.v2.ip_availability:ListIPAvailability - ip_availability_show = openstackclient.network.v2.ip_availability:ShowIPAvailability - - local_ip_create = openstackclient.network.v2.local_ip:CreateLocalIP - local_ip_delete = openstackclient.network.v2.local_ip:DeleteLocalIP - local_ip_list = openstackclient.network.v2.local_ip:ListLocalIP - local_ip_set = openstackclient.network.v2.local_ip:SetLocalIP - local_ip_show = openstackclient.network.v2.local_ip:ShowLocalIP - - local_ip_association_create = openstackclient.network.v2.local_ip_association:CreateLocalIPAssociation - local_ip_association_delete = openstackclient.network.v2.local_ip_association:DeleteLocalIPAssociation - local_ip_association_list = openstackclient.network.v2.local_ip_association:ListLocalIPAssociation - - network_agent_add_network = openstackclient.network.v2.network_agent:AddNetworkToAgent - network_agent_add_router = openstackclient.network.v2.network_agent:AddRouterToAgent - network_agent_delete = openstackclient.network.v2.network_agent:DeleteNetworkAgent - network_agent_list = openstackclient.network.v2.network_agent:ListNetworkAgent - network_agent_remove_network = openstackclient.network.v2.network_agent:RemoveNetworkFromAgent - network_agent_remove_router = openstackclient.network.v2.network_agent:RemoveRouterFromAgent - network_agent_set = openstackclient.network.v2.network_agent:SetNetworkAgent - network_agent_show = openstackclient.network.v2.network_agent:ShowNetworkAgent - - network_auto_allocated_topology_create = openstackclient.network.v2.network_auto_allocated_topology:CreateAutoAllocatedTopology - network_auto_allocated_topology_delete = openstackclient.network.v2.network_auto_allocated_topology:DeleteAutoAllocatedTopology - - network_flavor_add_profile = openstackclient.network.v2.network_flavor:AddNetworkFlavorToProfile - network_flavor_create = openstackclient.network.v2.network_flavor:CreateNetworkFlavor - network_flavor_delete = 
openstackclient.network.v2.network_flavor:DeleteNetworkFlavor - network_flavor_list = openstackclient.network.v2.network_flavor:ListNetworkFlavor - network_flavor_remove_profile = openstackclient.network.v2.network_flavor:RemoveNetworkFlavorFromProfile - network_flavor_set = openstackclient.network.v2.network_flavor:SetNetworkFlavor - network_flavor_show = openstackclient.network.v2.network_flavor:ShowNetworkFlavor - - network_flavor_profile_create = openstackclient.network.v2.network_flavor_profile:CreateNetworkFlavorProfile - network_flavor_profile_delete = openstackclient.network.v2.network_flavor_profile:DeleteNetworkFlavorProfile - network_flavor_profile_list = openstackclient.network.v2.network_flavor_profile:ListNetworkFlavorProfile - network_flavor_profile_set = openstackclient.network.v2.network_flavor_profile:SetNetworkFlavorProfile - network_flavor_profile_show = openstackclient.network.v2.network_flavor_profile:ShowNetworkFlavorProfile - - network_create = openstackclient.network.v2.network:CreateNetwork - network_delete = openstackclient.network.v2.network:DeleteNetwork - network_list = openstackclient.network.v2.network:ListNetwork - network_set = openstackclient.network.v2.network:SetNetwork - network_show = openstackclient.network.v2.network:ShowNetwork - network_unset = openstackclient.network.v2.network:UnsetNetwork - - network_l3_conntrack_helper_create = openstackclient.network.v2.l3_conntrack_helper:CreateConntrackHelper - network_l3_conntrack_helper_delete = openstackclient.network.v2.l3_conntrack_helper:DeleteConntrackHelper - network_l3_conntrack_helper_list = openstackclient.network.v2.l3_conntrack_helper:ListConntrackHelper - network_l3_conntrack_helper_set = openstackclient.network.v2.l3_conntrack_helper:SetConntrackHelper - network_l3_conntrack_helper_show = openstackclient.network.v2.l3_conntrack_helper:ShowConntrackHelper - - network_meter_create = openstackclient.network.v2.network_meter:CreateMeter - network_meter_delete = openstackclient.network.v2.network_meter:DeleteMeter - network_meter_list = openstackclient.network.v2.network_meter:ListMeter - network_meter_show = openstackclient.network.v2.network_meter:ShowMeter - - network_meter_rule_create = openstackclient.network.v2.network_meter_rule:CreateMeterRule - network_meter_rule_delete = openstackclient.network.v2.network_meter_rule:DeleteMeterRule - network_meter_rule_list = openstackclient.network.v2.network_meter_rule:ListMeterRule - network_meter_rule_show = openstackclient.network.v2.network_meter_rule:ShowMeterRule - - network_qos_policy_create = openstackclient.network.v2.network_qos_policy:CreateNetworkQosPolicy - network_qos_policy_delete = openstackclient.network.v2.network_qos_policy:DeleteNetworkQosPolicy - network_qos_policy_list = openstackclient.network.v2.network_qos_policy:ListNetworkQosPolicy - network_qos_policy_set = openstackclient.network.v2.network_qos_policy:SetNetworkQosPolicy - network_qos_policy_show = openstackclient.network.v2.network_qos_policy:ShowNetworkQosPolicy - - network_qos_rule_create = openstackclient.network.v2.network_qos_rule:CreateNetworkQosRule - network_qos_rule_delete = openstackclient.network.v2.network_qos_rule:DeleteNetworkQosRule - network_qos_rule_list = openstackclient.network.v2.network_qos_rule:ListNetworkQosRule - network_qos_rule_set = openstackclient.network.v2.network_qos_rule:SetNetworkQosRule - network_qos_rule_show = openstackclient.network.v2.network_qos_rule:ShowNetworkQosRule - - network_qos_rule_type_list = 
openstackclient.network.v2.network_qos_rule_type:ListNetworkQosRuleType - network_qos_rule_type_show = openstackclient.network.v2.network_qos_rule_type:ShowNetworkQosRuleType - - network_rbac_create = openstackclient.network.v2.network_rbac:CreateNetworkRBAC - network_rbac_delete = openstackclient.network.v2.network_rbac:DeleteNetworkRBAC - network_rbac_list = openstackclient.network.v2.network_rbac:ListNetworkRBAC - network_rbac_set = openstackclient.network.v2.network_rbac:SetNetworkRBAC - network_rbac_show = openstackclient.network.v2.network_rbac:ShowNetworkRBAC - - network_segment_create = openstackclient.network.v2.network_segment:CreateNetworkSegment - network_segment_delete = openstackclient.network.v2.network_segment:DeleteNetworkSegment - network_segment_list = openstackclient.network.v2.network_segment:ListNetworkSegment - network_segment_set = openstackclient.network.v2.network_segment:SetNetworkSegment - network_segment_show = openstackclient.network.v2.network_segment:ShowNetworkSegment - - network_segment_range_create = openstackclient.network.v2.network_segment_range:CreateNetworkSegmentRange - network_segment_range_delete = openstackclient.network.v2.network_segment_range:DeleteNetworkSegmentRange - network_segment_range_list = openstackclient.network.v2.network_segment_range:ListNetworkSegmentRange - network_segment_range_set = openstackclient.network.v2.network_segment_range:SetNetworkSegmentRange - network_segment_range_show = openstackclient.network.v2.network_segment_range:ShowNetworkSegmentRange - - network_service_provider_list = openstackclient.network.v2.network_service_provider:ListNetworkServiceProvider - - network_subport_list = openstackclient.network.v2.network_trunk:ListNetworkSubport - network_trunk_create = openstackclient.network.v2.network_trunk:CreateNetworkTrunk - network_trunk_delete = openstackclient.network.v2.network_trunk:DeleteNetworkTrunk - network_trunk_list = openstackclient.network.v2.network_trunk:ListNetworkTrunk - network_trunk_set = openstackclient.network.v2.network_trunk:SetNetworkTrunk - network_trunk_show = openstackclient.network.v2.network_trunk:ShowNetworkTrunk - network_trunk_unset = openstackclient.network.v2.network_trunk:UnsetNetworkTrunk - - port_create = openstackclient.network.v2.port:CreatePort - port_delete = openstackclient.network.v2.port:DeletePort - port_list = openstackclient.network.v2.port:ListPort - port_set = openstackclient.network.v2.port:SetPort - port_show = openstackclient.network.v2.port:ShowPort - port_unset = openstackclient.network.v2.port:UnsetPort - - router_add_port = openstackclient.network.v2.router:AddPortToRouter - router_add_route = openstackclient.network.v2.router:AddExtraRoutesToRouter - router_add_subnet = openstackclient.network.v2.router:AddSubnetToRouter - router_create = openstackclient.network.v2.router:CreateRouter - router_delete = openstackclient.network.v2.router:DeleteRouter - router_list = openstackclient.network.v2.router:ListRouter - router_remove_port = openstackclient.network.v2.router:RemovePortFromRouter - router_remove_route = openstackclient.network.v2.router:RemoveExtraRoutesFromRouter - router_remove_subnet = openstackclient.network.v2.router:RemoveSubnetFromRouter - router_set = openstackclient.network.v2.router:SetRouter - router_show = openstackclient.network.v2.router:ShowRouter - router_unset = openstackclient.network.v2.router:UnsetRouter - - router_ndp_proxy_create = openstackclient.network.v2.ndp_proxy:CreateNDPProxy - router_ndp_proxy_delete = 
openstackclient.network.v2.ndp_proxy:DeleteNDPProxy - router_ndp_proxy_list = openstackclient.network.v2.ndp_proxy:ListNDPProxy - router_ndp_proxy_set = openstackclient.network.v2.ndp_proxy:SetNDPProxy - router_ndp_proxy_show = openstackclient.network.v2.ndp_proxy:ShowNDPProxy - - security_group_create = openstackclient.network.v2.security_group:CreateSecurityGroup - security_group_delete = openstackclient.network.v2.security_group:DeleteSecurityGroup - security_group_list = openstackclient.network.v2.security_group:ListSecurityGroup - security_group_set = openstackclient.network.v2.security_group:SetSecurityGroup - security_group_show = openstackclient.network.v2.security_group:ShowSecurityGroup - security_group_unset = openstackclient.network.v2.security_group:UnsetSecurityGroup - - security_group_rule_create = openstackclient.network.v2.security_group_rule:CreateSecurityGroupRule - security_group_rule_delete = openstackclient.network.v2.security_group_rule:DeleteSecurityGroupRule - security_group_rule_list = openstackclient.network.v2.security_group_rule:ListSecurityGroupRule - security_group_rule_show = openstackclient.network.v2.security_group_rule:ShowSecurityGroupRule - - subnet_create = openstackclient.network.v2.subnet:CreateSubnet - subnet_delete = openstackclient.network.v2.subnet:DeleteSubnet - subnet_list = openstackclient.network.v2.subnet:ListSubnet - subnet_set = openstackclient.network.v2.subnet:SetSubnet - subnet_show = openstackclient.network.v2.subnet:ShowSubnet - subnet_unset = openstackclient.network.v2.subnet:UnsetSubnet - - subnet_pool_create = openstackclient.network.v2.subnet_pool:CreateSubnetPool - subnet_pool_delete = openstackclient.network.v2.subnet_pool:DeleteSubnetPool - subnet_pool_list = openstackclient.network.v2.subnet_pool:ListSubnetPool - subnet_pool_set = openstackclient.network.v2.subnet_pool:SetSubnetPool - subnet_pool_show = openstackclient.network.v2.subnet_pool:ShowSubnetPool - subnet_pool_unset = openstackclient.network.v2.subnet_pool:UnsetSubnetPool - -openstack.object_store.v1 = - object_store_account_set = openstackclient.object.v1.account:SetAccount - object_store_account_show = openstackclient.object.v1.account:ShowAccount - object_store_account_unset = openstackclient.object.v1.account:UnsetAccount - container_create = openstackclient.object.v1.container:CreateContainer - container_delete = openstackclient.object.v1.container:DeleteContainer - container_list = openstackclient.object.v1.container:ListContainer - container_save = openstackclient.object.v1.container:SaveContainer - container_set = openstackclient.object.v1.container:SetContainer - container_show = openstackclient.object.v1.container:ShowContainer - container_unset = openstackclient.object.v1.container:UnsetContainer - object_create = openstackclient.object.v1.object:CreateObject - object_delete = openstackclient.object.v1.object:DeleteObject - object_list = openstackclient.object.v1.object:ListObject - object_save = openstackclient.object.v1.object:SaveObject - object_set = openstackclient.object.v1.object:SetObject - object_show = openstackclient.object.v1.object:ShowObject - object_unset = openstackclient.object.v1.object:UnsetObject - -openstack.volume.v1 = - volume_create = openstackclient.volume.v1.volume:CreateVolume - volume_delete = openstackclient.volume.v1.volume:DeleteVolume - volume_list = openstackclient.volume.v1.volume:ListVolume - volume_migrate = openstackclient.volume.v1.volume:MigrateVolume - volume_set = openstackclient.volume.v1.volume:SetVolume - 
volume_show = openstackclient.volume.v1.volume:ShowVolume - volume_unset = openstackclient.volume.v1.volume:UnsetVolume - - volume_backup_create = openstackclient.volume.v1.volume_backup:CreateVolumeBackup - volume_backup_delete = openstackclient.volume.v1.volume_backup:DeleteVolumeBackup - volume_backup_list = openstackclient.volume.v1.volume_backup:ListVolumeBackup - volume_backup_restore = openstackclient.volume.v1.volume_backup:RestoreVolumeBackup - volume_backup_show = openstackclient.volume.v1.volume_backup:ShowVolumeBackup - - volume_snapshot_create = openstackclient.volume.v1.volume_snapshot:CreateVolumeSnapshot - volume_snapshot_delete = openstackclient.volume.v1.volume_snapshot:DeleteVolumeSnapshot - volume_snapshot_list = openstackclient.volume.v1.volume_snapshot:ListVolumeSnapshot - volume_snapshot_set = openstackclient.volume.v1.volume_snapshot:SetVolumeSnapshot - volume_snapshot_show = openstackclient.volume.v1.volume_snapshot:ShowVolumeSnapshot - volume_snapshot_unset = openstackclient.volume.v1.volume_snapshot:UnsetVolumeSnapshot - - volume_type_create = openstackclient.volume.v1.volume_type:CreateVolumeType - volume_type_delete = openstackclient.volume.v1.volume_type:DeleteVolumeType - volume_type_list = openstackclient.volume.v1.volume_type:ListVolumeType - volume_type_set = openstackclient.volume.v1.volume_type:SetVolumeType - volume_type_show = openstackclient.volume.v1.volume_type:ShowVolumeType - volume_type_unset = openstackclient.volume.v1.volume_type:UnsetVolumeType - - volume_qos_associate = openstackclient.volume.v1.qos_specs:AssociateQos - volume_qos_create = openstackclient.volume.v1.qos_specs:CreateQos - volume_qos_delete = openstackclient.volume.v1.qos_specs:DeleteQos - volume_qos_disassociate = openstackclient.volume.v1.qos_specs:DisassociateQos - volume_qos_list = openstackclient.volume.v1.qos_specs:ListQos - volume_qos_set = openstackclient.volume.v1.qos_specs:SetQos - volume_qos_show = openstackclient.volume.v1.qos_specs:ShowQos - volume_qos_unset = openstackclient.volume.v1.qos_specs:UnsetQos - - volume_service_list = openstackclient.volume.v1.service:ListService - volume_service_set = openstackclient.volume.v1.service:SetService - - volume_transfer_request_accept = openstackclient.volume.v1.volume_transfer_request:AcceptTransferRequest - volume_transfer_request_create = openstackclient.volume.v1.volume_transfer_request:CreateTransferRequest - volume_transfer_request_delete = openstackclient.volume.v1.volume_transfer_request:DeleteTransferRequest - volume_transfer_request_list = openstackclient.volume.v1.volume_transfer_request:ListTransferRequest - volume_transfer_request_show = openstackclient.volume.v1.volume_transfer_request:ShowTransferRequest - -openstack.volume.v2 = - consistency_group_add_volume = openstackclient.volume.v2.consistency_group:AddVolumeToConsistencyGroup - consistency_group_create = openstackclient.volume.v2.consistency_group:CreateConsistencyGroup - consistency_group_delete = openstackclient.volume.v2.consistency_group:DeleteConsistencyGroup - consistency_group_list = openstackclient.volume.v2.consistency_group:ListConsistencyGroup - consistency_group_remove_volume = openstackclient.volume.v2.consistency_group:RemoveVolumeFromConsistencyGroup - consistency_group_set = openstackclient.volume.v2.consistency_group:SetConsistencyGroup - consistency_group_show = openstackclient.volume.v2.consistency_group:ShowConsistencyGroup - - consistency_group_snapshot_create = 
openstackclient.volume.v2.consistency_group_snapshot:CreateConsistencyGroupSnapshot
-    consistency_group_snapshot_delete = openstackclient.volume.v2.consistency_group_snapshot:DeleteConsistencyGroupSnapshot
-    consistency_group_snapshot_list = openstackclient.volume.v2.consistency_group_snapshot:ListConsistencyGroupSnapshot
-    consistency_group_snapshot_show = openstackclient.volume.v2.consistency_group_snapshot:ShowConsistencyGroupSnapshot
-
-    volume_create = openstackclient.volume.v2.volume:CreateVolume
-    volume_delete = openstackclient.volume.v2.volume:DeleteVolume
-    volume_list = openstackclient.volume.v2.volume:ListVolume
-    volume_migrate = openstackclient.volume.v2.volume:MigrateVolume
-    volume_set = openstackclient.volume.v2.volume:SetVolume
-    volume_show = openstackclient.volume.v2.volume:ShowVolume
-    volume_unset = openstackclient.volume.v2.volume:UnsetVolume
-
-    volume_backup_create = openstackclient.volume.v2.volume_backup:CreateVolumeBackup
-    volume_backup_delete = openstackclient.volume.v2.volume_backup:DeleteVolumeBackup
-    volume_backup_list = openstackclient.volume.v2.volume_backup:ListVolumeBackup
-    volume_backup_restore = openstackclient.volume.v2.volume_backup:RestoreVolumeBackup
-    volume_backup_set = openstackclient.volume.v2.volume_backup:SetVolumeBackup
-    volume_backup_show = openstackclient.volume.v2.volume_backup:ShowVolumeBackup
-
-    volume_backup_record_export = openstackclient.volume.v2.backup_record:ExportBackupRecord
-    volume_backup_record_import = openstackclient.volume.v2.backup_record:ImportBackupRecord
-
-    volume_backend_capability_show = openstackclient.volume.v2.volume_backend:ShowCapability
-    volume_backend_pool_list = openstackclient.volume.v2.volume_backend:ListPool
-
-    volume_host_failover = openstackclient.volume.v2.volume_host:FailoverVolumeHost
-    volume_host_set = openstackclient.volume.v2.volume_host:SetVolumeHost
-
-    volume_snapshot_create = openstackclient.volume.v2.volume_snapshot:CreateVolumeSnapshot
-    volume_snapshot_delete = openstackclient.volume.v2.volume_snapshot:DeleteVolumeSnapshot
-    volume_snapshot_list = openstackclient.volume.v2.volume_snapshot:ListVolumeSnapshot
-    volume_snapshot_set = openstackclient.volume.v2.volume_snapshot:SetVolumeSnapshot
-    volume_snapshot_show = openstackclient.volume.v2.volume_snapshot:ShowVolumeSnapshot
-    volume_snapshot_unset = openstackclient.volume.v2.volume_snapshot:UnsetVolumeSnapshot
-
-    volume_type_create = openstackclient.volume.v2.volume_type:CreateVolumeType
-    volume_type_delete = openstackclient.volume.v2.volume_type:DeleteVolumeType
-    volume_type_list = openstackclient.volume.v2.volume_type:ListVolumeType
-    volume_type_set = openstackclient.volume.v2.volume_type:SetVolumeType
-    volume_type_show = openstackclient.volume.v2.volume_type:ShowVolumeType
-    volume_type_unset = openstackclient.volume.v2.volume_type:UnsetVolumeType
-
-    volume_qos_associate = openstackclient.volume.v2.qos_specs:AssociateQos
-    volume_qos_create = openstackclient.volume.v2.qos_specs:CreateQos
-    volume_qos_delete = openstackclient.volume.v2.qos_specs:DeleteQos
-    volume_qos_disassociate = openstackclient.volume.v2.qos_specs:DisassociateQos
-    volume_qos_list = openstackclient.volume.v2.qos_specs:ListQos
-    volume_qos_set = openstackclient.volume.v2.qos_specs:SetQos
-    volume_qos_show = openstackclient.volume.v2.qos_specs:ShowQos
-    volume_qos_unset = openstackclient.volume.v2.qos_specs:UnsetQos
-
-    volume_service_list = openstackclient.volume.v2.service:ListService
-    volume_service_set = openstackclient.volume.v2.service:SetService
-
-    volume_transfer_request_accept = openstackclient.volume.v2.volume_transfer_request:AcceptTransferRequest
-    volume_transfer_request_create = openstackclient.volume.v2.volume_transfer_request:CreateTransferRequest
-    volume_transfer_request_delete = openstackclient.volume.v2.volume_transfer_request:DeleteTransferRequest
-    volume_transfer_request_list = openstackclient.volume.v2.volume_transfer_request:ListTransferRequest
-    volume_transfer_request_show = openstackclient.volume.v2.volume_transfer_request:ShowTransferRequest
-
-openstack.volume.v3 =
-    consistency_group_add_volume = openstackclient.volume.v2.consistency_group:AddVolumeToConsistencyGroup
-    consistency_group_create = openstackclient.volume.v2.consistency_group:CreateConsistencyGroup
-    consistency_group_delete = openstackclient.volume.v2.consistency_group:DeleteConsistencyGroup
-    consistency_group_list = openstackclient.volume.v2.consistency_group:ListConsistencyGroup
-    consistency_group_remove_volume = openstackclient.volume.v2.consistency_group:RemoveVolumeFromConsistencyGroup
-    consistency_group_set = openstackclient.volume.v2.consistency_group:SetConsistencyGroup
-    consistency_group_show = openstackclient.volume.v2.consistency_group:ShowConsistencyGroup
-
-    consistency_group_snapshot_create = openstackclient.volume.v2.consistency_group_snapshot:CreateConsistencyGroupSnapshot
-    consistency_group_snapshot_delete = openstackclient.volume.v2.consistency_group_snapshot:DeleteConsistencyGroupSnapshot
-    consistency_group_snapshot_list = openstackclient.volume.v2.consistency_group_snapshot:ListConsistencyGroupSnapshot
-    consistency_group_snapshot_show = openstackclient.volume.v2.consistency_group_snapshot:ShowConsistencyGroupSnapshot
-
-    volume_create = openstackclient.volume.v2.volume:CreateVolume
-    volume_delete = openstackclient.volume.v2.volume:DeleteVolume
-    volume_list = openstackclient.volume.v2.volume:ListVolume
-    volume_migrate = openstackclient.volume.v2.volume:MigrateVolume
-    volume_set = openstackclient.volume.v2.volume:SetVolume
-    volume_show = openstackclient.volume.v2.volume:ShowVolume
-    volume_unset = openstackclient.volume.v2.volume:UnsetVolume
-
-    volume_attachment_create = openstackclient.volume.v3.volume_attachment:CreateVolumeAttachment
-    volume_attachment_delete = openstackclient.volume.v3.volume_attachment:DeleteVolumeAttachment
-    volume_attachment_list = openstackclient.volume.v3.volume_attachment:ListVolumeAttachment
-    volume_attachment_complete = openstackclient.volume.v3.volume_attachment:CompleteVolumeAttachment
-    volume_attachment_set = openstackclient.volume.v3.volume_attachment:SetVolumeAttachment
-    volume_attachment_show = openstackclient.volume.v3.volume_attachment:ShowVolumeAttachment
-
-    volume_backup_create = openstackclient.volume.v2.volume_backup:CreateVolumeBackup
-    volume_backup_delete = openstackclient.volume.v2.volume_backup:DeleteVolumeBackup
-    volume_backup_list = openstackclient.volume.v2.volume_backup:ListVolumeBackup
-    volume_backup_restore = openstackclient.volume.v2.volume_backup:RestoreVolumeBackup
-    volume_backup_set = openstackclient.volume.v2.volume_backup:SetVolumeBackup
-    volume_backup_unset = openstackclient.volume.v2.volume_backup:UnsetVolumeBackup
-    volume_backup_show = openstackclient.volume.v2.volume_backup:ShowVolumeBackup
-
-    volume_backend_capability_show = openstackclient.volume.v2.volume_backend:ShowCapability
-    volume_backend_pool_list = openstackclient.volume.v2.volume_backend:ListPool
-
-    volume_backup_record_export = openstackclient.volume.v2.backup_record:ExportBackupRecord
-    volume_backup_record_import = openstackclient.volume.v2.backup_record:ImportBackupRecord
-
-    volume_group_create = openstackclient.volume.v3.volume_group:CreateVolumeGroup
-    volume_group_delete = openstackclient.volume.v3.volume_group:DeleteVolumeGroup
-    volume_group_list = openstackclient.volume.v3.volume_group:ListVolumeGroup
-    volume_group_failover = openstackclient.volume.v3.volume_group:FailoverVolumeGroup
-    volume_group_set = openstackclient.volume.v3.volume_group:SetVolumeGroup
-    volume_group_show = openstackclient.volume.v3.volume_group:ShowVolumeGroup
-
-    volume_group_snapshot_create = openstackclient.volume.v3.volume_group_snapshot:CreateVolumeGroupSnapshot
-    volume_group_snapshot_delete = openstackclient.volume.v3.volume_group_snapshot:DeleteVolumeGroupSnapshot
-    volume_group_snapshot_list = openstackclient.volume.v3.volume_group_snapshot:ListVolumeGroupSnapshot
-    volume_group_snapshot_show = openstackclient.volume.v3.volume_group_snapshot:ShowVolumeGroupSnapshot
-
-    volume_group_type_create = openstackclient.volume.v3.volume_group_type:CreateVolumeGroupType
-    volume_group_type_delete = openstackclient.volume.v3.volume_group_type:DeleteVolumeGroupType
-    volume_group_type_list = openstackclient.volume.v3.volume_group_type:ListVolumeGroupType
-    volume_group_type_set = openstackclient.volume.v3.volume_group_type:SetVolumeGroupType
-    volume_group_type_show = openstackclient.volume.v3.volume_group_type:ShowVolumeGroupType
-
-    volume_host_set = openstackclient.volume.v2.volume_host:SetVolumeHost
-
-    volume_message_delete = openstackclient.volume.v3.volume_message:DeleteMessage
-    volume_message_list = openstackclient.volume.v3.volume_message:ListMessages
-    volume_message_show = openstackclient.volume.v3.volume_message:ShowMessage
-
-    block_storage_cluster_list = openstackclient.volume.v3.block_storage_cluster:ListBlockStorageCluster
-    block_storage_cluster_set = openstackclient.volume.v3.block_storage_cluster:SetBlockStorageCluster
-    block_storage_cluster_show = openstackclient.volume.v3.block_storage_cluster:ShowBlockStorageCluster
-    block_storage_resource_filter_list = openstackclient.volume.v3.block_storage_resource_filter:ListBlockStorageResourceFilter
-    block_storage_resource_filter_show = openstackclient.volume.v3.block_storage_resource_filter:ShowBlockStorageResourceFilter
-
-    volume_snapshot_create = openstackclient.volume.v2.volume_snapshot:CreateVolumeSnapshot
-    volume_snapshot_delete = openstackclient.volume.v2.volume_snapshot:DeleteVolumeSnapshot
-    volume_snapshot_list = openstackclient.volume.v2.volume_snapshot:ListVolumeSnapshot
-    volume_snapshot_set = openstackclient.volume.v2.volume_snapshot:SetVolumeSnapshot
-    volume_snapshot_show = openstackclient.volume.v2.volume_snapshot:ShowVolumeSnapshot
-    volume_snapshot_unset = openstackclient.volume.v2.volume_snapshot:UnsetVolumeSnapshot
-
-    volume_type_create = openstackclient.volume.v2.volume_type:CreateVolumeType
-    volume_type_delete = openstackclient.volume.v2.volume_type:DeleteVolumeType
-    volume_type_list = openstackclient.volume.v2.volume_type:ListVolumeType
-    volume_type_set = openstackclient.volume.v2.volume_type:SetVolumeType
-    volume_type_show = openstackclient.volume.v2.volume_type:ShowVolumeType
-    volume_type_unset = openstackclient.volume.v2.volume_type:UnsetVolumeType
-
-    volume_qos_associate = openstackclient.volume.v2.qos_specs:AssociateQos
-    volume_qos_create = openstackclient.volume.v2.qos_specs:CreateQos
-    volume_qos_delete = openstackclient.volume.v2.qos_specs:DeleteQos
-    volume_qos_disassociate = openstackclient.volume.v2.qos_specs:DisassociateQos
-    volume_qos_list = openstackclient.volume.v2.qos_specs:ListQos
-    volume_qos_set = openstackclient.volume.v2.qos_specs:SetQos
-    volume_qos_show = openstackclient.volume.v2.qos_specs:ShowQos
-    volume_qos_unset = openstackclient.volume.v2.qos_specs:UnsetQos
-
-    volume_service_list = openstackclient.volume.v2.service:ListService
-    volume_service_set = openstackclient.volume.v2.service:SetService
-
-    volume_transfer_request_accept = openstackclient.volume.v2.volume_transfer_request:AcceptTransferRequest
-    volume_transfer_request_create = openstackclient.volume.v2.volume_transfer_request:CreateTransferRequest
-    volume_transfer_request_delete = openstackclient.volume.v2.volume_transfer_request:DeleteTransferRequest
-    volume_transfer_request_list = openstackclient.volume.v2.volume_transfer_request:ListTransferRequest
-    volume_transfer_request_show = openstackclient.volume.v2.volume_transfer_request:ShowTransferRequest
-
-    volume_summary = openstackclient.volume.v3.volume:VolumeSummary
-    volume_revert = openstackclient.volume.v3.volume:VolumeRevertToSnapshot
-    block_storage_log_level_list = openstackclient.volume.v3.block_storage_log_level:BlockStorageLogLevelList
-    block_storage_log_level_set = openstackclient.volume.v3.block_storage_log_level:BlockStorageLogLevelSet
-    block_storage_cleanup = openstackclient.volume.v3.block_storage_cleanup:BlockStorageCleanup
-    block_storage_volume_manageable_list = openstackclient.volume.v3.block_storage_manage:BlockStorageManageVolumes
-    block_storage_snapshot_manageable_list = openstackclient.volume.v3.block_storage_manage:BlockStorageManageSnapshots
diff --git a/setup.py b/setup.py
index cd35c3c35b..481505b030 100644
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,4 @@
 
 import setuptools
 
-setuptools.setup(
-    setup_requires=['pbr>=2.0.0'],
-    pbr=True)
+setuptools.setup(setup_requires=['pbr>=2.0.0'], pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
index 275c2ff114..c9c1b28ccd 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,7 +1,5 @@
 coverage!=4.4,>=4.0 # Apache-2.0
 fixtures>=3.0.0 # Apache-2.0/BSD
-oslotest>=3.2.0 # Apache-2.0
-requests>=2.14.2 # Apache-2.0
 requests-mock>=1.2.0 # Apache-2.0
 stestr>=1.0.0 # Apache-2.0
 testtools>=2.2.0 # MIT
diff --git a/tools/fast8.sh b/tools/fast8.sh
deleted file mode 100755
index 2b3e22abda..0000000000
--- a/tools/fast8.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-cd $(dirname "$0")/..
-CHANGED=$(git diff --name-only HEAD~1 | tr '\n' ' ')
-
-# Skip files that don't exist
-# (have been git rm'd)
-CHECK=""
-for FILE in $CHANGED; do
-    if [ -f "$FILE" ]; then
-        CHECK="$CHECK $FILE"
-    fi
-done
-
-diff -u --from-file /dev/null $CHECK | flake8 --diff
diff --git a/tox.ini b/tox.ini
index 3de7dd3804..1988ec8236 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,15 +1,11 @@
 [tox]
-minversion = 3.18.0
+minversion = 4.3.0
 envlist = py3,pep8
-#skipsdist = True
-# Automatic envs (pyXX) will only use the python version appropriate to that
-# env and ignore basepython inherited from [testenv] if we set
-# ignore_basepython_conflict.
-ignore_basepython_conflict = True
 
 [testenv]
-usedevelop = True
-basepython = python3
+description =
+    Run unit tests.
+usedevelop = true
 setenv =
     OS_STDOUT_CAPTURE=1
     OS_STDERR_CAPTURE=1
@@ -18,130 +14,116 @@
 deps =
     -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/test-requirements.txt
     -r{toxinidir}/requirements.txt
-commands = stestr run {posargs}
-allowlist_externals = stestr
-
-[testenv:fast8]
-# Use same environment directory as pep8 env to save space and install time
-setenv =
-    VIRTUAL_ENV={envdir}
-envdir = {toxworkdir}/pep8
 commands =
-    {toxinidir}/tools/fast8.sh
+    stestr run {posargs}

 [testenv:pep8]
+description =
+    Run style checks.
+skip_install = true
 deps =
-    hacking>=2.0.0
-    bandit!=1.6.0,>=1.1.0
-    flake8-import-order>=0.13 # LGPLv3
+    pre-commit
 commands =
-    flake8
-    bandit -r openstackclient -x tests -s B105,B106,B107,B401,B404,B603,B606,B607,B110,B605,B101
+    pre-commit run --all-files --show-diff-on-failure

 [testenv:bandit]
-# This command runs the bandit security linter against the openstackclient
-# codebase minus the tests directory. Some tests are being excluded to
-# reduce the number of positives before a team inspection, and to ensure a
-# passing gate job for initial addition. The excluded tests are:
-# B105-B107: hardcoded password checks - likely to generate false positives
-# in a gate environment
-# B401: import subprocess - not necessarily a security issue; this plugin is
-# mainly used for penetration testing workflow
-# B603,B606: process without shell - not necessarily a security issue; this
-# plugin is mainly used for penetration testing workflow
-# B607: start process with a partial path - this should be a project level
-# decision
-# NOTE(elmiko): The following tests are being excluded specifically for
-# python-openstackclient, they are being excluded to ensure that voting jobs
-# in the project and in bandit integration tests continue to pass. These
-# tests have generated issue within the project and should be investigated
-# by the project.
-# B110: try, except, pass detected - possible security issue; this should be
-# investigated by the project for possible exploitation
-# B605: process with a shell - possible security issue; this should be
-# investigated by the project for possible exploitation
-# B101: use of assert - this code will be removed when compiling to optimized
-# byte code
+description =
+    Run bandit security checks.
+skip_install = true
+deps =
+    pre-commit
 commands =
-    bandit -r openstackclient -x tests -s B105,B106,B107,B401,B404,B603,B606,B607,B110,B605,B101
+    pre-commit run --all-files --show-diff-on-failure bandit

 [testenv:unit-tips]
 commands =
-    python -m pip install -q -U -e "git+file://{toxinidir}/../cliff#egg=cliff"
-    python -m pip install -q -U -e "git+file://{toxinidir}/../keystoneauth#egg=keystoneauth"
-    python -m pip install -q -U -e "git+file://{toxinidir}/../osc-lib#egg=osc_lib"
-    pythom -m pip install -q -e "git+file://{toxinidir}/../openstacksdk#egg=openstacksdk"
+    python -m pip install -q -U -e {toxinidir}/../cliff#egg=cliff
+    python -m pip install -q -U -e {toxinidir}/../keystoneauth#egg=keystoneauth
+    python -m pip install -q -U -e {toxinidir}/../osc-lib#egg=osc_lib
+    python -m pip install -q -U -e {toxinidir}/../openstacksdk#egg=openstacksdk
+    python -m pip freeze
     stestr run {posargs}
-allowlist_externals = stestr
-
-[testenv:functional]
-setenv =
-    OS_TEST_PATH=./openstackclient/tests/functional
-passenv =
-    OS_*
-commands =
-    stestr run {posargs}
-[testenv:functional-tips]
+[testenv:functional{,-tips,-py310,-py311,-py312,-py313,-py314}]
+description =
+    Run functional tests.
 setenv =
     OS_TEST_PATH=./openstackclient/tests/functional
 passenv =
     OS_*
 commands =
-    python -m pip install -q -U -e "git+file://{toxinidir}/../cliff#egg=cliff"
-    python -m pip install -q -U -e "git+file://{toxinidir}/../keystoneauth#egg=keystoneauth1"
-    python -m pip install -q -U -e "git+file://{toxinidir}/../osc-lib#egg=osc_lib"
-    python -m pip install -q -U -e "git+file://{toxinidir}/../openstacksdk#egg=openstacksdk"
-    python -m pip freeze
-    stestr run {posargs}
+    tips: python -m pip install -q -U -e {toxinidir}/../cliff#egg=cliff
+    tips: python -m pip install -q -U -e {toxinidir}/../keystoneauth#egg=keystoneauth1
+    tips: python -m pip install -q -U -e {toxinidir}/../osc-lib#egg=osc_lib
+    tips: python -m pip install -q -U -e {toxinidir}/../openstacksdk#egg=openstacksdk
+    tips: python -m pip freeze
+    {[testenv]commands}

 [testenv:venv]
+description =
+    Run specified command in a virtual environment with all dependencies installed.
 deps =
-  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-  -r{toxinidir}/requirements.txt
-  -r{toxinidir}/doc/requirements.txt
-commands = {posargs}
+    -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+    -r{toxinidir}/requirements.txt
+    -r{toxinidir}/doc/requirements.txt
+commands =
+    {posargs}

 [testenv:cover]
+description =
+    Run unit tests and generate coverage report.
 setenv =
-    VIRTUAL_ENV={envdir}
+    {[testenv]setenv}
     PYTHON=coverage run --source openstackclient --parallel-mode
 commands =
-    stestr -q run {posargs}
+    stestr run {posargs}
     coverage combine
     coverage html -d cover
     coverage xml -o cover/coverage.xml

 [testenv:debug]
+description =
+    Run specified tests through oslo_debug_helper, which allows use of pdb.
 passenv =
     OS_*
 commands =
     oslo_debug_helper -t openstackclient/tests {posargs}

 [testenv:docs]
+description =
+    Build documentation in HTML format.
 deps =
-  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-  -r{toxinidir}/doc/requirements.txt
+    -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+    -r{toxinidir}/doc/requirements.txt
 commands =
-  sphinx-build -a -E -W -d doc/build/doctrees -b html doc/source doc/build/html
-  sphinx-build -a -E -W -d doc/build/doctrees -b man doc/source doc/build/man
-  # Validate redirects (must be done after the docs build
-  whereto doc/build/html/.htaccess doc/test/redirect-tests.txt
+    sphinx-build -a -E -W -d doc/build/doctrees -b html doc/source doc/build/html
+    sphinx-build -a -E -W -d doc/build/doctrees -b man doc/source doc/build/man
+    # Validate redirects (must be done after the docs build
+    whereto doc/build/html/.htaccess doc/test/redirect-tests.txt

 [testenv:releasenotes]
+description =
+    Build release note documentation in HTML format.
 deps =
-  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-  -r{toxinidir}/doc/requirements.txt
+    -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+    -r{toxinidir}/doc/requirements.txt
 commands =
-  sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+    sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html

 [flake8]
-show-source = True
-# H203: Use assertIs(Not)None to check for None
-enable-extensions = H203
+show-source = true
 exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,releasenotes
-# W503 and W504 are disabled since they're not very useful
-ignore = W503,W504
+# We only enable the hacking (H) and openstackclient (O) checks
+select = H,O
+# H301 Black will put commas after imports that can't fit on one line
+ignore = H301
 import-order-style = pep8
 application_import_names = openstackclient
+
+[flake8:local-plugins]
+extension =
+    O400 = checks:assert_no_oslo
+    O401 = checks:assert_no_duplicated_setup
+    O402 = checks:assert_use_of_client_aliases
+    O403 = checks:assert_find_ignore_missing_kwargs
+paths = ./hacking
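
Note on the new [flake8:local-plugins] table: it points flake8 at the project's own O4xx checks, loaded from the directory named by "paths = ./hacking" (i.e. a checks module such as hacking/checks.py), which is why "select = H,O" can enable them alongside the upstream hacking H checks. The check implementations themselves are not part of this diff; the snippet below is only a minimal sketch of what one such logical-line check might look like, assuming the conventional hacking/flake8 plugin shape. The rule body, the test-path exemption, and the regex are illustrative assumptions, not the project's actual code.

# hacking/checks.py -- hypothetical sketch only; not taken from this diff.
import re

from hacking import core

# Assumed intent of O400: openstackclient code should not import oslo.* libraries.
_OSLO_IMPORT_RE = re.compile(r"^\s*(?:import|from)\s+oslo")


@core.flake8ext
def assert_no_oslo(logical_line, filename):
    """O400 - do not import oslo libraries directly.

    flake8 calls a logical-line check like this once per logical line and
    records a violation for every (offset, message) pair it yields; the
    message must start with the check code so select/ignore and noqa
    handling can match it.
    """
    if "/tests/" in filename:
        # Assumption: test modules are exempt from the rule.
        return
    if _OSLO_IMPORT_RE.match(logical_line):
        yield 0, "O400: oslo libraries must not be imported in openstackclient"

Relatedly, the "tips:" prefixes added to the functional{,-tips,...} environment above are ordinary tox factor conditionals: those command lines run only when the -tips factor is part of the selected environment name, so the plain functional and functional-pyXY environments skip the sibling installs.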