From aeba372450391c28b64bcf82cdffa07f7b1779de Mon Sep 17 00:00:00 2001
From: Ananth Suryanarayana
Date: Mon, 12 May 2014 18:06:35 -0700
Subject: [PATCH 01/25] Use sudo tee when piping output

---
 contrail/setup_contrail.py | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/contrail/setup_contrail.py b/contrail/setup_contrail.py
index 56a335dedc..888b9fa982 100644
--- a/contrail/setup_contrail.py
+++ b/contrail/setup_contrail.py
@@ -427,7 +427,7 @@ def fixup_config_files(self):
         # generate service token
         self.service_token = self._args.service_token
         if not self.service_token:
-            self.run_shell("sudo openssl rand -hex 10 > /etc/contrail/service.token")
+            self.run_shell("sudo openssl rand -hex 10 | sudo tee /etc/contrail/service.token > /dev/null")
             tok_fd = open('/etc/contrail/service.token')
             self.service_token = tok_fd.read()
             tok_fd.close()
@@ -441,7 +441,7 @@ def fixup_config_files(self):
         """

         # Disable selinux
-        self.run_shell("sudo sed 's/SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config > config.new")
+        self.run_shell("sudo sed 's/SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config | sudo tee config.new > /dev/null")
         self.run_shell("sudo mv config.new /etc/selinux/config")
         self.run_shell("setenforce 0")

@@ -706,7 +706,7 @@ def fixup_config_files(self):
         self.run_shell("echo 'api-server:api-server' >> %s/basicauthusers.properties" % dir)
         self.run_shell("echo 'schema-transformer:schema-transformer' >> %s/basicauthusers.properties" % dir)
         self.run_shell("echo 'svc-monitor:svc-monitor' >> %s/basicauthusers.properties" % dir)
-        self.run_shell("sudo sed -e '/%s:/d' -e '/%s.dns:/d' %s/%s > %s/%s.new" \
+        self.run_shell("sudo sed -e '/%s:/d' -e '/%s.dns:/d' %s/%s | sudo tee %s/%s.new > /dev/null" \
             %(control_ip, control_ip, dir, 'basicauthusers.properties', dir, 'basicauthusers.properties'))

         self.run_shell("echo '%s:%s' >> %s/%s.new" \
@@ -729,20 +729,20 @@ def fixup_config_files(self):
         self.run_shell('sudo cp /etc/libvirt/qemu.conf qemu.conf')
         self.run_shell('sudo chown %s qemu.conf' % whoami)
         if dist == 'centos':
-            self.run_shell('sudo echo "clear_emulator_capabilities = 1" >> qemu.conf')
-            self.run_shell('sudo echo \'user = "root"\' >> qemu.conf')
-            self.run_shell('sudo echo \'group = "root"\' >> qemu.conf')
-            self.run_shell('sudo echo \'cgroup_device_acl = [\' >> qemu.conf')
-            self.run_shell('sudo echo \' "/dev/null", "/dev/full", "/dev/zero",\' >> qemu.conf')
-            self.run_shell('sudo echo \' "/dev/random", "/dev/urandom",\' >> qemu.conf')
-            self.run_shell('sudo echo \' "/dev/ptmx", "/dev/kvm", "/dev/kqemu",\' >> qemu.conf')
-            self.run_shell('sudo echo \' "/dev/rtc", "/dev/hpet","/dev/net/tun",\' >> qemu.conf')
-            self.run_shell('sudo echo \']\' >> qemu.conf')
+            self.run_shell('sudo echo "clear_emulator_capabilities = 1" | sudo tee -a qemu.conf > /dev/null')
+            self.run_shell('sudo echo \'user = "root"\' | sudo tee -a qemu.conf > /dev/null')
+            self.run_shell('sudo echo \'group = "root"\' | sudo tee -a qemu.conf > /dev/null')
+            self.run_shell('sudo echo \'cgroup_device_acl = [\' | sudo tee -a qemu.conf > /dev/null')
+            self.run_shell('sudo echo \' "/dev/null", "/dev/full", "/dev/zero",\' | sudo tee -a qemu.conf > /dev/null')
+            self.run_shell('sudo echo \' "/dev/random", "/dev/urandom",\' | sudo tee -a qemu.conf > /dev/null')
+            self.run_shell('sudo echo \' "/dev/ptmx", "/dev/kvm", "/dev/kqemu",\' | sudo tee -a qemu.conf > /dev/null')
+            self.run_shell('sudo echo \' "/dev/rtc", "/dev/hpet","/dev/net/tun",\' | sudo tee -a qemu.conf > /dev/null')
+            self.run_shell('sudo echo \']\' | sudo tee -a qemu.conf > /dev/null')
         self.run_shell('sudo cp qemu.conf /etc/libvirt/qemu.conf')
         self._fixed_qemu_conf = True

         # add "alias bridge off" in /etc/modprobe.conf for Centos
         if dist == 'centos':
-            self.run_shell('sudo echo "alias bridge off" > /etc/modprobe.conf')
+            self.run_shell('sudo echo "alias bridge off" | sudo tee /etc/modprobe.conf > /dev/null')
         if 'compute' in self._args.role :
             openstack_ip = self._args.openstack_ip
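
[Note on PATCH 01: "sudo cmd > file" fails for a non-root caller because
the output redirection is performed by the invoking shell before sudo
runs; the unprivileged shell, not the privileged command, opens the
root-owned file. Piping into "sudo tee" moves the write into a
privileged process. A minimal sketch of the pattern, with paths taken
from the patch:

    # Fails with "Permission denied": the caller's shell opens the
    # target file for writing; openssl never even runs.
    sudo openssl rand -hex 10 > /etc/contrail/service.token

    # Works: tee runs under sudo and performs the write itself;
    # "> /dev/null" only silences the copy tee echoes to stdout.
    openssl rand -hex 10 | sudo tee /etc/contrail/service.token > /dev/null

    # Appending uses tee -a, as in the qemu.conf lines above.
    echo 'user = "root"' | sudo tee -a qemu.conf > /dev/null

The "sudo" left in front of each "echo" is harmless but redundant; only
the tee side of the pipe needs privilege.]
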
From 00a6e72d83b6f6346ddb528e6c2a1c0d21cd16b2 Mon Sep 17 00:00:00 2001
From: Ananth Suryanarayana
Date: Mon, 12 May 2014 23:05:45 -0700
Subject: [PATCH 02/25] Do not fetch node-v0.8.15.tar.gz if it is already
 there. Also reduce timeouts for gpg

---
 lib/neutron_thirdparty/contrail | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail
index e77e3212ac..b0034d2b03 100644
--- a/lib/neutron_thirdparty/contrail
+++ b/lib/neutron_thirdparty/contrail
@@ -124,9 +124,9 @@ function install_contrail() {
     # See http://wiki.apache.org/cassandra/DebianPackaging
     echo "deb http://www.apache.org/dist/cassandra/debian 12x main" | \
         sudo tee /etc/apt/sources.list.d/cassandra.list
-    gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D
+    gpg --keyserver-options timeout=1 --keyserver pgp.mit.edu --recv-keys F758CE318D77295D
     gpg --export --armor F758CE318D77295D | sudo apt-key add -
-    gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00
+    gpg --keyserver-options timeout=1 --keyserver pgp.mit.edu --recv-keys 2B5C1B00
     gpg --export --armor 2B5C1B00 | sudo apt-key add -

     apt_get update
@@ -193,7 +193,9 @@ function install_contrail() {
         cd ${contrail_cwd}
     fi
     if ! which node > /dev/null 2>&1 ; then
-        wget http://nodejs.org/dist/v0.8.15/node-v0.8.15.tar.gz -O node-v0.8.15.tar.gz
+        if [ ! -f node-v0.8.15.tar.gz ]; then
+            wget http://nodejs.org/dist/v0.8.15/node-v0.8.15.tar.gz -O node-v0.8.15.tar.gz
+        fi
         tar -xf node-v0.8.15.tar.gz
         contrail_cwd=$(pwd)
         cd node-v0.8.15

From 4cab62571ae2a8ffaa30402be8f68da96120a8c8 Mon Sep 17 00:00:00 2001
From: Biswajit Mandal
Date: Thu, 5 Jun 2014 08:51:56 +0530
Subject: [PATCH 03/25] Change the devstack web-ui dev-env to the new
 packaging scheme

---
 lib/neutron_thirdparty/contrail | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail
index fbe4e87474..f1ca2282c8 100644
--- a/lib/neutron_thirdparty/contrail
+++ b/lib/neutron_thirdparty/contrail
@@ -207,8 +207,8 @@ function install_contrail() {
     if [ ! -d $CONTRAIL_SRC/contrail-web-core/node_modules ]; then
         contrail_cwd=$(pwd)
         cd $CONTRAIL_SRC/contrail-web-core
-        ./fetch_packages.sh
-        make prod-env
+        make fetch-pkgs-prod
+        make dev-env REPO=webController
         cd ${contrail_cwd}
     fi
 }
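
[Note on PATCHES 02-03: stack.sh is routinely re-run on the same host,
so install_contrail() guards downloads and builds with file tests
instead of repeating them (the tarball check and the node_modules check
above). A minimal sketch of the idempotent-download pattern from
PATCH 02, with the URL taken from the patch:

    # Fetch the tarball only if a previous run has not left it behind.
    if [ ! -f node-v0.8.15.tar.gz ]; then
        wget http://nodejs.org/dist/v0.8.15/node-v0.8.15.tar.gz \
            -O node-v0.8.15.tar.gz
    fi
    tar -xf node-v0.8.15.tar.gz

Shell detail: "[" is an ordinary command, so the closing "]" must be a
separate word; without the space before it, test reports a missing "]"
and the download is skipped even when the file is absent. The gpg change
in the same patch serves the same goal of fast re-runs:
"--keyserver-options timeout=1" caps how long an unreachable keyserver
can stall the script.]
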
From df36c17d35ff011de5b68ad550991624f1e329e9 Mon Sep 17 00:00:00 2001
From: Édouard Thuleau
Date: Wed, 4 Jun 2014 11:53:40 +0200
Subject: [PATCH 04/25] Use the OpenContrail Neutron plugin

Use the OpenContrail Neutron plugin instead of the Neutron fork and
clean up unused files and code.

---
 contrail/README                    |    4 +-
 contrail/localrc-multinode-compute |    5 -
 contrail/localrc-multinode-server  |    5 -
 contrail/localrc-single            |    5 -
 contrail/neutron_v3.patch          | 4706 ----------------------------
 contrail/neutron_v4.patch          | 3931 -----------------------
 contrail/nova_v3.patch             | 3252 -------------------
 contrail/nova_v4.patch             | 3252 -------------------
 lib/neutron_plugins/contrail       |    3 +-
 lib/neutron_thirdparty/contrail    |   17 +-
 10 files changed, 6 insertions(+), 15174 deletions(-)
 delete mode 100644 contrail/neutron_v3.patch
 delete mode 100644 contrail/neutron_v4.patch
 delete mode 100644 contrail/nova_v3.patch
 delete mode 100644 contrail/nova_v4.patch

diff --git a/contrail/README b/contrail/README
index 193653935e..f3c54669f0 100644
--- a/contrail/README
+++ b/contrail/README
@@ -10,8 +10,8 @@ Notes:
 1) You will need to install kernel-devel package to compile OpenContrail
    Vrouter module
 2) Stack.sh will create an additional screen "contrail" for contrail modules.
-3) This fork includes Neutron and Nova patches. These patches have been
-   seperately submitted for review.
+3) This fork uses the Nova and Neutron upstream master repositories and the
+   OpenContrail Nova VIF driver and Neutron plugin repositories.
 4) OpenStack + OpenContrail has been tested on FC17, Ubuntu 12.04 and 13.10
 5) Repo tool uses SSH by default. Use ssh-agent to forward keys to target
    running devstack or upload host's SSH keys to the github account.

diff --git a/contrail/localrc-multinode-compute b/contrail/localrc-multinode-compute
index a4a8431602..f4e088e436 100644
--- a/contrail/localrc-multinode-compute
+++ b/contrail/localrc-multinode-compute
@@ -6,7 +6,6 @@ SERVICE_HOST=192.168.56.119 # control1

 # the interface that contrail's vhost0 should take over
 PHYSICAL_INTERFACE=eth0
-ENABLE_CONTRAIL=yes
 Q_PLUGIN=contrail

 STACK_DIR=$(cd $(dirname $0) && pwd)
@@ -34,10 +33,6 @@ SERVICE_TOKEN=contrail123
 SERVICE_PASSWORD=contrail123
 ADMIN_PASSWORD=contrail123
 DATABASE_TYPE=mysql
-#
-# use contrail forked neutron repo
-NEUTRON_REPO=https://github.com/dsetia/neutron.git
-NEUTRON_BRANCH=master

 # repo proto is https or (default) ssh. Leave commented for ssh
 # CONTRAIL_REPO_PROTO=https

diff --git a/contrail/localrc-multinode-server b/contrail/localrc-multinode-server
index 79f191c183..f270ec1793 100644
--- a/contrail/localrc-multinode-server
+++ b/contrail/localrc-multinode-server
@@ -6,7 +6,6 @@ CONTRAIL_VGW_INTERFACE=vgw
 CONTRAIL_VGW_PUBLIC_SUBNET=10.99.99.0/24
 # CONTRAIL_VGW_PUBLIC_NETWORK=default-domain:demo:public:public

-ENABLE_CONTRAIL=yes
 PHYSICAL_INTERFACE=eth0

 MULTI_HOST=True
@@ -51,10 +50,6 @@ SERVICE_TOKEN=contrail123
 SERVICE_PASSWORD=contrail123
 ADMIN_PASSWORD=contrail123

-# use contrail forked neutron repo
-NEUTRON_REPO=https://github.com/dsetia/neutron.git
-NEUTRON_BRANCH=master
-
 # repo proto is https or (default) ssh. Leave commented for ssh
 # CONTRAIL_REPO_PROTO=https

diff --git a/contrail/localrc-single b/contrail/localrc-single
index 18636bc90d..8384f9c7e3 100644
--- a/contrail/localrc-single
+++ b/contrail/localrc-single
@@ -43,14 +43,9 @@ SERVICE_TOKEN=contrail123
 SERVICE_PASSWORD=contrail123
 ADMIN_PASSWORD=contrail123

-ENABLE_CONTRAIL=yes
 Q_PLUGIN=contrail
 PHYSICAL_INTERFACE=eth0

-# use contrail forked neutron repo
-NEUTRON_REPO=https://github.com/dsetia/neutron.git
-NEUTRON_BRANCH=master
-
 # repo proto is https or (default) ssh.
Leave commented for ssh # CONTRAIL_REPO_PROTO=https diff --git a/contrail/neutron_v3.patch b/contrail/neutron_v3.patch deleted file mode 100644 index 8f74cdb552..0000000000 --- a/contrail/neutron_v3.patch +++ /dev/null @@ -1,4706 +0,0 @@ -diff --git neutron/extensions/ipam.py neutron/extensions/ipam.py -new file mode 100644 -index 0000000..5d610b3 ---- /dev/null -+++ neutron/extensions/ipam.py -@@ -0,0 +1,140 @@ -+from abc import abstractmethod -+ -+from neutron.api.v2 import attributes as attr -+from neutron.api.v2 import base -+from neutron.common import exceptions as qexception -+from neutron.api import extensions -+from neutron import manager -+from oslo.config import cfg -+ -+ -+# Ipam Exceptions -+class IpamNotFound(qexception.NotFound): -+ message = _("IPAM %(id)s could not be found") -+ -+# Attribute Map -+RESOURCE_ATTRIBUTE_MAP = { -+ 'ipams': { -+ 'id': {'allow_post': False, 'allow_put': False, -+ 'validate': {'type:regex': attr.UUID_PATTERN}, -+ 'is_visible': True}, -+ 'name': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': ''}, -+ 'fq_name': {'allow_post': False, 'allow_put': False, -+ 'is_visible': True}, -+ 'tenant_id': {'allow_post': True, 'allow_put': False, -+ 'required_by_policy': True, -+ 'is_visible': True}, -+ 'mgmt': {'allow_post': True, 'allow_put': True, -+ 'is_visible': True, 'default': None}, -+ 'nets_using': {'allow_post': False, 'allow_put': False, -+ 'is_visible': True, 'default': ''} -+ }, -+} -+ -+# TODO should this be tied to ipam extension? -+EXTENDED_ATTRIBUTES_2_0 = { -+ 'networks': { -+ 'contrail:fq_name': {'allow_post': False, -+ 'allow_put': False, -+ 'is_visible': True}, -+ 'contrail:instance_count': {'allow_post': False, -+ 'allow_put': False, -+ 'is_visible': True}, -+ 'contrail:policys': {'allow_post': True, -+ 'allow_put': True, -+ 'default': '', -+ 'is_visible': True}, -+ 'contrail:subnet_ipam': {'allow_post': False, -+ 'allow_put': False, -+ 'default': '', -+ 'is_visible': True}, -+ }, -+ 'subnets': { -+ 'contrail:instance_count': {'allow_post': False, -+ 'allow_put': False, -+ 'is_visible': True}, -+ 'contrail:ipam_fq_name': {'allow_post': True, -+ 'allow_put': True, -+ 'default': '', -+ 'is_visible': True}, -+ } -+} -+ -+ -+class Ipam(object): -+ -+ @classmethod -+ def get_name(cls): -+ return "Network IP Address Management" -+ -+ @classmethod -+ def get_alias(cls): -+ return "ipam" -+ -+ @classmethod -+ def get_description(cls): -+ return ("Configuration object for holding common to a set of" -+ " IP address blocks") -+ -+ @classmethod -+ def get_namespace(cls): -+ return "http://docs.openstack.org/TODO" -+ -+ @classmethod -+ def get_updated(cls): -+ return "2012-07-20T10:00:00-00:00" -+ -+ @classmethod -+ def get_resources(cls): -+ """ Returns Ext Resources """ -+ exts = [] -+ plugin = manager.QuantumManager.get_plugin() -+ for resource_name in ['ipam']: -+ collection_name = resource_name + "s" -+ params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict()) -+ -+ member_actions = {} -+ -+ controller = base.create_resource(collection_name, -+ resource_name, -+ plugin, params, -+ member_actions=member_actions) -+ -+ ex = extensions.ResourceExtension(collection_name, -+ controller, -+ member_actions=member_actions) -+ exts.append(ex) -+ -+ return exts -+ -+ def get_extended_resources(self, version): -+ if version == "2.0": -+ return EXTENDED_ATTRIBUTES_2_0 -+ else: -+ return {} -+#end class Ipam -+ -+ -+class IpamPluginBase(object): -+ -+ @abstractmethod -+ def create_ipam(self, context, ipam): -+ pass -+ -+ 
@abstractmethod -+ def update_ipam(self, context, id, ipam): -+ pass -+ -+ @abstractmethod -+ def get_ipam(self, context, id, fields=None): -+ pass -+ -+ @abstractmethod -+ def delete_ipam(self, context, id): -+ pass -+ -+ @abstractmethod -+ def get_ipams(self, context, filters=None, fields=None): -+ pass -+#end class IpamPluginBase -diff --git neutron/extensions/portbindings.py neutron/extensions/portbindings.py -index dbef592..f6b2144 100644 ---- neutron/extensions/portbindings.py -+++ neutron/extensions/portbindings.py -@@ -45,11 +45,12 @@ VIF_TYPE_802_QBG = '802.1qbg' - VIF_TYPE_802_QBH = '802.1qbh' - VIF_TYPE_HYPERV = 'hyperv' - VIF_TYPE_MIDONET = 'midonet' -+VIF_TYPE_CONTRAIL = 'contrail' - VIF_TYPE_OTHER = 'other' - VIF_TYPES = [VIF_TYPE_UNBOUND, VIF_TYPE_BINDING_FAILED, VIF_TYPE_OVS, - VIF_TYPE_IVS, VIF_TYPE_BRIDGE, VIF_TYPE_802_QBG, - VIF_TYPE_802_QBH, VIF_TYPE_HYPERV, VIF_TYPE_MIDONET, -- VIF_TYPE_OTHER] -+ VIF_TYPE_CONTRAIL, VIF_TYPE_OTHER] - - - EXTENDED_ATTRIBUTES_2_0 = { -diff --git neutron/plugins/juniper/__init__.py neutron/plugins/juniper/__init__.py -new file mode 100644 -index 0000000..7bc8217 ---- /dev/null -+++ neutron/plugins/juniper/__init__.py -@@ -0,0 +1,17 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay Juniper Networks. -diff --git neutron/plugins/juniper/contrail/__init__.py neutron/plugins/juniper/contrail/__init__.py -new file mode 100644 -index 0000000..7bc8217 ---- /dev/null -+++ neutron/plugins/juniper/contrail/__init__.py -@@ -0,0 +1,17 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay Juniper Networks. -diff --git neutron/plugins/juniper/contrail/contrailplugin.py neutron/plugins/juniper/contrail/contrailplugin.py -new file mode 100644 -index 0000000..8028b32 ---- /dev/null -+++ neutron/plugins/juniper/contrail/contrailplugin.py -@@ -0,0 +1,1187 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. 
You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay, Rudra Rugge, Atul Moghe Juniper Networks. -+ -+ -+import ConfigParser -+import logging -+from pprint import pformat -+ -+#from neutron.manager import NeutronManager -+from neutron.common import exceptions as exc -+from neutron.db import db_base_plugin_v2 -+from neutron.db import portbindings_base -+from neutron.extensions import l3 -+from neutron.extensions import securitygroup -+from neutron.extensions import portbindings -+#from neutron.extensions import vpcroutetable -+ -+import cgitb -+from httplib2 import Http -+from oslo.config import cfg -+import re -+import string -+import sys -+ -+import ctdb.config_db -+ -+LOG = logging.getLogger(__name__) -+ -+vnc_opts = [ -+ cfg.StrOpt('api_server_ip', default='127.0.0.1'), -+ cfg.StrOpt('api_server_port', default='8082'), -+] -+ -+ -+def _read_cfg(cfg_parser, section, option, default): -+ try: -+ val = cfg_parser.get(section, option) -+ except (AttributeError, -+ ConfigParser.NoOptionError, -+ ConfigParser.NoSectionError): -+ val = default -+ -+ return val -+ -+ -+def _read_cfg_boolean(cfg_parser, section, option, default): -+ try: -+ val = cfg_parser.getboolean(section, option) -+ except (AttributeError, ValueError, -+ ConfigParser.NoOptionError, -+ ConfigParser.NoSectionError): -+ val = default -+ -+ return val -+ -+ -+class ContrailPlugin(db_base_plugin_v2.NeutronDbPluginV2, -+ l3.RouterPluginBase, -+ securitygroup.SecurityGroupPluginBase, -+ portbindings_base.PortBindingBaseMixin): -+#vpcroutetable.RouteTablePluginBase): -+ -+ supported_extension_aliases = ["ipam", "policy", "security-group", -+ "router", "route-table", "port-security", -+ "binding",] -+ __native_bulk_support = False -+ _cfgdb = None -+ _args = None -+ _tenant_id_dict = {} -+ _tenant_name_dict = {} -+ -+ @classmethod -+ def _parse_class_args(cls, cfg_parser): -+ cfg_parser.read("/etc/neutron/plugins/juniper/" -+ "contrail/ContrailPlugin.ini" -+ ) -+ cls._multi_tenancy = _read_cfg_boolean(cfg_parser, 'APISERVER', -+ 'multi_tenancy', False) -+ cls._admin_token = _read_cfg(cfg_parser, 'KEYSTONE', 'admin_token', '') -+ cls._auth_url = _read_cfg(cfg_parser, 'KEYSTONE', 'auth_url', '') -+ cls._admin_user = _read_cfg(cfg_parser, 'KEYSTONE', 'admin_user', -+ 'user1') -+ cls._admin_password = _read_cfg(cfg_parser, 'KEYSTONE', -+ 'admin_password', 'password1') -+ cls._admin_tenant_name = _read_cfg(cfg_parser, 'KEYSTONE', -+ 'admin_tenant_name', -+ 'default-domain') -+ cls._tenants_api = '%s/tenants' % (cls._auth_url) -+ pass -+ -+ @classmethod -+ def _connect_to_db(cls): -+ """ -+ Many instantiations of plugin (base + extensions) but need to have -+ only one config db conn (else error from ifmap-server) -+ """ -+ cls._cfgdb_map = {} -+ if cls._cfgdb is None: -+ sip = cfg.CONF.APISERVER.api_server_ip -+ sport = cfg.CONF.APISERVER.api_server_port -+ # Initialize connection to DB and add default entries -+ cls._cfgdb = ctdb.config_db.DBInterface(cls._admin_user, -+ cls._admin_password, -+ cls._admin_tenant_name, -+ sip, sport) -+ cls._cfgdb.manager = cls -+ -+ @classmethod -+ def _get_user_cfgdb(cls, context): -+ if not 
cls._multi_tenancy: -+ return cls._cfgdb -+ user_id = context.user_id -+ role = string.join(context.roles, ",") -+ if user_id not in cls._cfgdb_map: -+ cls._cfgdb_map[user_id] = ctdb.config_db.DBInterface( -+ cls._admin_user, cls._admin_password, cls._admin_tenant_name, -+ cfg.CONF.APISERVER.api_server_ip, -+ cfg.CONF.APISERVER.api_server_port, -+ user_info={'user_id': user_id, 'role': role}) -+ cls._cfgdb_map[user_id].manager = cls -+ -+ return cls._cfgdb_map[user_id] -+ -+ @classmethod -+ def _tenant_list_from_keystone(cls): -+ # get all tenants -+ hdrs = {'X-Auth-Token': cls._admin_token, -+ 'Content-Type': 'application/json'} -+ try: -+ rsp, content = Http().request(cls._tenants_api, -+ method="GET", headers=hdrs) -+ if rsp.status != 200: -+ return -+ except Exception: -+ return -+ -+ # transform needed for python compatibility -+ content = re.sub('true', 'True', content) -+ content = re.sub('null', 'None', content) -+ content = eval(content) -+ -+ # bail if response is unexpected -+ if 'tenants' not in content: -+ return -+ -+ # create a dictionary for id->name and name->id mapping -+ for tenant in content['tenants']: -+ print 'Adding tenant %s:%s to cache' % (tenant['name'], -+ tenant['id']) -+ cls._tenant_id_dict[tenant['id']] = tenant['name'] -+ cls._tenant_name_dict[tenant['name']] = tenant['id'] -+ -+ def update_security_group(self, context, id, security_group): -+ pass -+ -+ def __init__(self): -+ cfg.CONF.register_opts(vnc_opts, 'APISERVER') -+ -+ cfg_parser = ConfigParser.ConfigParser() -+ ContrailPlugin._parse_class_args(cfg_parser) -+ -+ ContrailPlugin._connect_to_db() -+ self._cfgdb = ContrailPlugin._cfgdb -+ -+ ContrailPlugin._tenant_list_from_keystone() -+ self.base_binding_dict = self._get_base_binding_dict() -+ portbindings_base.register_port_dict_function() -+ -+ def _get_base_binding_dict(self): -+ binding = { -+ portbindings.VIF_TYPE: portbindings.VIF_TYPE_CONTRAIL, -+ portbindings.CAPABILITIES: { -+ portbindings.CAP_PORT_FILTER: -+ 'security-group' in self.supported_extension_aliases}} -+ return binding -+ -+ @classmethod -+ def tenant_id_to_name(cls, id): -+ # bail if we never built the list successfully -+ if len(cls._tenant_id_dict) == 0: -+ return id -+ # check cache -+ if id in cls._tenant_id_dict: -+ return cls._tenant_id_dict[id] -+ # otherwise refresh -+ cls._tenant_list_from_keystone() -+ # second time's a charm? -+ return cls._tenant_id_dict[id] if id in cls._tenant_id_dict else id -+ -+ @classmethod -+ def tenant_name_to_id(cls, name): -+ # bail if we never built the list successfully -+ if len(cls._tenant_name_dict) == 0: -+ return name -+ # check cache -+ if name in cls._tenant_name_dict: -+ return cls._tenant_name_dict[name] -+ # otherwise refresh -+ cls._tenant_list_from_keystone() -+ # second time's a charm? -+ if name in cls._tenant_name_dict: -+ return cls._tenant_name_dict[name] -+ else: -+ return name -+ -+ # Network API handlers -+ def create_network(self, context, network): -+ """ -+ Creates a new Virtual Network, and assigns it -+ a symbolic name. 
-+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ net_info = cfgdb.network_create(network['network']) -+ -+ # verify transformation is conforming to api -+ net_dict = self._make_network_dict(net_info['q_api_data'], -+ None, False) -+ -+ net_dict.update(net_info['q_extra_data']) -+ -+ LOG.debug("create_network(): " + pformat(net_dict) + "\n") -+ return net_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_network(self, context, id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ net_info = cfgdb.network_read(id, fields) -+ -+ # verify transformation is conforming to api -+ if not fields: -+ # should return all fields -+ net_dict = self._make_network_dict(net_info['q_api_data'], -+ fields, False) -+ net_dict.update(net_info['q_extra_data']) -+ else: -+ net_dict = net_info['q_api_data'] -+ -+ LOG.debug("get_network(): " + pformat(net_dict)) -+ return self._fields(net_dict, fields) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def update_network(self, context, net_id, network): -+ """ -+ Updates the attributes of a particular Virtual Network. -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ net_info = cfgdb.network_update(net_id, network['network']) -+ -+ # verify transformation is conforming to api -+ net_dict = self._make_network_dict(net_info['q_api_data'], -+ None, False) -+ -+ net_dict.update(net_info['q_extra_data']) -+ -+ LOG.debug("update_network(): " + pformat(net_dict)) -+ return net_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_network(self, context, net_id): -+ """ -+ Deletes the network with the specified network identifier -+ belonging to the specified tenant. 
-+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.network_delete(net_id) -+ LOG.debug("delete_network(): " + pformat(net_id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_networks(self, context, filters=None, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ nets_info = cfgdb.network_list(filters) -+ -+ nets_dicts = [] -+ for n_info in nets_info: -+ # verify transformation is conforming to api -+ n_dict = self._make_network_dict(n_info['q_api_data'], fields, -+ False) -+ -+ n_dict.update(n_info['q_extra_data']) -+ nets_dicts.append(n_dict) -+ -+ LOG.debug( -+ "get_networks(): filters: " + pformat(filters) + " data: " -+ + pformat(nets_dicts)) -+ return nets_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_networks_count(self, context, filters=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ nets_count = cfgdb.network_count(filters) -+ LOG.debug("get_networks_count(): " + str(nets_count)) -+ return nets_count -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ # Subnet API handlers -+ def create_subnet(self, context, subnet): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ subnet_info = cfgdb.subnet_create(subnet['subnet']) -+ -+ # verify transformation is conforming to api -+ subnet_dict = self._make_subnet_dict(subnet_info['q_api_data']) -+ -+ subnet_dict.update(subnet_info['q_extra_data']) -+ -+ LOG.debug("create_subnet(): " + pformat(subnet_dict)) -+ return subnet_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_subnet(self, context, subnet_id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ subnet_info = cfgdb.subnet_read(subnet_id) -+ -+ # verify transformation is conforming to api -+ subnet_dict = self._make_subnet_dict(subnet_info['q_api_data'], -+ fields) -+ -+ subnet_dict.update(subnet_info['q_extra_data']) -+ -+ LOG.debug("get_subnet(): " + pformat(subnet_dict)) -+ return self._fields(subnet_dict, fields) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def update_subnet(self, context, subnet_id, subnet): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ subnet_info = cfgdb.subnet_update(subnet_id, subnet['subnet']) -+ -+ # verify transformation is conforming to api -+ subnet_dict = self._make_subnet_dict(subnet_info['q_api_data']) -+ -+ subnet_dict.update(subnet_info['q_extra_data']) -+ -+ LOG.debug("update_subnet(): " + pformat(subnet_dict)) -+ return subnet_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_subnet(self, context, subnet_id): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.subnet_delete(subnet_id) -+ -+ LOG.debug("delete_subnet(): " + pformat(subnet_id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_subnets(self, context, filters=None, fields=None): -+ """ -+ Called from Neutron API -> get_ -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ subnets_info = cfgdb.subnets_list(filters) -+ -+ subnets_dicts = [] -+ for sn_info in subnets_info: -+ # verify transformation is conforming to api -+ sn_dict = self._make_subnet_dict(sn_info['q_api_data'], fields) -+ -+ sn_dict.update(sn_info['q_extra_data']) -+ 
subnets_dicts.append(sn_dict) -+ -+ LOG.debug( -+ "get_subnets(): filters: " + pformat(filters) + " data: " -+ + pformat(subnets_dicts)) -+ return subnets_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_subnets_count(self, context, filters=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ subnets_count = cfgdb.subnets_count(filters) -+ LOG.debug("get_subnets_count(): " + str(subnets_count)) -+ return subnets_count -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ # Ipam API handlers -+ def create_ipam(self, context, ipam): -+ """ -+ Creates a new IPAM, and assigns it -+ a symbolic name. -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ ipam_info = cfgdb.ipam_create(ipam['ipam']) -+ -+ ##verify transformation is conforming to api -+ #ipam_dict = self._make_ipam_dict(ipam_info) -+ ipam_dict = ipam_info['q_api_data'] -+ ipam_dict.update(ipam_info['q_extra_data']) -+ -+ LOG.debug("create_ipam(): " + pformat(ipam_dict)) -+ return ipam_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_ipam(self, context, id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ ipam_info = cfgdb.ipam_read(id) -+ -+ ## verify transformation is conforming to api -+ #ipam_dict = self._make_ipam_dict(ipam_info) -+ ipam_dict = ipam_info['q_api_data'] -+ ipam_dict.update(ipam_info['q_extra_data']) -+ -+ LOG.debug("get_ipam(): " + pformat(ipam_dict)) -+ return ipam_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def update_ipam(self, context, id, ipam): -+ """ -+ Updates the attributes of a particular IPAM. 
-+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ ipam_info = cfgdb.ipam_update(id, ipam) -+ -+ ## verify transformation is conforming to api -+ #ipam_dict = self._make_ipam_dict(ipam_info) -+ ipam_dict = ipam_info['q_api_data'] -+ ipam_dict.update(ipam_info['q_extra_data']) -+ -+ LOG.debug("update_ipam(): " + pformat(ipam_dict)) -+ return ipam_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_ipam(self, context, ipam_id): -+ """ -+ Deletes the ipam with the specified identifier -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.ipam_delete(ipam_id) -+ -+ LOG.debug("delete_ipam(): " + pformat(ipam_id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_ipams(self, context, filters=None, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ ipams_info = cfgdb.ipam_list(filters) -+ -+ ipams_dicts = [] -+ for ipam_info in ipams_info: -+ # verify transformation is conforming to api -+ #ipam_dict = self._make_ipam_dict(ipam_info) -+ ipam_dict = ipam_info['q_api_data'] -+ ipam_dict.update(ipam_info['q_extra_data']) -+ ipams_dicts.append(ipam_dict) -+ -+ LOG.debug("get_ipams(): " + pformat(ipams_dicts)) -+ return ipams_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_ipams_count(self, context, filters=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ ipams_count = cfgdb.ipams_count(filters) -+ LOG.debug("get_ipams_count(): " + str(ipams_count)) -+ return ipams_count -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ # Policy API handlers -+ def create_policy(self, context, policy): -+ """ -+ Creates a new Policy, and assigns it -+ a symbolic name. -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ policy_info = cfgdb.policy_create(policy['policy']) -+ -+ ##verify transformation is conforming to api -+ #ipam_dict = self._make_ipam_dict(ipam_info) -+ policy_dict = policy_info['q_api_data'] -+ policy_dict.update(policy_info['q_extra_data']) -+ -+ LOG.debug("create_policy(): " + pformat(policy_dict)) -+ return policy_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_policy(self, context, id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ policy_info = cfgdb.policy_read(id) -+ -+ ## verify transformation is conforming to api -+ #ipam_dict = self._make_ipam_dict(ipam_info) -+ policy_dict = policy_info['q_api_data'] -+ policy_dict.update(policy_info['q_extra_data']) -+ -+ LOG.debug("get_policy(): " + pformat(policy_dict)) -+ return policy_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def update_policy(self, context, id, policy): -+ """ -+ Updates the attributes of a particular Policy. 
-+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ policy_info = cfgdb.policy_update(id, policy) -+ -+ ## verify transformation is conforming to api -+ #ipam_dict = self._make_ipam_dict(ipam_info) -+ policy_dict = policy_info['q_api_data'] -+ policy_dict.update(policy_info['q_extra_data']) -+ -+ LOG.debug("update_policy(): " + pformat(policy_dict)) -+ return policy_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_policy(self, context, policy_id): -+ """ -+ Deletes the Policy with the specified identifier -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.policy_delete(policy_id) -+ -+ LOG.debug("delete_policy(): " + pformat(policy_id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_policys(self, context, filters=None, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ policys_info = cfgdb.policy_list(filters) -+ -+ policys_dicts = [] -+ for policy_info in policys_info: -+ # verify transformation is conforming to api -+ #ipam_dict = self._make_ipam_dict(ipam_info) -+ policy_dict = policy_info['q_api_data'] -+ policy_dict.update(policy_info['q_extra_data']) -+ policys_dicts.append(policy_dict) -+ -+ LOG.debug("get_policys(): " + pformat(policys_dicts)) -+ return policys_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_policy_count(self, context, filters=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ policy_count = cfgdb.policy_count(filters) -+ LOG.debug("get_policy_count(): " + str(policy_count)) -+ return policy_count -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ # Floating IP API handlers -+ def _make_floatingip_dict(self, floatingip, fields=None): -+ res = {'id': floatingip['id'], -+ 'tenant_id': floatingip['tenant_id'], -+ 'floating_ip_address': floatingip['floating_ip_address'], -+ 'floating_network_id': floatingip['floating_network_id'], -+ 'router_id': floatingip['router_id'], -+ 'port_id': floatingip['fixed_port_id'], -+ 'fixed_ip_address': floatingip['fixed_ip_address']} -+ return self._fields(res, fields) -+ -+ def create_floatingip(self, context, floatingip): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ fip_info = cfgdb.floatingip_create(floatingip['floatingip']) -+ -+ # verify transformation is conforming to api -+ fip_dict = self._make_floatingip_dict(fip_info['q_api_data']) -+ -+ fip_dict.update(fip_info['q_extra_data']) -+ -+ LOG.debug("create_floatingip(): " + pformat(fip_dict)) -+ return fip_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def update_floatingip(self, context, fip_id, floatingip): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ fip_info = cfgdb.floatingip_update(fip_id, -+ floatingip['floatingip']) -+ -+ # verify transformation is conforming to api -+ fip_dict = self._make_floatingip_dict(fip_info['q_api_data']) -+ -+ fip_dict.update(fip_info['q_extra_data']) -+ -+ LOG.debug("update_floatingip(): " + pformat(fip_dict)) -+ return fip_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_floatingip(self, context, id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ fip_info = cfgdb.floatingip_read(id) -+ -+ # verify transformation is conforming to api -+ fip_dict = 
self._make_floatingip_dict(fip_info['q_api_data']) -+ -+ fip_dict.update(fip_info['q_extra_data']) -+ -+ LOG.debug("get_floatingip(): " + pformat(fip_dict)) -+ return fip_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_floatingip(self, context, fip_id): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.floatingip_delete(fip_id) -+ LOG.debug("delete_floating(): " + pformat(fip_id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_floatingips(self, context, filters=None, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ fips_info = cfgdb.floatingip_list(filters) -+ -+ fips_dicts = [] -+ for fip_info in fips_info: -+ # verify transformation is conforming to api -+ fip_dict = self._make_floatingip_dict(fip_info['q_api_data']) -+ -+ fip_dict.update(fip_info['q_extra_data']) -+ fips_dicts.append(fip_dict) -+ -+ LOG.debug("get_floatingips(): " + pformat(fips_dicts)) -+ return fips_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_floatingips_count(self, context, filters=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ floatingips_count = cfgdb.floatingip_count(filters) -+ LOG.debug("get_floatingips_count(): " + str(floatingips_count)) -+ return floatingips_count -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ # Port API handlers -+ def create_port(self, context, port): -+ """ -+ Creates a port on the specified Virtual Network. -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ port_info = cfgdb.port_create(port['port']) -+ -+ # verify transformation is conforming to api -+ port_dict = self._make_port_dict(port_info['q_api_data']) -+ self._process_portbindings_create_and_update(context, -+ port['port'], -+ port_dict) -+ -+ port_dict.update(port_info['q_extra_data']) -+ -+ -+ LOG.debug("create_port(): " + pformat(port_dict)) -+ return port_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_port(self, context, port_id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ port_info = cfgdb.port_read(port_id) -+ -+ # verify transformation is conforming to api -+ port_dict = self._make_port_dict(port_info['q_api_data'], fields) -+ self._process_portbindings_create_and_update(context, -+ port_info, -+ port_dict) -+ -+ port_dict.update(port_info['q_extra_data']) -+ -+ LOG.debug("get_port(): " + pformat(port_dict)) -+ return self._fields(port_dict, fields) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def update_port(self, context, port_id, port): -+ """ -+ Updates the attributes of a port on the specified Virtual Network. 
-+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ port_info = cfgdb.port_update(port_id, port['port']) -+ -+ # verify transformation is conforming to api -+ port_dict = self._make_port_dict(port_info['q_api_data']) -+ self._process_portbindings_create_and_update(context, -+ port['port'], -+ port_dict) -+ -+ port_dict.update(port_info['q_extra_data']) -+ -+ LOG.debug("update_port(): " + pformat(port_dict)) -+ return port_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_port(self, context, port_id): -+ """ -+ Deletes a port on a specified Virtual Network, -+ if the port contains a remote interface attachment, -+ the remote interface is first un-plugged and then the port -+ is deleted. -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.port_delete(port_id) -+ LOG.debug("delete_port(): " + pformat(port_id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_ports(self, context, filters=None, fields=None): -+ """ -+ Retrieves all port identifiers belonging to the -+ specified Virtual Network. -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ ports_info = cfgdb.port_list(filters) -+ -+ ports_dicts = [] -+ for p_info in ports_info: -+ # verify transformation is conforming to api -+ p_dict = self._make_port_dict(p_info['q_api_data'], fields) -+ self._process_portbindings_create_and_update(context, -+ p_info, -+ p_dict) -+ -+ p_dict.update(p_info['q_extra_data']) -+ ports_dicts.append(p_dict) -+ -+ LOG.debug( -+ "get_ports(): filter: " + pformat(filters) + 'data: ' -+ + pformat(ports_dicts)) -+ return ports_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_ports_count(self, context, filters=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ ports_count = cfgdb.port_count(filters) -+ LOG.debug("get_ports_count(): " + str(ports_count)) -+ return ports_count -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id): -+ """ -+ Attaches a remote interface to the specified port on the -+ specified Virtual Network. -+ """ -+ port = self._get_port(tenant_id, net_id, port_id) -+ # Validate attachment -+ self._validate_attachment(tenant_id, net_id, port_id, -+ remote_interface_id) -+ if port['interface_id']: -+ raise exc.PortInUse(net_id=net_id, port_id=port_id, -+ att_id=port['interface_id']) -+ -+ def unplug_interface(self, tenant_id, net_id, port_id): -+ """ -+ Detaches a remote interface from the specified port on the -+ specified Virtual Network. 
-+ """ -+ self._get_port(tenant_id, net_id, port_id) -+ -+ # VPC route table handlers -+ def _make_route_table_routes_dict(self, route_table_route, fields=None): -+ res = {'prefix': route_table_route['prefix'], -+ 'next_hop': route_table_route['next_hop']} -+ -+ return self._fields(res, fields) -+ -+ def _make_route_table_dict(self, route_table, fields=None): -+ res = {'id': route_table['id'], -+ 'name': route_table['name'], -+ 'fq_name': route_table['fq_name'], -+ 'tenant_id': route_table['tenant_id']} -+ if route_table['routes']: -+ res['routes'] = [self._make_route_table_routes_dict(r) -+ for r in route_table['routes']['route']] -+ return self._fields(res, fields) -+ -+ def create_route_table(self, context, route_table): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ rt_info = cfgdb.route_table_create( -+ route_table['route_table']) -+ -+ # verify transformation is conforming to api -+ rt_dict = self._make_route_table_dict(rt_info['q_api_data']) -+ rt_dict.update(rt_info['q_extra_data']) -+ LOG.debug("create_route_table(): " + pformat(rt_dict)) -+ return rt_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def update_route_table(self, context, id, route_table): -+ """ -+ Updates the attributes of a particular route table. -+ """ -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ rt_info = cfgdb.route_table_update(id, route_table['route_table']) -+ -+ rt_dict = self._make_route_table_dict(rt_info['q_api_data']) -+ rt_dict.update(rt_info['q_extra_data']) -+ LOG.debug("create_route_table(): " + pformat(rt_dict)) -+ return rt_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_route_table(self, context, id): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.route_table_delete(id) -+ LOG.debug("delete_route_table(): " + pformat(id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_route_tables(self, context, filters=None, fields=None, -+ sorts=None, limit=None, marker=None, -+ page_reverse=False): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ route_tables_info = cfgdb.route_table_list(context, filters) -+ -+ route_tables_dicts = [] -+ for rt_info in route_tables_info: -+ # verify transformation is conforming to api -+ rt_dict = self._make_route_table_dict(rt_info['q_api_data'], -+ fields) -+ -+ rt_dict.update(rt_info['q_extra_data']) -+ route_tables_dicts.append(rt_dict) -+ -+ LOG.debug( -+ "get_route_tables(): filter: " + pformat(filters) -+ + 'data: ' + pformat(route_tables_dicts)) -+ return route_tables_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_route_table(self, context, id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ rt_info = cfgdb.route_table_read(id) -+ -+ # verify transformation is conforming to api -+ rt_dict = self._make_route_table_dict(rt_info['q_api_data'], -+ fields) -+ -+ rt_dict.update(rt_info['q_extra_data']) -+ -+ LOG.debug("get_route_table(): " + pformat(rt_dict)) -+ return self._fields(rt_dict, fields) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ # VPC route table svc instance handlers -+ def _make_svc_instance_dict(self, svc_instance, fields=None): -+ res = {'id': svc_instance['id'], -+ 'name': svc_instance['name'], -+ 'tenant_id': svc_instance['tenant_id']} -+ if svc_instance['internal_net']: -+ 
res['internal_net'] = svc_instance['internal_net'] -+ if svc_instance['external_net']: -+ res['external_net'] = svc_instance['external_net'] -+ return self._fields(res, fields) -+ -+ def create_nat_instance(self, context, nat_instance): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ si_info = cfgdb.svc_instance_create( -+ nat_instance['nat_instance']) -+ -+ # verify transformation is conforming to api -+ si_dict = self._make_svc_instance_dict(si_info['q_api_data']) -+ -+ si_dict.update(si_info['q_extra_data']) -+ -+ LOG.debug("create_nat_instance(): " + pformat(si_dict)) -+ return si_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_nat_instance(self, context, id): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.svc_instance_delete(id) -+ LOG.debug("delete_nat_instance(): " + pformat(id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_nat_instances(self, context, filters=None, fields=None, -+ sorts=None, limit=None, marker=None, -+ page_reverse=False): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ svc_instances_info = cfgdb.svc_instance_list(context, filters) -+ -+ svc_instances_dicts = [] -+ for si_info in svc_instances_info: -+ # verify transformation is conforming to api -+ si_dict = self._make_svc_instance_dict(si_info['q_api_data'], -+ fields) -+ -+ si_dict.update(si_info['q_extra_data']) -+ svc_instances_dicts.append(si_dict) -+ -+ LOG.debug( -+ "get_nat_instances(): filter: " + pformat(filters) -+ + 'data: ' + pformat(svc_instances_dicts)) -+ return svc_instances_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_nat_instance(self, context, id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ si_info = cfgdb.svc_instance_read(id) -+ -+ # verify transformation is conforming to api -+ si_dict = self._make_svc_instance_dict(si_info['q_api_data'], -+ fields) -+ -+ si_dict.update(si_info['q_extra_data']) -+ -+ LOG.debug("get_nat_instance(): " + pformat(si_dict)) -+ return self._fields(si_dict, fields) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ # Security Group handlers -+ def _make_security_group_rule_dict(self, security_group_rule, fields=None): -+ res = {'id': security_group_rule['id'], -+ 'tenant_id': security_group_rule['tenant_id'], -+ 'security_group_id': security_group_rule['security_group_id'], -+ 'ethertype': security_group_rule['ethertype'], -+ 'direction': security_group_rule['direction'], -+ 'protocol': security_group_rule['protocol'], -+ 'port_range_min': security_group_rule['port_range_min'], -+ 'port_range_max': security_group_rule['port_range_max'], -+ 'remote_ip_prefix': security_group_rule['remote_ip_prefix'], -+ 'remote_group_id': security_group_rule['remote_group_id']} -+ -+ return self._fields(res, fields) -+ -+ def _make_security_group_dict(self, security_group, fields=None): -+ res = {'id': security_group['id'], -+ 'name': security_group['name'], -+ 'tenant_id': security_group['tenant_id'], -+ 'description': security_group['description']} -+ res['security_group_rules'] = [self._make_security_group_rule_dict(r) -+ for r in security_group['rules']] -+ return self._fields(res, fields) -+ -+ def create_security_group(self, context, security_group): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ sg_info = cfgdb.security_group_create( -+ 
security_group['security_group']) -+ -+ # verify transformation is conforming to api -+ sg_dict = self._make_security_group_dict(sg_info['q_api_data']) -+ -+ sg_dict.update(sg_info['q_extra_data']) -+ -+ LOG.debug("create_security_group(): " + pformat(sg_dict)) -+ return sg_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_security_group(self, context, id): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.security_group_delete(id) -+ LOG.debug("delete_security_group(): " + pformat(id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_security_groups(self, context, filters=None, fields=None, -+ sorts=None, limit=None, marker=None, -+ page_reverse=False): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ security_groups_info = cfgdb.security_group_list(context, filters) -+ -+ security_groups_dicts = [] -+ for sg_info in security_groups_info: -+ # verify transformation is conforming to api -+ sg_dict = self._make_security_group_dict(sg_info['q_api_data'], -+ fields) -+ -+ sg_dict.update(sg_info['q_extra_data']) -+ security_groups_dicts.append(sg_dict) -+ -+ LOG.debug( -+ "get_security_groups(): filter: " + pformat(filters) -+ + 'data: ' + pformat(security_groups_dicts)) -+ return security_groups_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_security_group(self, context, id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ sg_info = cfgdb.security_group_read(id) -+ -+ # verify transformation is conforming to api -+ sg_dict = self._make_security_group_dict(sg_info['q_api_data'], -+ fields) -+ -+ sg_dict.update(sg_info['q_extra_data']) -+ -+ LOG.debug("get_security_group(): " + pformat(sg_dict)) -+ return self._fields(sg_dict, fields) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def create_security_group_rule(self, context, security_group_rule): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ sgr_info = cfgdb.security_group_rule_create( -+ security_group_rule['security_group_rule']) -+ -+ # verify transformation is conforming to api -+ sgr_dict = self._make_security_group_rule_dict( -+ sgr_info['q_api_data']) -+ sgr_dict.update(sgr_info['q_extra_data']) -+ -+ LOG.debug("create_security_group_rule(): " + pformat(sgr_dict)) -+ return sgr_dict -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def delete_security_group_rule(self, context, id): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ cfgdb.security_group_rule_delete(id) -+ LOG.debug("delete_security_group_rule(): " + pformat(id)) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_security_group_rules(self, context, filters=None, fields=None, -+ sorts=None, limit=None, marker=None, -+ page_reverse=False): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ security_group_rules_info = cfgdb.security_group_rule_list(filters) -+ -+ security_group_rules_dicts = [] -+ for sgr_info in security_group_rules_info: -+ for sgr in sgr_info: -+ # verify transformation is conforming to api -+ sgr_dict = self._make_security_group_rule_dict( -+ sgr['q_api_data'], fields) -+ sgr_dict.update(sgr['q_extra_data']) -+ security_group_rules_dicts.append(sgr_dict) -+ -+ LOG.debug( -+ "get_security_group_rules(): filter: " + pformat(filters) + -+ 'data: ' 
+ pformat(security_group_rules_dicts)) -+ return security_group_rules_dicts -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -+ -+ def get_security_group_rule(self, context, id, fields=None): -+ try: -+ cfgdb = ContrailPlugin._get_user_cfgdb(context) -+ sgr_info = cfgdb.security_group_rule_read(id) -+ -+ # verify transformation is conforming to api -+ sgr_dict = {} -+ if sgr_info != {}: -+ sgr_dict = self._make_security_group_rule_dict( -+ sgr_info['q_api_data'], fields) -+ sgr_dict.update(sgr_info['q_extra_data']) -+ -+ LOG.debug("get_security_group_rule(): " + pformat(sgr_dict)) -+ return self._fields(sgr_dict, fields) -+ except Exception as e: -+ cgitb.Hook(format="text").handle(sys.exc_info()) -+ raise e -diff --git neutron/plugins/juniper/contrail/ctdb/__init__.py neutron/plugins/juniper/contrail/ctdb/__init__.py -new file mode 100644 -index 0000000..7bc8217 ---- /dev/null -+++ neutron/plugins/juniper/contrail/ctdb/__init__.py -@@ -0,0 +1,17 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay Juniper Networks. -diff --git neutron/plugins/juniper/contrail/ctdb/config_db.py neutron/plugins/juniper/contrail/ctdb/config_db.py -new file mode 100644 -index 0000000..4a87a41 ---- /dev/null -+++ neutron/plugins/juniper/contrail/ctdb/config_db.py -@@ -0,0 +1,2238 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay, Rudra Rugge, Atul Moghe Juniper Networks. 
-+ -+import json -+import re -+import requests -+import socket -+import time -+import uuid -+from netaddr import IPNetwork, IPSet, IPAddress -+ -+from neutron.api.v2 import attributes as attr -+from neutron.common import constants -+from neutron.common import exceptions -+from neutron.extensions import portbindings -+from vnc_api.common import exceptions as vnc_exc -+from vnc_api import vnc_api -+ -+_DEFAULT_HEADERS = { -+ 'Content-type': 'application/json; charset="UTF-8"', } -+ -+CREATE = 1 -+READ = 2 -+UPDATE = 3 -+DELETE = 4 -+ -+ -+class DBInterface(object): -+ """ -+ An instance of this class forwards requests to vnc cfg api (web)server -+ """ -+ Q_URL_PREFIX = '/extensions/ct' -+ -+ def __init__(self, admin_name, admin_password, admin_tenant_name, -+ api_srvr_ip, api_srvr_port, user_info=None): -+ self._api_srvr_ip = api_srvr_ip -+ self._api_srvr_port = api_srvr_port -+ -+ self._db_cache = {} -+ self._db_cache['q_networks'] = {} -+ self._db_cache['q_subnets'] = {} -+ self._db_cache['q_subnet_maps'] = {} -+ self._db_cache['q_policies'] = {} -+ self._db_cache['q_ipams'] = {} -+ self._db_cache['q_floatingips'] = {} -+ self._db_cache['q_ports'] = {} -+ self._db_cache['q_fixed_ip_to_subnet'] = {} -+ #obj-uuid to tenant-uuid mapping -+ self._db_cache['q_obj_to_tenant'] = {} -+ #port count per tenant-id -+ self._db_cache['q_tenant_port_count'] = {} -+ self._db_cache['vnc_networks'] = {} -+ self._db_cache['vnc_ports'] = {} -+ self._db_cache['vnc_projects'] = {} -+ self._db_cache['vnc_instance_ips'] = {} -+ -+ # Retry till a api-server is up -+ connected = False -+ while not connected: -+ try: -+ self._vnc_lib = vnc_api.VncApi( -+ admin_name, admin_password, -+ admin_tenant_name, api_srvr_ip, -+ api_srvr_port, '/', user_info=user_info) -+ connected = True -+ except requests.exceptions.RequestException: -+ time.sleep(3) -+ -+ # changes 'net_fq_name_str pfx/len' key to 'net_id pfx/len' key -+ subnet_map = self._vnc_lib.kv_retrieve(key=None) -+ for kv_dict in subnet_map: -+ key = kv_dict['key'] -+ if len(key.split()) == 1: -+ subnet_id = key -+ # uuid key, fixup value portion to 'net_id pfx/len' format -+ # if not already so -+ if len(kv_dict['value'].split(':')) == 1: -+ # new format already, skip -+ continue -+ -+ net_fq_name = kv_dict['value'].split()[0].split(':') -+ try: -+ net_obj = self._virtual_network_read(fq_name=net_fq_name) -+ except vnc_exc.NoIdError: -+ self._vnc_lib.kv_delete(subnet_id) -+ continue -+ -+ new_subnet_key = '%s %s' % (net_obj.uuid, -+ kv_dict['value'].split()[1]) -+ self._vnc_lib.kv_store(subnet_id, new_subnet_key) -+ else: # subnet key -+ if len(key.split()[0].split(':')) == 1: -+ # new format already, skip -+ continue -+ -+ # delete old key, convert to new key format and save -+ old_subnet_key = key -+ self._vnc_lib.kv_delete(old_subnet_key) -+ -+ subnet_id = kv_dict['value'] -+ net_fq_name = key.split()[0].split(':') -+ try: -+ net_obj = self._virtual_network_read(fq_name=net_fq_name) -+ except vnc_exc.NoIdError: -+ continue -+ -+ new_subnet_key = '%s %s' % (net_obj.uuid, key.split()[1]) -+ self._vnc_lib.kv_store(new_subnet_key, subnet_id) -+ -+ # Helper routines -+ def _request_api_server(self, url, method, data=None, headers=None): -+ if method == 'GET': -+ return requests.get(url) -+ if method == 'POST': -+ return requests.post(url, data=data, headers=headers) -+ if method == 'DELETE': -+ return requests.delete(url) -+ -+ def _relay_request(self, request): -+ """ -+ Send received request to api server -+ """ -+ # chop neutron parts of url and add api server 
address -+ url_path = re.sub(self.Q_URL_PREFIX, '', request.environ['PATH_INFO']) -+ url = "http://%s:%s%s" % (self._api_srvr_ip, self._api_srvr_port, -+ url_path) -+ -+ return self._request_api_server( -+ url, request.environ['REQUEST_METHOD'], -+ request.body, {'Content-type': request.environ['CONTENT_TYPE']}) -+ -+ def _obj_to_json(self, obj): -+ return dict((k, v) for k, v in obj.__dict__.iteritems()) -+ -+ def _ensure_instance_exists(self, instance_id): -+ instance_name = instance_id -+ instance_obj = vnc_api.VirtualMachine(instance_name) -+ try: -+ id = self._vnc_lib.obj_to_id(instance_obj) -+ instance_obj = self._vnc_lib.virtual_machine_read(id=id) -+ except Exception as e: -+ #except vnc_exc.NoIdError: # instance doesn't exist, create it -+ instance_obj.uuid = instance_id -+ self._vnc_lib.virtual_machine_create(instance_obj) -+ -+ return instance_obj -+ -+ def _ensure_default_security_group_exists(self, proj_id): -+ proj_obj = self._vnc_lib.project_read(id=proj_id) -+ sg_groups = proj_obj.get_security_groups() -+ for sg_group in sg_groups or []: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_group['uuid']) -+ if sg_obj.name == 'default': -+ return -+ -+ sg_obj = vnc_api.SecurityGroup(name='default', parent_obj=proj_obj) -+ self._vnc_lib.security_group_create(sg_obj) -+ -+ #allow all egress traffic -+ def_rule = {} -+ def_rule['port_range_min'] = 0 -+ def_rule['port_range_max'] = 65535 -+ def_rule['direction'] = 'egress' -+ def_rule['remote_ip_prefix'] = None -+ def_rule['remote_group_id'] = None -+ def_rule['protocol'] = 'any' -+ rule = self._security_group_rule_neutron_to_vnc(def_rule, CREATE) -+ self._security_group_rule_create(sg_obj.uuid, rule) -+ -+ #allow ingress traffic from within default security group -+ def_rule = {} -+ def_rule['port_range_min'] = 0 -+ def_rule['port_range_max'] = 65535 -+ def_rule['direction'] = 'ingress' -+ def_rule['remote_ip_prefix'] = None -+ def_rule['remote_group_id'] = None -+ def_rule['protocol'] = 'any' -+ rule = self._security_group_rule_neutron_to_vnc(def_rule, CREATE) -+ self._security_group_rule_create(sg_obj.uuid, rule) -+ -+ def _get_obj_tenant_id(self, q_type, obj_uuid): -+ # Get the mapping from cache, else seed cache and return -+ try: -+ return self._db_cache['q_obj_to_tenant'][obj_uuid] -+ except KeyError: -+ # Seed the cache and return -+ if q_type == 'port': -+ port_obj = self._virtual_machine_interface_read(obj_uuid) -+ net_id = port_obj.get_virtual_network_refs()[0]['uuid'] -+ # recurse up type-hierarchy -+ tenant_id = self._get_obj_tenant_id('network', net_id) -+ self._set_obj_tenant_id(obj_uuid, tenant_id) -+ return tenant_id -+ -+ if q_type == 'network': -+ net_obj = self._virtual_network_read(net_id=obj_uuid) -+ tenant_id = net_obj.parent_uuid.replace('-', '') -+ self._set_obj_tenant_id(obj_uuid, tenant_id) -+ return tenant_id -+ -+ return None -+ -+ def _set_obj_tenant_id(self, obj_uuid, tenant_uuid): -+ self._db_cache['q_obj_to_tenant'][obj_uuid] = tenant_uuid -+ -+ def _del_obj_tenant_id(self, obj_uuid): -+ try: -+ del self._db_cache['q_obj_to_tenant'][obj_uuid] -+ except Exception: -+ pass -+ -+ def _project_read(self, proj_id=None, fq_name=None): -+ if proj_id: -+ try: -+ # disable cache for now as fip pool might be put without -+ # neutron knowing it -+ raise KeyError -+ #return self._db_cache['vnc_projects'][proj_id] -+ except KeyError: -+ proj_obj = self._vnc_lib.project_read(id=proj_id) -+ fq_name_str = json.dumps(proj_obj.get_fq_name()) -+ self._db_cache['vnc_projects'][proj_id] = proj_obj -+ 
self._db_cache['vnc_projects'][fq_name_str] = proj_obj -+ return proj_obj -+ -+ if fq_name: -+ fq_name_str = json.dumps(fq_name) -+ try: -+ # disable cache for now as fip pool might be put without -+ # neutron knowing it -+ raise KeyError -+ #return self._db_cache['vnc_projects'][fq_name_str] -+ except KeyError: -+ proj_obj = self._vnc_lib.project_read(fq_name=fq_name) -+ self._db_cache['vnc_projects'][fq_name_str] = proj_obj -+ self._db_cache['vnc_projects'][proj_obj.uuid] = proj_obj -+ return proj_obj -+ -+ def _security_group_rule_create(self, sg_id, sg_rule): -+ sg_vnc = self._vnc_lib.security_group_read(id=sg_id) -+ rules = sg_vnc.get_security_group_entries() -+ if rules is None: -+ rules = vnc_api.PolicyEntriesType([sg_rule]) -+ else: -+ rules.add_policy_rule(sg_rule) -+ -+ sg_vnc.set_security_group_entries(rules) -+ self._vnc_lib.security_group_update(sg_vnc) -+ return -+ -+ def _security_group_rule_find(self, sgr_id): -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ project_sgs = self._security_group_list_project(proj_id) -+ -+ for sg in project_sgs: -+ sg_obj = self._vnc_lib.security_group_read(id=sg['uuid']) -+ sgr_entries = sg_obj.get_security_group_entries() -+ if sgr_entries is None: -+ continue -+ -+ for sg_rule in sgr_entries.get_policy_rule(): -+ if sg_rule.get_rule_uuid() == sgr_id: -+ return sg_obj, sg_rule -+ -+ return None, None -+ -+ def _security_group_rule_delete(self, sg_obj, sg_rule): -+ rules = sg_obj.get_security_group_entries() -+ rules.get_policy_rule().remove(sg_rule) -+ sg_obj.set_security_group_entries(rules) -+ self._vnc_lib.security_group_update(sg_obj) -+ return -+ -+ def _security_group_create(self, sg_obj): -+ sg_uuid = self._vnc_lib.security_group_create(sg_obj) -+ return sg_uuid -+ -+ def _security_group_delete(self, sg_id): -+ self._vnc_lib.security_group_delete(id=sg_id) -+ -+ def _svc_instance_create(self, si_obj): -+ si_uuid = self._vnc_lib.service_instance_create(si_obj) -+ st_fq_name = ['default-domain', 'nat-template'] -+ st_obj = self._vnc_lib.service_template_read(fq_name=st_fq_name) -+ si_obj.set_service_template(st_obj) -+ self._vnc_lib.service_instance_update(si_obj) -+ -+ return si_uuid -+ -+ def _svc_instance_delete(self, si_id): -+ self._vnc_lib.service_instance_delete(id=si_id) -+ -+ def _route_table_create(self, rt_obj): -+ rt_uuid = self._vnc_lib.route_table_create(rt_obj) -+ return rt_uuid -+ -+ def _route_table_delete(self, rt_id): -+ self._vnc_lib.route_table_delete(id=rt_id) -+ -+ def _virtual_network_create(self, net_obj): -+ net_uuid = self._vnc_lib.virtual_network_create(net_obj) -+ -+ return net_uuid -+ -+ def _virtual_network_read(self, net_id=None, fq_name=None): -+ if net_id: -+ try: -+ # return self._db_cache['vnc_networks'][net_id] -+ raise KeyError -+ except KeyError: -+ net_obj = self._vnc_lib.virtual_network_read(id=net_id) -+ fq_name_str = json.dumps(net_obj.get_fq_name()) -+ self._db_cache['vnc_networks'][net_id] = net_obj -+ self._db_cache['vnc_networks'][fq_name_str] = net_obj -+ return net_obj -+ -+ if fq_name: -+ fq_name_str = json.dumps(fq_name) -+ try: -+ # return self._db_cache['vnc_networks'][fq_name_str] -+ raise KeyError -+ except KeyError: -+ net_obj = self._vnc_lib.virtual_network_read(fq_name=fq_name) -+ self._db_cache['vnc_networks'][fq_name_str] = net_obj -+ self._db_cache['vnc_networks'][net_obj.uuid] = net_obj -+ return net_obj -+ -+ def _virtual_network_update(self, net_obj): -+ self._vnc_lib.virtual_network_update(net_obj) -+ # read 
back to get subnet gw allocated by api-server -+ net_obj = self._vnc_lib.virtual_network_read(id=net_obj.uuid) -+ fq_name_str = json.dumps(net_obj.get_fq_name()) -+ -+ self._db_cache['vnc_networks'][net_obj.uuid] = net_obj -+ self._db_cache['vnc_networks'][fq_name_str] = net_obj -+ -+ def _virtual_network_delete(self, net_id): -+ fq_name_str = None -+ try: -+ net_obj = self._db_cache['vnc_networks'][net_id] -+ fq_name_str = json.dumps(net_obj.get_fq_name()) -+ except KeyError: -+ pass -+ -+ self._vnc_lib.virtual_network_delete(id=net_id) -+ -+ try: -+ del self._db_cache['vnc_networks'][net_id] -+ if fq_name_str: -+ del self._db_cache['vnc_networks'][fq_name_str] -+ except KeyError: -+ pass -+ -+ def _virtual_machine_interface_create(self, port_obj): -+ port_uuid = self._vnc_lib.virtual_machine_interface_create(port_obj) -+ -+ return port_uuid -+ -+ def _virtual_machine_interface_read(self, port_id=None, fq_name=None): -+ if port_id: -+ try: -+ # return self._db_cache['vnc_ports'][port_id] -+ raise KeyError -+ except KeyError: -+ port_obj = self._vnc_lib.virtual_machine_interface_read( -+ id=port_id) -+ fq_name_str = json.dumps(port_obj.get_fq_name()) -+ self._db_cache['vnc_ports'][port_id] = port_obj -+ self._db_cache['vnc_ports'][fq_name_str] = port_obj -+ return port_obj -+ -+ if fq_name: -+ fq_name_str = json.dumps(fq_name) -+ try: -+ # return self._db_cache['vnc_ports'][fq_name_str] -+ raise KeyError -+ except KeyError: -+ port_obj = self._vnc_lib.virtual_machine_interface_read( -+ fq_name=fq_name) -+ self._db_cache['vnc_ports'][fq_name_str] = port_obj -+ self._db_cache['vnc_ports'][port_obj.uuid] = port_obj -+ return port_obj -+ -+ def _virtual_machine_interface_update(self, port_obj): -+ self._vnc_lib.virtual_machine_interface_update(port_obj) -+ fq_name_str = json.dumps(port_obj.get_fq_name()) -+ -+ self._db_cache['vnc_ports'][port_obj.uuid] = port_obj -+ self._db_cache['vnc_ports'][fq_name_str] = port_obj -+ -+ def _virtual_machine_interface_delete(self, port_id): -+ fq_name_str = None -+ try: -+ port_obj = self._db_cache['vnc_ports'][port_id] -+ fq_name_str = json.dumps(port_obj.get_fq_name()) -+ except KeyError: -+ pass -+ -+ self._vnc_lib.virtual_machine_interface_delete(id=port_id) -+ -+ try: -+ del self._db_cache['vnc_ports'][port_id] -+ if fq_name_str: -+ del self._db_cache['vnc_ports'][fq_name_str] -+ except KeyError: -+ pass -+ -+ def _instance_ip_create(self, iip_obj): -+ iip_uuid = self._vnc_lib.instance_ip_create(iip_obj) -+ -+ return iip_uuid -+ -+ def _instance_ip_read(self, instance_ip_id=None, fq_name=None): -+ if instance_ip_id: -+ try: -+ # return self._db_cache['vnc_instance_ips'][instance_ip_id] -+ raise KeyError -+ except KeyError: -+ iip_obj = self._vnc_lib.instance_ip_read(id=instance_ip_id) -+ fq_name_str = json.dumps(iip_obj.get_fq_name()) -+ self._db_cache['vnc_instance_ips'][instance_ip_id] = iip_obj -+ self._db_cache['vnc_instance_ips'][fq_name_str] = iip_obj -+ return iip_obj -+ -+ if fq_name: -+ fq_name_str = json.dumps(fq_name) -+ try: -+ # return self._db_cache['vnc_instance_ips'][fq_name_str] -+ raise KeyError -+ except KeyError: -+ iip_obj = self._vnc_lib.instance_ip_read(fq_name=fq_name) -+ self._db_cache['vnc_instance_ips'][fq_name_str] = iip_obj -+ self._db_cache['vnc_instance_ips'][iip_obj.uuid] = iip_obj -+ return iip_obj -+ -+ def _instance_ip_update(self, iip_obj): -+ self._vnc_lib.instance_ip_update(iip_obj) -+ fq_name_str = json.dumps(iip_obj.get_fq_name()) -+ -+ self._db_cache['vnc_instance_ips'][iip_obj.uuid] = iip_obj -+ 
self._db_cache['vnc_instance_ips'][fq_name_str] = iip_obj -+ -+ def _instance_ip_delete(self, instance_ip_id): -+ fq_name_str = None -+ try: -+ iip_obj = self._db_cache['vnc_instance_ips'][instance_ip_id] -+ fq_name_str = json.dumps(iip_obj.get_fq_name()) -+ except KeyError: -+ pass -+ -+ self._vnc_lib.instance_ip_delete(id=instance_ip_id) -+ -+ try: -+ del self._db_cache['vnc_instance_ips'][instance_ip_id] -+ if fq_name_str: -+ del self._db_cache['vnc_instance_ips'][fq_name_str] -+ except KeyError: -+ pass -+ -+ # find projects on a given domain -+ def _project_list_domain(self, domain_id): -+ fq_name = ['default-domain'] -+ resp_dict = self._vnc_lib.projects_list(parent_fq_name=fq_name) -+ -+ return resp_dict['projects'] -+ -+ # find network ids on a given project -+ def _network_list_project(self, project_id): -+ try: -+ project_uuid = str(uuid.UUID(project_id)) -+ except Exception: -+ print "Error in converting uuid %s" % (project_id) -+ -+ resp_dict = self._vnc_lib.virtual_networks_list(parent_id=project_uuid) -+ -+ return resp_dict['virtual-networks'] -+ -+ def _ipam_list_project(self, project_id): -+ try: -+ project_uuid = str(uuid.UUID(project_id)) -+ except Exception: -+ print "Error in converting uuid %s" % (project_id) -+ -+ resp_dict = self._vnc_lib.network_ipams_list(parent_id=project_uuid) -+ -+ return resp_dict['network-ipams'] -+ -+ def _security_group_list_project(self, project_id): -+ try: -+ project_uuid = str(uuid.UUID(project_id)) -+ except Exception: -+ print "Error in converting uuid %s" % (project_id) -+ -+ self._ensure_default_security_group_exists(project_uuid) -+ -+ resp_dict = self._vnc_lib.security_groups_list(parent_id=project_uuid) -+ -+ return resp_dict['security-groups'] -+ -+ def _security_group_entries_list_sg(self, sg_id): -+ try: -+ sg_uuid = str(uuid.UUID(sg_id)) -+ except Exception: -+ print "Error in converting SG uuid %s" % (sg_id) -+ -+ resp_dict = self._vnc_lib.security_groups_list(parent_id=sg_uuid) -+ -+ return resp_dict['security-groups'] -+ -+ def _route_table_list_project(self, project_id): -+ try: -+ project_uuid = str(uuid.UUID(project_id)) -+ except Exception: -+ print "Error in converting uuid %s" % (project_id) -+ -+ resp_dict = self._vnc_lib.route_tables_list(parent_id=project_uuid) -+ -+ return resp_dict['route-tables'] -+ -+ def _svc_instance_list_project(self, project_id): -+ try: -+ project_uuid = str(uuid.UUID(project_id)) -+ except Exception: -+ print "Error in converting uuid %s" % (project_id) -+ -+ resp_dict = self._vnc_lib.service_instances_list(parent_id=project_uuid) -+ -+ return resp_dict['service-instances'] -+ -+ def _policy_list_project(self, project_id): -+ try: -+ project_uuid = str(uuid.UUID(project_id)) -+ except Exception: -+ print "Error in converting uuid %s" % (project_id) -+ -+ resp_dict = self._vnc_lib.network_policys_list(parent_id=project_uuid) -+ -+ return resp_dict['network-policys'] -+ -+ # find floating ip pools a project has access to -+ def _fip_pool_refs_project(self, project_id): -+ project_uuid = str(uuid.UUID(project_id)) -+ project_obj = self._project_read(proj_id=project_uuid) -+ -+ return project_obj.get_floating_ip_pool_refs() -+ -+ # find networks of floating ip pools project has access to -+ def _fip_pool_ref_networks(self, project_id): -+ ret_nets = [] -+ -+ proj_fip_pool_refs = self._fip_pool_refs_project(project_id) -+ if not proj_fip_pool_refs: -+ return ret_nets -+ -+ for fip_pool_ref in proj_fip_pool_refs: -+ fip_uuid = fip_pool_ref['uuid'] -+ fip_pool_obj = 
self._vnc_lib.floating_ip_pool_read(id=fip_uuid) -+ net_uuid = fip_pool_obj.parent_uuid -+ net_obj = self._virtual_network_read(net_id=net_uuid) -+ ret_nets.append({'uuid': net_obj.uuid, -+ 'fq_name': net_obj.get_fq_name()}) -+ -+ return ret_nets -+ -+ # find floating ip pools defined by network -+ def _fip_pool_list_network(self, net_id): -+ resp_dict = self._vnc_lib.floating_ip_pools_list(parent_id=net_id) -+ -+ return resp_dict['floating-ip-pools'] -+ -+ # find port ids on a given network -+ def _port_list_network(self, network_id): -+ ret_list = [] -+ -+ try: -+ net_obj = self._virtual_network_read(net_id=network_id) -+ except vnc_exc.NoIdError: -+ return ret_list -+ -+ port_back_refs = net_obj.get_virtual_machine_interface_back_refs() -+ if port_back_refs: -+ for port_back_ref in port_back_refs: -+ ret_list.append({'id': port_back_ref['uuid']}) -+ -+ return ret_list -+ -+ # find port ids on a given project -+ def _port_list_project(self, project_id): -+ ret_list = [] -+ project_nets = self._network_list_project(project_id) -+ for net in project_nets: -+ net_ports = self._port_list_network(net['uuid']) -+ ret_list.extend(net_ports) -+ -+ return ret_list -+ -+ # Returns True if -+ # * no filter is specified -+ # OR -+ # * search-param is not present in filters -+ # OR -+ # * 1. search-param is present in filters AND -+ # 2. resource matches param-list AND -+ # 3. shared parameter in filters is False -+ def _filters_is_present(self, filters, key_name, match_value): -+ if filters: -+ if key_name in filters: -+ try: -+ if ('shared' in filters and -+ filters['shared'][0] is True): -+ # yuck, q-api has shared as list always of 1 elem -+ return False # no shared-resource support -+ except ValueError: # not in requested list -+ return False -+ elif len(filters.keys()) == 1: -+ shared_val = filters.get('shared', None) -+ if shared_val and shared_val[0] is True: -+ return False -+ -+ return True -+ -+ def _network_read(self, net_uuid): -+ net_obj = self._virtual_network_read(net_id=net_uuid) -+ return net_obj -+ -+ def _subnet_vnc_create_mapping(self, subnet_id, subnet_key): -+ #import pdb; pdb.set_trace() -+ self._vnc_lib.kv_store(subnet_id, subnet_key) -+ self._vnc_lib.kv_store(subnet_key, subnet_id) -+ self._db_cache['q_subnet_maps'][subnet_id] = subnet_key -+ self._db_cache['q_subnet_maps'][subnet_key] = subnet_id -+ -+ def _subnet_vnc_read_mapping(self, id=None, key=None): -+ if id: -+ try: -+ return self._db_cache['q_subnet_maps'][id] -+ #raise KeyError -+ except KeyError: -+ subnet_key = self._vnc_lib.kv_retrieve(id) -+ self._db_cache['q_subnet_maps'][id] = subnet_key -+ return subnet_key -+ if key: -+ try: -+ return self._db_cache['q_subnet_maps'][key] -+ #raise KeyError -+ except KeyError: -+ subnet_id = self._vnc_lib.kv_retrieve(key) -+ self._db_cache['q_subnet_maps'][key] = subnet_id -+ return subnet_id -+ -+ def _subnet_vnc_read_or_create_mapping(self, id=None, key=None): -+ if id: -+ return self._subnet_vnc_read_mapping(id=id) -+ -+ # if subnet was created outside of neutron handle it and create -+ # neutron representation now (lazily) -+ try: -+ return self._subnet_vnc_read_mapping(key=key) -+ except vnc_exc.NoIdError: -+ subnet_id = str(uuid.uuid4()) -+ self._subnet_vnc_create_mapping(subnet_id, key) -+ return self._subnet_vnc_read_mapping(key=key) -+ -+ def _subnet_vnc_delete_mapping(self, subnet_id, subnet_key): -+ self._vnc_lib.kv_delete(subnet_id) -+ self._vnc_lib.kv_delete(subnet_key) -+ try: -+ del self._db_cache['q_subnet_maps'][subnet_id] -+ del 
self._db_cache['q_subnet_maps'][subnet_key] -+ except KeyError: -+ pass -+ -+ def _subnet_vnc_get_key(self, subnet_vnc, net_id): -+ pfx = subnet_vnc.subnet.get_ip_prefix() -+ pfx_len = subnet_vnc.subnet.get_ip_prefix_len() -+ -+ return '%s %s/%s' % (net_id, pfx, pfx_len) -+ -+ def _subnet_read(self, net_uuid, subnet_key): -+ try: -+ net_obj = self._virtual_network_read(net_id=net_uuid) -+ except vnc_exc.NoIdError: -+ return None -+ -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if not ipam_refs: -+ return None -+ -+ for ipam_ref in ipam_refs: -+ subnet_vncs = ipam_ref['attr'].get_ipam_subnets() -+ for subnet_vnc in subnet_vncs: -+ if self._subnet_vnc_get_key(subnet_vnc, -+ net_uuid) == subnet_key: -+ return subnet_vnc -+ -+ return None -+ -+ def _ip_address_to_subnet_id(self, ip_addr, net_obj): -+ # find subnet-id for ip-addr, called when instance-ip created -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ subnet_vncs = ipam_ref['attr'].get_ipam_subnets() -+ for subnet_vnc in subnet_vncs: -+ cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(), -+ subnet_vnc.subnet.get_ip_prefix_len()) -+ if IPAddress(ip_addr) in IPSet([cidr]): -+ subnet_key = self._subnet_vnc_get_key(subnet_vnc, -+ net_obj.uuid) -+ subnet_id = self._subnet_vnc_read_mapping( -+ key=subnet_key) -+ return subnet_id -+ -+ return None -+ -+ # Conversion routines between VNC and Quantum objects -+ def _svc_instance_neutron_to_vnc(self, si_q, oper): -+ if oper == CREATE: -+ project_id = str(uuid.UUID(si_q['tenant_id'])) -+ project_obj = self._project_read(proj_id=project_id) -+ net_id = si_q['internal_net'] -+ int_vn = self._vnc_lib.virtual_network_read(id=net_id) -+ net_id = si_q['external_net'] -+ ext_vn = self._vnc_lib.virtual_network_read(id=net_id) -+ scale_out = vnc_api.ServiceScaleOutType(max_instances=1, -+ auto_scale=False) -+ si_prop = vnc_api.ServiceInstanceType( -+ auto_policy=True, left_virtual_network=int_vn.name, -+ right_virtual_network=ext_vn.name, scale_out=scale_out) -+ si_prop.set_scale_out(scale_out) -+ si_vnc = vnc_api.ServiceInstance( -+ name=si_q['name'], -+ parent_obj=project_obj, -+ service_instance_properties=si_prop) -+ -+ return si_vnc -+ -+ def _svc_instance_vnc_to_neutron(self, si_obj): -+ si_q_dict = json.loads(json.dumps(si_obj, -+ default=self._obj_to_json)) -+ -+ # replace field names -+ si_q_dict['id'] = si_obj.uuid -+ si_q_dict['tenant_id'] = si_obj.parent_uuid.replace('-', '') -+ si_q_dict['name'] = si_obj.name -+ si_props = si_obj.get_service_instance_properties() -+ if si_props: -+ vn_fq_name = si_obj.get_parent_fq_name() -+ vn_name = si_props.get_left_virtual_network() -+ vn_fq_name.extend([vn_name]) -+ vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name) -+ si_q_dict['internal_net'] = str(vn_obj.uuid) + ' ' + vn_name -+ vn_fq_name = si_obj.get_parent_fq_name() -+ vn_name = si_props.get_right_virtual_network() -+ vn_fq_name.extend([vn_name]) -+ vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name) -+ si_q_dict['external_net'] = str(vn_obj.uuid) + ' ' + vn_name -+ -+ return {'q_api_data': si_q_dict, -+ 'q_extra_data': {}} -+ -+ def _route_table_neutron_to_vnc(self, rt_q, oper): -+ if oper == CREATE: -+ project_id = str(uuid.UUID(rt_q['tenant_id'])) -+ project_obj = self._project_read(proj_id=project_id) -+ rt_vnc = vnc_api.RouteTable(name=rt_q['name'], -+ parent_obj=project_obj) -+ rt_vnc.set_routes(vnc_api.RouteTableType.factory(**rt_q['routes'])) -+ else: -+ rt_vnc = self._vnc_lib.route_table_read(id=rt_q['id']) -+ 
rt_vnc.set_routes(vnc_api.RouteTableType.factory(**rt_q['routes'])) -+ -+ return rt_vnc -+ -+ def _route_table_vnc_to_neutron(self, rt_obj): -+ rt_q_dict = json.loads(json.dumps(rt_obj, -+ default=self._obj_to_json)) -+ -+ # replace field names -+ rt_q_dict['id'] = rt_obj.uuid -+ rt_q_dict['tenant_id'] = rt_obj.parent_uuid.replace('-', '') -+ rt_q_dict['name'] = rt_obj.name -+ rt_q_dict['fq_name'] = rt_obj.fq_name -+ -+ # get route table routes -+ rt_q_dict['routes'] = rt_q_dict.pop('routes', None) -+ return {'q_api_data': rt_q_dict, -+ 'q_extra_data': {}} -+ -+ def _security_group_vnc_to_neutron(self, sg_obj): -+ sg_q_dict = json.loads(json.dumps(sg_obj, -+ default=self._obj_to_json)) -+ -+ # replace field names -+ sg_q_dict['id'] = sg_obj.uuid -+ sg_q_dict['tenant_id'] = sg_obj.parent_uuid.replace('-', '') -+ sg_q_dict['name'] = sg_obj.name -+ sg_q_dict['description'] = sg_obj.get_id_perms().get_description() -+ -+ # get security group rules -+ sg_q_dict['rules'] = [] -+ rule_list = self.security_group_rules_read(sg_obj.uuid) -+ if rule_list: -+ for rule in rule_list: -+ sg_q_dict['rules'].append(rule['q_api_data']) -+ -+ return {'q_api_data': sg_q_dict, -+ 'q_extra_data': {}} -+ -+ def _security_group_neutron_to_vnc(self, sg_q, oper): -+ if oper == CREATE: -+ project_id = str(uuid.UUID(sg_q['tenant_id'])) -+ project_obj = self._project_read(proj_id=project_id) -+ id_perms = vnc_api.IdPermsType( -+ enable=True, description=sg_q['description']) -+ sg_vnc = vnc_api.SecurityGroup( -+ name=sg_q['name'], parent_obj=project_obj, -+ id_perms=id_perms) -+ -+ return sg_vnc -+ -+ def _security_group_rule_vnc_to_neutron(self, sg_id, sg_rule): -+ sgr_q_dict = {} -+ if sg_id is None: -+ return {'q_api_data': sgr_q_dict, -+ 'q_extra_data': {}} -+ -+ try: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_id) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=sg_id) -+ -+ direction = 'egress' -+ if sg_rule.get_direction() == '<': -+ direction = 'ingress' -+ -+ remote_cidr = '' -+ remote_sg_uuid = '' -+ if direction == 'ingress': -+ addr = sg_rule.get_src_addresses()[0] -+ else: -+ addr = sg_rule.get_dst_addresses()[0] -+ -+ if addr.get_subnet(): -+ remote_cidr = '%s/%s' % (addr.get_subnet().get_ip_prefix(), -+ addr.get_subnet().get_ip_prefix_len()) -+ elif addr.get_security_group(): -+ if (addr.get_security_group() != 'any') and \ -+ (addr.get_security_group() != 'local'): -+ remote_sg = addr.get_security_group() -+ try: -+ remote_sg_obj = self._vnc_lib.security_group_read( -+ fq_name_str=remote_sg) -+ remote_sg_uuid = remote_sg_obj.uuid -+ except vnc_exc.NoIdError: -+ pass -+ -+ sgr_q_dict['id'] = sg_rule.get_rule_uuid() -+ sgr_q_dict['tenant_id'] = sg_obj.parent_uuid.replace('-', '') -+ sgr_q_dict['security_group_id'] = sg_obj.uuid -+ sgr_q_dict['ethertype'] = 'IPv4' -+ sgr_q_dict['direction'] = direction -+ sgr_q_dict['protocol'] = sg_rule.get_protocol() -+ sgr_q_dict['port_range_min'] = sg_rule.get_dst_ports()[0].\ -+ get_start_port() -+ sgr_q_dict['port_range_max'] = sg_rule.get_dst_ports()[0].\ -+ get_end_port() -+ sgr_q_dict['remote_ip_prefix'] = remote_cidr -+ sgr_q_dict['remote_group_id'] = remote_sg_uuid -+ -+ return {'q_api_data': sgr_q_dict, -+ 'q_extra_data': {}} -+ -+ def _security_group_rule_neutron_to_vnc(self, sgr_q, oper): -+ if oper == CREATE: -+ port_min = 0 -+ port_max = 65535 -+ if sgr_q['port_range_min']: -+ port_min = sgr_q['port_range_min'] -+ if sgr_q['port_range_max']: -+ port_max = sgr_q['port_range_max'] -+ -+ endpt = 
[vnc_api.AddressType(security_group='any')] -+ if sgr_q['remote_ip_prefix']: -+ cidr = sgr_q['remote_ip_prefix'].split('/') -+ pfx = cidr[0] -+ pfx_len = int(cidr[1]) -+ endpt = [vnc_api.AddressType( -+ subnet=vnc_api.SubnetType(pfx, pfx_len))] -+ elif sgr_q['remote_group_id']: -+ sg_obj = self._vnc_lib.security_group_read( -+ id=sgr_q['remote_group_id']) -+ endpt = [vnc_api.AddressType( -+ security_group=sg_obj.get_fq_name_str())] -+ -+ if sgr_q['direction'] == 'ingress': -+ dir = '<' -+ local = endpt -+ remote = [vnc_api.AddressType(security_group='local')] -+ else: -+ dir = '>' -+ remote = endpt -+ local = [vnc_api.AddressType(security_group='local')] -+ -+ if not sgr_q['protocol']: -+ sgr_q['protocol'] = 'any' -+ -+ sgr_uuid = str(uuid.uuid4()) -+ -+ rule = vnc_api.PolicyRuleType( -+ rule_uuid=sgr_uuid, -+ direction=dir, -+ protocol=sgr_q['protocol'], -+ src_addresses=local, -+ src_ports=[vnc_api.PortType(0, 65535)], -+ dst_addresses=remote, -+ dst_ports=[vnc_api.PortType(port_min, port_max)]) -+ return rule -+ -+ def _network_neutron_to_vnc(self, network_q, oper): -+ net_name = network_q.get('name', None) -+ if oper == CREATE: -+ project_id = str(uuid.UUID(network_q['tenant_id'])) -+ project_obj = self._project_read(proj_id=project_id) -+ id_perms = vnc_api.IdPermsType(enable=True) -+ net_obj = vnc_api.VirtualNetwork( -+ net_name, project_obj, id_perms=id_perms) -+ else: # READ/UPDATE/DELETE -+ net_obj = self._virtual_network_read(net_id=network_q['id']) -+ -+ id_perms = net_obj.get_id_perms() -+ if 'admin_state_up' in network_q: -+ id_perms.enable = network_q['admin_state_up'] -+ net_obj.set_id_perms(id_perms) -+ -+ if 'contrail:policys' in network_q: -+ policy_fq_names = network_q['contrail:policys'] -+ # reset and add with newly specified list -+ net_obj.set_network_policy_list([], []) -+ seq = 0 -+ for p_fq_name in policy_fq_names: -+ domain_name, project_name, policy_name = p_fq_name -+ -+ domain_obj = vnc_api.Domain(domain_name) -+ project_obj = vnc_api.Project(project_name, domain_obj) -+ policy_obj = vnc_api.NetworkPolicy(policy_name, project_obj) -+ -+ net_obj.add_network_policy( -+ policy_obj, -+ vnc_api.VirtualNetworkPolicyType( -+ sequence=vnc_api.SequenceType(seq, 0))) -+ seq = seq + 1 -+ -+ if 'vpc:route_table' in network_q: -+ rt_fq_name = network_q['vpc:route_table'] -+ if rt_fq_name: -+ try: -+ rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name) -+ net_obj.set_route_table(rt_obj) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=net_obj.uuid) -+ -+ return net_obj -+ -+ def _network_vnc_to_neutron(self, net_obj, net_repr='SHOW'): -+ net_q_dict = {} -+ extra_dict = {} -+ -+ net_q_dict['id'] = net_obj.uuid -+ net_q_dict['name'] = net_obj.name -+ extra_dict['contrail:fq_name'] = net_obj.get_fq_name() -+ net_q_dict['tenant_id'] = net_obj.parent_uuid.replace('-', '') -+ net_q_dict['admin_state_up'] = net_obj.get_id_perms().enable -+ net_q_dict['shared'] = False -+ net_q_dict['status'] = constants.NET_STATUS_ACTIVE -+ -+ if net_repr == 'SHOW': -+ port_back_refs = net_obj.get_virtual_machine_interface_back_refs() -+ #if port_back_refs: -+ # net_q_dict['ports'] = [] -+ # for port_back_ref in port_back_refs: -+ # fq_name = port_back_ref['to'] -+ # try: -+ # port_obj = self._virtual_machine_interface_read( -+ # port_id = fq_name[-1]) -+ # except NoIdError: -+ # continue -+ # -+ # port_info = self._port_vnc_to_neutron(port_obj, net_obj) -+ # port_dict = port_info['q_api_data'] -+ # port_dict.update(port_info['q_extra_data']) -+ # -+ # 
net_q_dict['ports'].append(port_dict) -+ -+ extra_dict['contrail:instance_count'] = 0 -+ if port_back_refs: -+ extra_dict['contrail:instance_count'] = len(port_back_refs) -+ -+ net_policy_refs = net_obj.get_network_policy_refs() -+ if net_policy_refs: -+ extra_dict['contrail:policys'] = \ -+ [np_ref['to'] for np_ref in net_policy_refs] -+ -+ elif net_repr == 'LIST': -+ extra_dict['contrail:instance_count'] = 0 -+ port_back_refs = net_obj.get_virtual_machine_interface_back_refs() -+ if port_back_refs: -+ extra_dict['contrail:instance_count'] = len(port_back_refs) -+ -+ ipam_refs = net_obj.get_network_ipam_refs() -+ net_q_dict['subnets'] = [] -+ if ipam_refs: -+ extra_dict['contrail:subnet_ipam'] = [] -+ for ipam_ref in ipam_refs: -+ subnets = ipam_ref['attr'].get_ipam_subnets() -+ for subnet in subnets: -+ sn_info = self._subnet_vnc_to_neutron(subnet, net_obj, -+ ipam_ref['to']) -+ sn_dict = sn_info['q_api_data'] -+ sn_dict.update(sn_info['q_extra_data']) -+ net_q_dict['subnets'].append(sn_dict) -+ sn_ipam = {} -+ sn_ipam['subnet_cidr'] = sn_dict['cidr'] -+ sn_ipam['ipam_fq_name'] = ipam_ref['to'] -+ extra_dict['contrail:subnet_ipam'].append(sn_ipam) -+ -+ return {'q_api_data': net_q_dict, -+ 'q_extra_data': extra_dict} -+ -+ def _subnet_neutron_to_vnc(self, subnet_q): -+ cidr = subnet_q['cidr'].split('/') -+ pfx = cidr[0] -+ pfx_len = int(cidr[1]) -+ if subnet_q['gateway_ip'] != attr.ATTR_NOT_SPECIFIED: -+ default_gw = subnet_q['gateway_ip'] -+ else: -+ # Assigned by address manager -+ default_gw = None -+ sub_net = vnc_api.SubnetType(ip_prefix=pfx, -+ ip_prefix_len=pfx_len) -+ #subnet_vnc = vnc_api.IpamSubnetType( -+ #subnet=vnc_api.SubnetType(pfx, pfx_len), -+ #default_gateway=default_gw) -+ subnet_vnc = vnc_api.IpamSubnetType(subnet=sub_net, -+ default_gateway=default_gw) -+ return subnet_vnc -+ -+ def _subnet_vnc_to_neutron(self, subnet_vnc, net_obj, ipam_fq_name): -+ sn_q_dict = {} -+ sn_q_dict['name'] = '' -+ sn_q_dict['tenant_id'] = net_obj.parent_uuid.replace('-', '') -+ sn_q_dict['network_id'] = net_obj.uuid -+ sn_q_dict['ip_version'] = 4 -+ -+ cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(), -+ subnet_vnc.subnet.get_ip_prefix_len()) -+ sn_q_dict['cidr'] = cidr -+ -+ subnet_key = self._subnet_vnc_get_key(subnet_vnc, net_obj.uuid) -+ sn_id = self._subnet_vnc_read_or_create_mapping(key=subnet_key) -+ -+ sn_q_dict['id'] = sn_id -+ -+ sn_q_dict['gateway_ip'] = subnet_vnc.default_gateway -+ -+ first_ip = str(IPNetwork(cidr).network + 1) -+ last_ip = str(IPNetwork(cidr).broadcast - 2) -+ sn_q_dict['allocation_pools'] = \ -+ [{'id': 'TODO-allocation_pools-id', -+ 'subnet_id': sn_id, -+ 'first_ip': first_ip, -+ 'last_ip': last_ip, -+ 'available_ranges': {}}] -+ -+ sn_q_dict['enable_dhcp'] = False -+ sn_q_dict['dns_nameservers'] = [{'address': '169.254.169.254', -+ 'subnet_id': sn_id}] -+ -+ sn_q_dict['routes'] = [{'destination': 'TODO-destination', -+ 'nexthop': 'TODO-nexthop', -+ 'subnet_id': sn_id}] -+ -+ sn_q_dict['shared'] = False -+ -+ extra_dict = {} -+ extra_dict['contrail:instance_count'] = 0 -+ extra_dict['contrail:ipam_fq_name'] = ipam_fq_name -+ -+ return {'q_api_data': sn_q_dict, -+ 'q_extra_data': extra_dict} -+ -+ def _ipam_neutron_to_vnc(self, ipam_q, oper): -+ ipam_name = ipam_q.get('name', None) -+ if oper == CREATE: -+ project_id = str(uuid.UUID(ipam_q['tenant_id'])) -+ project_obj = self._project_read(proj_id=project_id) -+ ipam_obj = vnc_api.NetworkIpam(ipam_name, project_obj) -+ else: # READ/UPDATE/DELETE -+ ipam_obj = 
self._vnc_lib.network_ipam_read(id=ipam_q['id']) -+ -+ if ipam_q['mgmt']: -+ ipam_obj.set_network_ipam_mgmt( -+ vnc_api.IpamType.factory(**ipam_q['mgmt'])) -+ -+ return ipam_obj -+ -+ def _ipam_vnc_to_neutron(self, ipam_obj): -+ ipam_q_dict = json.loads(json.dumps(ipam_obj, -+ default=self._obj_to_json)) -+ -+ # replace field names -+ ipam_q_dict['id'] = ipam_q_dict.pop('uuid') -+ ipam_q_dict['tenant_id'] = ipam_obj.parent_uuid.replace('-', '') -+ ipam_q_dict['mgmt'] = ipam_q_dict.pop('network_ipam_mgmt', None) -+ net_back_refs = ipam_q_dict.pop('virtual_network_back_refs', None) -+ if net_back_refs: -+ ipam_q_dict['nets_using'] = [] -+ for net_back_ref in net_back_refs: -+ net_fq_name = net_back_ref['to'] -+ ipam_q_dict['nets_using'].append(net_fq_name) -+ -+ return {'q_api_data': ipam_q_dict, -+ 'q_extra_data': {}} -+ -+ def _policy_neutron_to_vnc(self, policy_q, oper): -+ policy_name = policy_q.get('name', None) -+ if oper == CREATE: -+ project_id = str(uuid.UUID(policy_q['tenant_id'])) -+ project_obj = self._project_read(proj_id=project_id) -+ policy_obj = vnc_api.NetworkPolicy(policy_name, project_obj) -+ else: # READ/UPDATE/DELETE -+ policy_obj = self._vnc_lib.network_policy_read(id=policy_q['id']) -+ -+ policy_obj.set_network_policy_entries( -+ vnc_api.PolicyEntriesType.factory(**policy_q['entries'])) -+ -+ return policy_obj -+ -+ def _policy_vnc_to_neutron(self, policy_obj): -+ policy_q_dict = json.loads(json.dumps(policy_obj, -+ default=self._obj_to_json)) -+ -+ # replace field names -+ policy_q_dict['id'] = policy_q_dict.pop('uuid') -+ policy_q_dict['tenant_id'] = policy_obj.uuid.replace('-', '') -+ policy_q_dict['entries'] = policy_q_dict.pop('network_policy_entries', -+ None) -+ net_back_refs = policy_q_dict.pop('virtual_network_back_refs', None) -+ if net_back_refs: -+ policy_q_dict['nets_using'] = [] -+ for net_back_ref in net_back_refs: -+ net_fq_name = net_back_ref['to'] -+ policy_q_dict['nets_using'].append(net_fq_name) -+ -+ return {'q_api_data': policy_q_dict, -+ 'q_extra_data': {}} -+ -+ def _floatingip_neutron_to_vnc(self, fip_q, oper): -+ if oper == CREATE: -+ # use first available pool on net -+ net_id = fip_q['floating_network_id'] -+ fq_name = self._fip_pool_list_network(net_id)[0]['fq_name'] -+ fip_pool_obj = self._vnc_lib.floating_ip_pool_read(fq_name=fq_name) -+ fip_name = str(uuid.uuid4()) -+ fip_obj = vnc_api.FloatingIp(fip_name, fip_pool_obj) -+ fip_obj.uuid = fip_name -+ -+ proj_id = str(uuid.UUID(fip_q['tenant_id'])) -+ proj_obj = self._project_read(proj_id=proj_id) -+ fip_obj.set_project(proj_obj) -+ else: # READ/UPDATE/DELETE -+ fip_obj = self._vnc_lib.floating_ip_read(id=fip_q['id']) -+ -+ if fip_q['port_id']: -+ port_obj = self._virtual_machine_interface_read( -+ port_id=fip_q['port_id']) -+ fip_obj.set_virtual_machine_interface(port_obj) -+ else: -+ fip_obj.set_virtual_machine_interface_list([]) -+ -+ return fip_obj -+ -+ def _floatingip_vnc_to_neutron(self, fip_obj): -+ fip_q_dict = {} -+ extra_dict = {} -+ -+ fip_pool_obj = self._vnc_lib.floating_ip_pool_read( -+ id=fip_obj.parent_uuid) -+ net_obj = self._virtual_network_read(net_id=fip_pool_obj.parent_uuid) -+ -+ tenant_id = fip_obj.get_project_refs()[0]['uuid'].replace('-', '') -+ -+ port_id = None -+ port_refs = fip_obj.get_virtual_machine_interface_refs() -+ if port_refs: -+ port_id = fip_obj.get_virtual_machine_interface_refs()[0]['uuid'] -+ -+ fip_q_dict['id'] = fip_obj.uuid -+ fip_q_dict['tenant_id'] = tenant_id -+ fip_q_dict['floating_ip_address'] = fip_obj.get_floating_ip_address() -+ 
fip_q_dict['floating_network_id'] = net_obj.uuid -+ fip_q_dict['router_id'] = None -+ fip_q_dict['fixed_port_id'] = port_id -+ fip_q_dict['fixed_ip_address'] = None -+ -+ return {'q_api_data': fip_q_dict, -+ 'q_extra_data': extra_dict} -+ -+ def _port_neutron_to_vnc(self, port_q, net_obj, oper): -+ if oper == CREATE: -+ port_name = str(uuid.uuid4()) -+ instance_name = port_q['device_id'] -+ instance_obj = vnc_api.VirtualMachine(instance_name) -+ -+ id_perms = vnc_api.IdPermsType(enable=True) -+ port_obj = vnc_api.VirtualMachineInterface(port_name, instance_obj, -+ id_perms=id_perms) -+ port_obj.uuid = port_name -+ port_obj.set_virtual_network(net_obj) -+ -+ else: # READ/UPDATE/DELETE -+ port_obj = self._virtual_machine_interface_read( -+ port_id=port_q['id']) -+ -+ port_obj.set_security_group_list([]) -+ if ('security_groups' in port_q and -+ port_q['security_groups'].__class__ is not object): -+ for sg_id in port_q['security_groups']: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_id) -+ port_obj.add_security_group(sg_obj) -+ -+ id_perms = port_obj.get_id_perms() -+ if 'admin_state_up' in port_q: -+ id_perms.enable = port_q['admin_state_up'] -+ port_obj.set_id_perms(id_perms) -+ -+ return port_obj -+ -+ def _port_vnc_to_neutron(self, port_obj, net_obj=None): -+ port_q_dict = {} -+ port_q_dict['name'] = port_obj.uuid -+ port_q_dict['id'] = port_obj.uuid -+ port_q_dict[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_CONTRAIL -+ -+ if not net_obj: -+ net_refs = port_obj.get_virtual_network_refs() -+ if net_refs: -+ net_id = net_refs[0]['uuid'] -+ else: -+ net_id = self._vnc_lib.obj_to_id(vnc_api.VirtualNetwork()) -+ -+ #proj_id = self._get_obj_tenant_id('port', port_obj.uuid) -+ proj_id = None -+ if not proj_id: -+ # not in cache, get by reading VN obj, and populate cache -+ net_obj = self._virtual_network_read(net_id=net_id) -+ proj_id = net_obj.parent_uuid.replace('-', '') -+ self._set_obj_tenant_id(port_obj.uuid, proj_id) -+ else: -+ net_id = net_obj.uuid -+ proj_id = net_obj.parent_uuid.replace('-', '') -+ -+ port_q_dict['tenant_id'] = proj_id -+ port_q_dict['network_id'] = net_id -+ -+ port_q_dict['mac_address'] = '' -+ mac_refs = port_obj.get_virtual_machine_interface_mac_addresses() -+ if mac_refs: -+ port_q_dict['mac_address'] = mac_refs.mac_address[0] -+ -+ port_q_dict['fixed_ips'] = [] -+ ip_back_refs = port_obj.get_instance_ip_back_refs() -+ if ip_back_refs: -+ for ip_back_ref in ip_back_refs: -+ try: -+ ip_obj = self._instance_ip_read( -+ instance_ip_id=ip_back_ref['uuid']) -+ except vnc_exc.NoIdError: -+ continue -+ -+ ip_addr = ip_obj.get_instance_ip_address() -+ -+ ip_q_dict = {} -+ ip_q_dict['port_id'] = port_obj.uuid -+ ip_q_dict['ip_address'] = ip_addr -+ ip_q_dict['subnet_id'] = self._ip_address_to_subnet_id(ip_addr, -+ net_obj) -+ ip_q_dict['net_id'] = net_id -+ -+ port_q_dict['fixed_ips'].append(ip_q_dict) -+ -+ sg_dict = {'port_security_enabled': True} -+ sg_dict['security_groups'] = [] -+ sg_refs = port_obj.get_security_group_refs() -+ for sg_ref in sg_refs or []: -+ sg_dict['security_groups'].append(sg_ref['uuid']) -+ -+ port_q_dict['admin_state_up'] = port_obj.get_id_perms().enable -+ port_q_dict['status'] = constants.PORT_STATUS_ACTIVE -+ port_q_dict['device_id'] = port_obj.parent_name -+ port_q_dict['device_owner'] = 'TODO-device-owner' -+ -+ return {'q_api_data': port_q_dict, -+ 'q_extra_data': sg_dict} -+ -+ # public methods -+ # network api handlers -+ def network_create(self, network_q): -+ #self._ensure_project_exists(network_q['tenant_id']) -+ -+ net_obj 
= self._network_neutron_to_vnc(network_q, CREATE) -+ net_uuid = self._virtual_network_create(net_obj) -+ -+ ret_network_q = self._network_vnc_to_neutron(net_obj, net_repr='SHOW') -+ self._db_cache['q_networks'][net_uuid] = ret_network_q -+ -+ return ret_network_q -+ -+ def network_read(self, net_uuid, fields=None): -+ # see if we can return fast... -+ if fields and (len(fields) == 1) and fields[0] == 'tenant_id': -+ tenant_id = self._get_obj_tenant_id('network', net_uuid) -+ return {'q_api_data': {'id': net_uuid, 'tenant_id': tenant_id}} -+ -+ try: -+ # return self._db_cache['q_networks']['net_uuid'] -+ raise KeyError -+ except KeyError: -+ pass -+ -+ try: -+ net_obj = self._network_read(net_uuid) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=net_uuid) -+ -+ return self._network_vnc_to_neutron(net_obj, net_repr='SHOW') -+ -+ def network_update(self, net_id, network_q): -+ network_q['id'] = net_id -+ net_obj = self._network_neutron_to_vnc(network_q, UPDATE) -+ self._virtual_network_update(net_obj) -+ -+ ret_network_q = self._network_vnc_to_neutron(net_obj, net_repr='SHOW') -+ self._db_cache['q_networks'][net_id] = ret_network_q -+ -+ return ret_network_q -+ -+ def network_delete(self, net_id): -+ self._virtual_network_delete(net_id=net_id) -+ try: -+ del self._db_cache['q_networks'][net_id] -+ except KeyError: -+ pass -+ -+ def network_list(self, filters=None): -+ ret_list = [] -+ -+ if filters and 'shared' in filters: -+ if filters['shared'][0] is True: -+ # no support for shared networks -+ return ret_list -+ -+ # collect phase -+ all_nets = [] # all n/ws in all projects -+ if filters and 'tenant_id' in filters: -+ # project-id is present -+ if 'id' in filters: -+ # required networks are also specified, -+ # just read and populate ret_list -+ # prune is skipped because all_nets is empty -+ for net_id in filters['id']: -+ net_obj = self._network_read(net_id) -+ net_info = self._network_vnc_to_neutron(net_obj, -+ net_repr='LIST') -+ ret_list.append(net_info) -+ else: -+ # read all networks in project, and prune below -+ project_ids = filters['tenant_id'] -+ for p_id in project_ids: -+ if 'router:external' in filters: -+ all_nets.append(self._fip_pool_ref_networks(p_id)) -+ else: -+ project_nets = self._network_list_project(p_id) -+ all_nets.append(project_nets) -+ elif filters and 'id' in filters: -+ # required networks are specified, just read and populate ret_list -+ # prune is skipped because all_nets is empty -+ for net_id in filters['id']: -+ net_obj = self._network_read(net_id) -+ net_info = self._network_vnc_to_neutron(net_obj, -+ net_repr='LIST') -+ ret_list.append(net_info) -+ else: -+ # read all networks in all projects -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ if filters and 'router:external' in filters: -+ all_nets.append(self._fip_pool_ref_networks(proj_id)) -+ else: -+ project_nets = self._network_list_project(proj_id) -+ all_nets.append(project_nets) -+ -+ # prune phase -+ for project_nets in all_nets: -+ for proj_net in project_nets: -+ proj_net_id = proj_net['uuid'] -+ if not self._filters_is_present(filters, 'id', proj_net_id): -+ continue -+ -+ proj_net_fq_name = unicode(proj_net['fq_name']) -+ if not self._filters_is_present(filters, 'contrail:fq_name', -+ proj_net_fq_name): -+ continue -+ -+ try: -+ net_obj = self._network_read(proj_net['uuid']) -+ net_info = self._network_vnc_to_neutron(net_obj, -+ net_repr='LIST') -+ except vnc_exc.NoIdError: -+ continue -+ 
ret_list.append(net_info) -+ -+ return ret_list -+ -+ def network_count(self, filters=None): -+ nets_info = self.network_list(filters) -+ return len(nets_info) -+ -+ # subnet api handlers -+ def subnet_create(self, subnet_q): -+ net_id = subnet_q['network_id'] -+ net_obj = self._virtual_network_read(net_id=net_id) -+ -+ ipam_fq_name = subnet_q['contrail:ipam_fq_name'] -+ if ipam_fq_name != '': -+ domain_name, project_name, ipam_name = ipam_fq_name -+ -+ project_obj = vnc_api.Project(project_name) -+ netipam_obj = vnc_api.NetworkIpam(ipam_name, project_obj) -+ else: # link subnet with default ipam -+ project_obj = vnc_api.Project(net_obj.parent_name) -+ netipam_obj = vnc_api.NetworkIpam(project_obj=project_obj) -+ ipam_fq_name = netipam_obj.get_fq_name() -+ -+ subnet_vnc = self._subnet_neutron_to_vnc(subnet_q) -+ subnet_key = self._subnet_vnc_get_key(subnet_vnc, net_id) -+ -+ # Locate list of subnets to which this subnet has to be appended -+ net_ipam_ref = None -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ if ipam_ref['to'] == ipam_fq_name: -+ net_ipam_ref = ipam_ref -+ break -+ -+ if not net_ipam_ref: -+ # First link from net to this ipam -+ vnsn_data = vnc_api.VnSubnetsType(ipam_subnets=[subnet_vnc]) -+ net_obj.add_network_ipam(netipam_obj, vnsn_data) -+ else: # virtual-network already linked to this ipam -+ for subnet in net_ipam_ref['attr'].get_ipam_subnets(): -+ if subnet_key == self._subnet_vnc_get_key(subnet, net_id): -+ # duplicate !! -+ subnet_info = self._subnet_vnc_to_neutron(subnet, -+ net_obj, -+ ipam_fq_name) -+ return subnet_info -+ vnsn_data = net_ipam_ref['attr'] -+ vnsn_data.ipam_subnets.append(subnet_vnc) -+ -+ self._virtual_network_update(net_obj) -+ -+ # allocate an id to the subnet and store mapping with -+ # api-server -+ subnet_id = str(uuid.uuid4()) -+ self._subnet_vnc_create_mapping(subnet_id, subnet_key) -+ -+ # Read in subnet from server to get updated values for gw etc. 
-+ subnet_vnc = self._subnet_read(net_obj.uuid, subnet_key) -+ subnet_info = self._subnet_vnc_to_neutron(subnet_vnc, net_obj, -+ ipam_fq_name) -+ -+ #self._db_cache['q_subnets'][subnet_id] = subnet_info -+ -+ return subnet_info -+ -+ def subnet_read(self, subnet_id): -+ try: -+ # return self._db_cache['q_subnets'][subnet_id] -+ raise KeyError -+ except KeyError: -+ pass -+ -+ subnet_key = self._subnet_vnc_read_mapping(id=subnet_id) -+ net_id = subnet_key.split()[0] -+ -+ net_obj = self._network_read(net_id) -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ subnet_vncs = ipam_ref['attr'].get_ipam_subnets() -+ for subnet_vnc in subnet_vncs: -+ if self._subnet_vnc_get_key(subnet_vnc, -+ net_id) == subnet_key: -+ ret_subnet_q = self._subnet_vnc_to_neutron( -+ subnet_vnc, net_obj, ipam_ref['to']) -+ self._db_cache['q_subnets'][subnet_id] = ret_subnet_q -+ return ret_subnet_q -+ -+ return {} -+ -+ def subnet_delete(self, subnet_id): -+ subnet_key = self._subnet_vnc_read_mapping(id=subnet_id) -+ net_id = subnet_key.split()[0] -+ -+ net_obj = self._network_read(net_id) -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ orig_subnets = ipam_ref['attr'].get_ipam_subnets() -+ new_subnets = [subnet_vnc for subnet_vnc in orig_subnets -+ if self._subnet_vnc_get_key(subnet_vnc, net_id) -+ != subnet_key] -+ if len(orig_subnets) != len(new_subnets): -+ # matched subnet to be deleted -+ ipam_ref['attr'].set_ipam_subnets(new_subnets) -+ self._virtual_network_update(net_obj) -+ self._subnet_vnc_delete_mapping(subnet_id, subnet_key) -+ try: -+ del self._db_cache['q_subnets'][subnet_id] -+ except KeyError: -+ pass -+ -+ return -+ -+ def subnets_list(self, filters=None): -+ ret_subnets = [] -+ -+ if filters and 'id' in filters: -+ # required subnets are specified, -+ # just read in corresponding net_ids -+ net_ids = set([]) -+ for subnet_id in filters['id']: -+ subnet_key = self._subnet_vnc_read_mapping(id=subnet_id) -+ net_id = subnet_key.split()[0] -+ net_ids.add(net_id) -+ else: -+ nets_info = self.network_list() -+ net_ids = [n_info['q_api_data']['id'] for n_info in nets_info] -+ -+ for net_id in net_ids: -+ net_obj = self._network_read(net_id) -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ subnet_vncs = ipam_ref['attr'].get_ipam_subnets() -+ for subnet_vnc in subnet_vncs: -+ sn_info = self._subnet_vnc_to_neutron(subnet_vnc, -+ net_obj, -+ ipam_ref['to']) -+ sn_id = sn_info['q_api_data']['id'] -+ sn_proj_id = sn_info['q_api_data']['tenant_id'] -+ sn_net_id = sn_info['q_api_data']['network_id'] -+ -+ if filters: -+ if not self._filters_is_present(filters, 'id', -+ sn_id): -+ continue -+ if not self._filters_is_present(filters, -+ 'tenant_id', -+ sn_proj_id): -+ continue -+ if not self._filters_is_present(filters, -+ 'network_id', -+ sn_net_id): -+ continue -+ -+ ret_subnets.append(sn_info) -+ -+ return ret_subnets -+ -+ def subnets_count(self, filters=None): -+ subnets_info = self.subnets_list(filters) -+ return len(subnets_info) -+ -+ # ipam api handlers -+ def ipam_create(self, ipam_q): -+ ipam_obj = self._ipam_neutron_to_vnc(ipam_q, CREATE) -+ self._vnc_lib.network_ipam_create(ipam_obj) -+ -+ return self._ipam_vnc_to_neutron(ipam_obj) -+ -+ def ipam_read(self, ipam_id): -+ try: -+ ipam_obj = self._vnc_lib.network_ipam_read(id=ipam_id) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=ipam_id) -+ -+ return self._ipam_vnc_to_neutron(ipam_obj) -+ 
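
Subnets have no first-class object in the VNC data model used here, so the subnet handlers above synthesize stable Neutron subnet UUIDs by writing a bidirectional mapping into the api-server key/value store: the generated UUID maps to a '<net-uuid> <prefix>/<len>' key, and the key maps back to the UUID. A rough sketch of that scheme, using only the kv_store/kv_delete calls the patch itself uses; subnet_key, create_mapping, and delete_mapping are hypothetical stand-ins for the patch's _subnet_vnc_* helpers:

    import uuid

    def subnet_key(net_id, pfx, pfx_len):
        # canonical key format used throughout the patch
        return '%s %s/%s' % (net_id, pfx, pfx_len)

    def create_mapping(vnc_lib, net_id, pfx, pfx_len):
        key = subnet_key(net_id, pfx, pfx_len)
        sn_id = str(uuid.uuid4())      # the id Neutron hands out
        vnc_lib.kv_store(sn_id, key)   # uuid -> key
        vnc_lib.kv_store(key, sn_id)   # key  -> uuid
        return sn_id

    def delete_mapping(vnc_lib, sn_id, key):
        # both directions must go together or later lookups go stale
        vnc_lib.kv_delete(sn_id)
        vnc_lib.kv_delete(key)

This mapping is also why DBInterface.__init__ walks the entire kv table at startup: older deployments keyed entries by fq_name string, and the constructor rewrites them to the net-uuid form (deleting entries whose network no longer exists) before serving any requests.
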
-+ def ipam_update(self, ipam_id, ipam): -+ ipam_q = ipam['ipam'] -+ ipam_q['id'] = ipam_id -+ ipam_obj = self._ipam_neutron_to_vnc(ipam_q, UPDATE) -+ self._vnc_lib.network_ipam_update(ipam_obj) -+ -+ return self._ipam_vnc_to_neutron(ipam_obj) -+ -+ def ipam_delete(self, ipam_id): -+ self._vnc_lib.network_ipam_delete(id=ipam_id) -+ -+ def ipam_list(self, filters=None): -+ ret_list = [] -+ -+ # collect phase -+ all_ipams = [] # all ipams in all projects -+ if filters and 'tenant_id' in filters: -+ project_ids = filters['tenant_id'] -+ for p_id in project_ids: -+ project_ipams = self._ipam_list_project(p_id) -+ all_ipams.append(project_ipams) -+ else: # no filters -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ project_ipams = self._ipam_list_project(proj_id) -+ all_ipams.append(project_ipams) -+ -+ # prune phase -+ for project_ipams in all_ipams: -+ for proj_ipam in project_ipams: -+ proj_ipam_id = proj_ipam['uuid'] -+ if not self._filters_is_present(filters, 'id', proj_ipam_id): -+ continue -+ ipam_info = self.ipam_read(proj_ipam['uuid']) -+ ret_list.append(ipam_info) -+ -+ return ret_list -+ -+ def ipam_count(self, filters=None): -+ ipam_info = self.ipam_list(filters) -+ return len(ipam_info) -+ -+ # policy api handlers -+ def policy_create(self, policy_q): -+ -+ policy_obj = self._policy_neutron_to_vnc(policy_q, CREATE) -+ self._vnc_lib.network_policy_create(policy_obj) -+ -+ return self._policy_vnc_to_neutron(policy_obj) -+ -+ def policy_read(self, policy_id): -+ policy_obj = self._vnc_lib.network_policy_read(id=policy_id) -+ -+ return self._policy_vnc_to_neutron(policy_obj) -+ -+ def policy_update(self, policy_id, policy): -+ policy_q = policy['policy'] -+ policy_q['id'] = policy_id -+ policy_obj = self._policy_neutron_to_vnc(policy_q, UPDATE) -+ self._vnc_lib.network_policy_update(policy_obj) -+ -+ return self._policy_vnc_to_neutron(policy_obj) -+ -+ def policy_delete(self, policy_id): -+ self._vnc_lib.network_policy_delete(id=policy_id) -+ -+ def policy_list(self, filters=None): -+ ret_list = [] -+ -+ # collect phase -+ all_policys = [] # all policys in all projects -+ if filters and 'tenant_id' in filters: -+ project_ids = filters['tenant_id'] -+ for p_id in project_ids: -+ project_policys = self._policy_list_project(p_id) -+ all_policys.append(project_policys) -+ else: # no filters -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ project_policys = self._policy_list_project(proj_id) -+ all_policys.append(project_policys) -+ -+ # prune phase -+ for project_policys in all_policys: -+ for proj_policy in project_policys: -+ proj_policy_id = proj_policy['uuid'] -+ if not self._filters_is_present(filters, 'id', proj_policy_id): -+ continue -+ policy_info = self.policy_read(proj_policy['uuid']) -+ ret_list.append(policy_info) -+ -+ return ret_list -+ -+ def policy_count(self, filters=None): -+ policy_info = self.policy_list(filters) -+ return len(policy_info) -+ -+ # floatingip api handlers -+ def floatingip_create(self, fip_q): -+ fip_obj = self._floatingip_neutron_to_vnc(fip_q, CREATE) -+ fip_uuid = self._vnc_lib.floating_ip_create(fip_obj) -+ fip_obj = self._vnc_lib.floating_ip_read(id=fip_uuid) -+ -+ return self._floatingip_vnc_to_neutron(fip_obj) -+ -+ def floatingip_read(self, fip_uuid): -+ fip_obj = self._vnc_lib.floating_ip_read(id=fip_uuid) -+ -+ return self._floatingip_vnc_to_neutron(fip_obj) -+ -+ def floatingip_update(self, fip_id, fip_q): -+ 
fip_q['id'] = fip_id -+ fip_obj = self._floatingip_neutron_to_vnc(fip_q, UPDATE) -+ self._vnc_lib.floating_ip_update(fip_obj) -+ -+ return self._floatingip_vnc_to_neutron(fip_obj) -+ -+ def floatingip_delete(self, fip_id): -+ self._vnc_lib.floating_ip_delete(id=fip_id) -+ -+ def floatingip_list(self, filters=None): -+ # Find networks, get floatingip backrefs and return -+ ret_list = [] -+ -+ if filters: -+ if 'tenant_id' in filters: -+ proj_ids = [str(uuid.UUID(id)) for id in filters['tenant_id']] -+ elif 'port_id' in filters: -+ # required ports are specified, just read and populate ret_list -+ # prune is skipped because proj_objs is empty -+ proj_ids = [] -+ for port_id in filters['port_id']: -+ port_obj = self._virtual_machine_interface_read( -+ port_id=port_id) -+ fip_back_refs = port_obj.get_floating_ip_back_refs() -+ if not fip_back_refs: -+ continue -+ for fip_back_ref in fip_back_refs: -+ fip_obj = self._vnc_lib.floating_ip_read( -+ id=fip_back_ref['uuid']) -+ ret_list.append(self._floatingip_vnc_to_neutron( -+ fip_obj)) -+ else: # no filters -+ dom_projects = self._project_list_domain(None) -+ proj_ids = [proj['uuid'] for proj in dom_projects] -+ -+ proj_objs = [self._project_read(proj_id=id) for id in proj_ids] -+ -+ for proj_obj in proj_objs: -+ fip_back_refs = proj_obj.get_floating_ip_back_refs() -+ if not fip_back_refs: -+ continue -+ for fip_back_ref in fip_back_refs: -+ fip_obj = self._vnc_lib.floating_ip_read( -+ id=fip_back_ref['uuid']) -+ ret_list.append(self._floatingip_vnc_to_neutron(fip_obj)) -+ -+ return ret_list -+ -+ def floatingip_count(self, filters=None): -+ floatingip_info = self.floatingip_list(filters) -+ return len(floatingip_info) -+ -+ # port api handlers -+ def port_create(self, port_q): -+ net_id = port_q['network_id'] -+ net_obj = self._network_read(net_id) -+ proj_id = net_obj.parent_uuid -+ -+ self._ensure_instance_exists(port_q['device_id']) -+ -+ # initialize port object -+ port_obj = self._port_neutron_to_vnc(port_q, net_obj, CREATE) -+ -+ # if ip address passed then use it -+ ip_addr = None -+ ip_obj = None -+ if port_q['fixed_ips'].__class__ is not object: -+ ip_addr = port_q['fixed_ips'][0]['ip_address'] -+ ip_name = '%s %s' % (net_id, ip_addr) -+ try: -+ ip_obj = self._instance_ip_read(fq_name=[ip_name]) -+ #ip_id = ip_obj.uuid -+ except Exception as e: -+ ip_obj = None -+ -+ # create the object -+ port_id = self._virtual_machine_interface_create(port_obj) -+ -+ # initialize ip object -+ if ip_obj is None: -+ ip_name = str(uuid.uuid4()) -+ ip_obj = vnc_api.InstanceIp(name=ip_name) -+ ip_obj.uuid = ip_name -+ ip_obj.set_virtual_machine_interface(port_obj) -+ ip_obj.set_virtual_network(net_obj) -+ if ip_addr: -+ ip_obj.set_instance_ip_address(ip_addr) -+ try: -+ self._instance_ip_create(ip_obj) -+ except Exception as e: -+ # ResourceExhaustionError, resources are not available -+ self._virtual_machine_interface_delete(port_id=port_id) -+ raise e -+ # shared ip address -+ else: -+ if ip_addr == ip_obj.get_instance_ip_address(): -+ ip_obj.add_virtual_machine_interface(port_obj) -+ self._instance_ip_update(ip_obj) -+ -+ port_obj = self._virtual_machine_interface_read(port_id=port_id) -+ -+ ret_port_q = self._port_vnc_to_neutron(port_obj, net_obj) -+ #self._db_cache['q_ports'][port_id] = ret_port_q -+ self._set_obj_tenant_id(port_id, proj_id) -+ -+ # update cache on successful creation -+ tenant_id = proj_id.replace('-', '') -+ if tenant_id not in self._db_cache['q_tenant_port_count']: -+ ncurports = self.port_count({'tenant_id': tenant_id}) -+ else: 
-+ ncurports = self._db_cache['q_tenant_port_count'][tenant_id] -+ -+ self._db_cache['q_tenant_port_count'][tenant_id] = ncurports + 1 -+ -+ return ret_port_q -+ -+ def port_read(self, port_id): -+ try: -+ # return self._db_cache['q_ports'][port_id] -+ raise KeyError -+ except KeyError: -+ pass -+ -+ port_obj = self._virtual_machine_interface_read(port_id=port_id) -+ -+ ret_port_q = self._port_vnc_to_neutron(port_obj) -+ self._db_cache['q_ports'][port_id] = ret_port_q -+ -+ return ret_port_q -+ -+ def port_update(self, port_id, port_q): -+ port_q['id'] = port_id -+ port_obj = self._port_neutron_to_vnc(port_q, None, UPDATE) -+ self._virtual_machine_interface_update(port_obj) -+ -+ ret_port_q = self._port_vnc_to_neutron(port_obj) -+ self._db_cache['q_ports'][port_id] = ret_port_q -+ -+ return ret_port_q -+ -+ def port_delete(self, port_id): -+ port_obj = self._port_neutron_to_vnc({'id': port_id}, None, READ) -+ instance_id = port_obj.parent_uuid -+ -+ # release instance IP address -+ iip_back_refs = port_obj.get_instance_ip_back_refs() -+ if iip_back_refs: -+ for iip_back_ref in iip_back_refs: -+ # if name contains IP address then this is shared ip -+ iip_obj = self._vnc_lib.instance_ip_read( -+ id=iip_back_ref['uuid']) -+ name = iip_obj.name -+ if len(name.split(' ')) > 1: -+ name = name.split(' ')[1] -+ -+ # in case of shared ip only delete the link to the VMI -+ try: -+ socket.inet_aton(name) -+ iip_obj.del_virtual_machine_interface(port_obj) -+ self._instance_ip_update(iip_obj) -+ except socket.error: -+ self._instance_ip_delete( -+ instance_ip_id=iip_back_ref['uuid']) -+ -+ # disassociate any floating IP used by instance -+ fip_back_refs = port_obj.get_floating_ip_back_refs() -+ if fip_back_refs: -+ for fip_back_ref in fip_back_refs: -+ fip_obj = self._vnc_lib.floating_ip_read( -+ id=fip_back_ref['uuid']) -+ self.floatingip_update(fip_obj.uuid, {'port_id': None}) -+ -+ self._virtual_machine_interface_delete(port_id=port_id) -+ -+ # delete instance if this was the last port -+ inst_obj = self._vnc_lib.virtual_machine_read(id=instance_id) -+ inst_intfs = inst_obj.get_virtual_machine_interfaces() -+ if not inst_intfs: -+ self._vnc_lib.virtual_machine_delete(id=inst_obj.uuid) -+ -+ try: -+ del self._db_cache['q_ports'][port_id] -+ except KeyError: -+ pass -+ -+ # update cache on successful deletion -+ try: -+ tenant_id = self._get_obj_tenant_id('port', port_id) -+ self._db_cache['q_tenant_port_count'][tenant_id] = \ -+ self._db_cache['q_tenant_port_count'][tenant_id] - 1 -+ except KeyError: -+ pass -+ -+ self._del_obj_tenant_id(port_id) -+ -+ def port_list(self, filters=None): -+ ret_q_ports = [] -+ all_project_ids = [] -+ -+ if 'device_owner' in filters: -+ return ret_q_ports -+ -+ if 'device_id' not in filters: -+ # Listing from back references -+ if not filters: -+ # no filters => return all ports! 
-+ all_projects = self._project_list_domain(None) -+ all_project_ids = [project['uuid'] for project in all_projects] -+ elif 'tenant_id' in filters: -+ all_project_ids = filters.get('tenant_id') -+ -+ for proj_id in all_project_ids: -+ proj_ports = self._port_list_project(proj_id) -+ for port in proj_ports: -+ try: -+ port_info = self.port_read(port['id']) -+ except vnc_exc.NoIdError: -+ continue -+ ret_q_ports.append(port_info) -+ -+ for net_id in filters.get('network_id', []): -+ net_ports = self._port_list_network(net_id) -+ for port in net_ports: -+ port_info = self.port_read(port['id']) -+ ret_q_ports.append(port_info) -+ -+ return ret_q_ports -+ -+ # Listing from parent to children -+ virtual_machine_ids = filters['device_id'] -+ for vm_id in virtual_machine_ids: -+ resp_dict = self._vnc_lib.virtual_machine_interfaces_list( -+ parent_id=vm_id) -+ vm_intf_ids = resp_dict['virtual-machine-interfaces'] -+ for vm_intf in vm_intf_ids: -+ try: -+ port_info = self.port_read(vm_intf['uuid']) -+ except vnc_exc.NoIdError: -+ continue -+ ret_q_ports.append(port_info) -+ -+ return ret_q_ports -+ -+ def port_count(self, filters=None): -+ if 'device_owner' in filters: -+ return 0 -+ -+ if 'tenant_id' in filters: -+ project_id = filters['tenant_id'][0] -+ try: -+ return self._db_cache['q_tenant_port_count'][project_id] -+ except KeyError: -+ # do it the hard way but remember for next time -+ nports = len(self._port_list_project(project_id)) -+ self._db_cache['q_tenant_port_count'][project_id] = nports -+ else: -+ # across all projects -+ # get only a count from api-server! -+ nports = len(self.port_list(filters)) -+ -+ return nports -+ -+ # security group api handlers -+ def security_group_create(self, sg_q): -+ sg_obj = self._security_group_neutron_to_vnc(sg_q, CREATE) -+ sg_uuid = self._security_group_create(sg_obj) -+ -+ #allow all egress traffic -+ def_rule = {} -+ def_rule['port_range_min'] = 0 -+ def_rule['port_range_max'] = 65535 -+ def_rule['direction'] = 'egress' -+ def_rule['remote_ip_prefix'] = None -+ def_rule['remote_group_id'] = None -+ def_rule['protocol'] = 'any' -+ rule = self._security_group_rule_neutron_to_vnc(def_rule, CREATE) -+ self._security_group_rule_create(sg_uuid, rule) -+ -+ ret_sg_q = self._security_group_vnc_to_neutron(sg_obj) -+ return ret_sg_q -+ -+ def security_group_read(self, sg_id): -+ try: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_id) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=sg_id) -+ -+ return self._security_group_vnc_to_neutron(sg_obj) -+ -+ def security_group_delete(self, sg_id): -+ self._security_group_delete(sg_id) -+ -+ def security_group_list(self, context, filters=None): -+ ret_list = [] -+ -+ # collect phase -+ all_sgs = [] # all sgs in all projects -+ if filters and 'tenant_id' in filters: -+ project_ids = filters['tenant_id'] -+ for p_id in project_ids: -+ project_sgs = self._security_group_list_project(p_id) -+ all_sgs.append(project_sgs) -+ elif filters and 'name' in filters: -+ p_id = str(uuid.UUID(context.tenant)) -+ project_sgs = self._security_group_list_project(p_id) -+ all_sgs.append(project_sgs) -+ else: # no filters -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ project_sgs = self._security_group_list_project(proj_id) -+ all_sgs.append(project_sgs) -+ -+ # prune phase -+ for project_sgs in all_sgs: -+ for proj_sg in project_sgs: -+ proj_sg_id = proj_sg['uuid'] -+ if not self._filters_is_present(filters, 'id', proj_sg_id): -+ continue 
-+ sg_info = self.security_group_read(proj_sg_id) -+ if not self._filters_is_present(filters, 'name', -+ sg_info['q_api_data']['name']): -+ continue -+ ret_list.append(sg_info) -+ -+ return ret_list -+ -+ def security_group_rule_create(self, sgr_q): -+ sg_id = sgr_q['security_group_id'] -+ sg_rule = self._security_group_rule_neutron_to_vnc(sgr_q, CREATE) -+ self._security_group_rule_create(sg_id, sg_rule) -+ ret_sg_rule_q = self._security_group_rule_vnc_to_neutron(sg_id, -+ sg_rule) -+ -+ return ret_sg_rule_q -+ -+ def security_group_rule_read(self, sgr_id): -+ sg_obj, sg_rule = self._security_group_rule_find(sgr_id) -+ if sg_obj and sg_rule: -+ return self._security_group_rule_vnc_to_neutron(sg_obj.uuid, -+ sg_rule) -+ -+ return {} -+ -+ def security_group_rule_delete(self, sgr_id): -+ sg_obj, sg_rule = self._security_group_rule_find(sgr_id) -+ if sg_obj and sg_rule: -+ return self._security_group_rule_delete(sg_obj, sg_rule) -+ -+ def security_group_rules_read(self, sg_id): -+ try: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_id) -+ sgr_entries = sg_obj.get_security_group_entries() -+ sg_rules = [] -+ if sgr_entries is None: -+ return -+ -+ for sg_rule in sgr_entries.get_policy_rule(): -+ sg_info = self._security_group_rule_vnc_to_neutron(sg_obj.uuid, -+ sg_rule) -+ sg_rules.append(sg_info) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=sg_id) -+ -+ return sg_rules -+ -+ def security_group_rule_list(self, filters=None): -+ ret_list = [] -+ -+ # collect phase -+ all_sgs = [] -+ if filters and 'tenant_id' in filters: -+ project_ids = filters['tenant_id'] -+ for p_id in project_ids: -+ project_sgs = self._security_group_list_project(p_id) -+ all_sgs.append(project_sgs) -+ else: # no filters -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ project_sgs = self._security_group_list_project(proj_id) -+ all_sgs.append(project_sgs) -+ -+ # prune phase -+ for project_sgs in all_sgs: -+ for proj_sg in project_sgs: -+ proj_sg_id = proj_sg['uuid'] -+ if not self._filters_is_present(filters, 'id', proj_sg_id): -+ continue -+ sgr_info = self.security_group_rules_read(proj_sg_id) -+ if sgr_info: -+ ret_list.append(sgr_info) -+ -+ return ret_list -+ -+ #route table api handlers -+ def route_table_create(self, rt_q): -+ rt_obj = self._route_table_neutron_to_vnc(rt_q, CREATE) -+ self._route_table_create(rt_obj) -+ ret_rt_q = self._route_table_vnc_to_neutron(rt_obj) -+ return ret_rt_q -+ -+ def route_table_read(self, rt_id): -+ try: -+ rt_obj = self._vnc_lib.route_table_read(id=rt_id) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=rt_id) -+ -+ return self._route_table_vnc_to_neutron(rt_obj) -+ -+ def route_table_update(self, rt_id, rt_q): -+ rt_q['id'] = rt_id -+ rt_obj = self._route_table_neutron_to_vnc(rt_q, UPDATE) -+ self._vnc_lib.route_table_update(rt_obj) -+ return self._route_table_vnc_to_neutron(rt_obj) -+ -+ def route_table_delete(self, rt_id): -+ self._route_table_delete(rt_id) -+ -+ def route_table_list(self, context, filters=None): -+ ret_list = [] -+ -+ # collect phase -+ all_rts = [] # all rts in all projects -+ if filters and 'tenant_id' in filters: -+ project_ids = filters['tenant_id'] -+ for p_id in project_ids: -+ project_rts = self._route_table_list_project(p_id) -+ all_rts.append(project_rts) -+ elif filters and 'name' in filters: -+ p_id = str(uuid.UUID(context.tenant)) -+ project_rts = self._route_table_list_project(p_id) -+ all_rts.append(project_rts) -+ else: # no 
filters -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ project_rts = self._route_table_list_project(proj_id) -+ all_rts.append(project_rts) -+ -+ # prune phase -+ for project_rts in all_rts: -+ for proj_rt in project_rts: -+ proj_rt_id = proj_rt['uuid'] -+ if not self._filters_is_present(filters, 'id', proj_rt_id): -+ continue -+ rt_info = self.route_table_read(proj_rt_id) -+ if not self._filters_is_present(filters, 'name', -+ rt_info['q_api_data']['name']): -+ continue -+ ret_list.append(rt_info) -+ -+ return ret_list -+ -+ #service instance api handlers -+ def svc_instance_create(self, si_q): -+ si_obj = self._svc_instance_neutron_to_vnc(si_q, CREATE) -+ self._svc_instance_create(si_obj) -+ ret_si_q = self._svc_instance_vnc_to_neutron(si_obj) -+ return ret_si_q -+ -+ def svc_instance_read(self, si_id): -+ try: -+ si_obj = self._vnc_lib.service_instance_read(id=si_id) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=si_id) -+ -+ return self._svc_instance_vnc_to_neutron(si_obj) -+ -+ def svc_instance_delete(self, si_id): -+ self._svc_instance_delete(si_id) -+ -+ def svc_instance_list(self, context, filters=None): -+ ret_list = [] -+ -+ # collect phase -+ all_sis = [] # all sis in all projects -+ if filters and 'tenant_id' in filters: -+ project_ids = filters['tenant_id'] -+ for p_id in project_ids: -+ project_sis = self._svc_instance_list_project(p_id) -+ all_sis.append(project_sis) -+ elif filters and 'name' in filters: -+ p_id = str(uuid.UUID(context.tenant)) -+ project_sis = self._svc_instance_list_project(p_id) -+ all_sis.append(project_sis) -+ else: # no filters -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ project_sis = self._svc_instance_list_project(proj_id) -+ all_sis.append(project_sis) -+ -+ # prune phase -+ for project_sis in all_sis: -+ for proj_si in project_sis: -+ proj_si_id = proj_si['uuid'] -+ if not self._filters_is_present(filters, 'id', proj_si_id): -+ continue -+ si_info = self.svc_instance_read(proj_si_id) -+ if not self._filters_is_present(filters, 'name', -+ si_info['q_api_data']['name']): -+ continue -+ ret_list.append(si_info) -+ -+ return ret_list -diff --git neutron/tests/unit/juniper/__init__.py neutron/tests/unit/juniper/__init__.py -new file mode 100644 -index 0000000..72bebec ---- /dev/null -+++ neutron/tests/unit/juniper/__init__.py -@@ -0,0 +1,14 @@ -+# Copyright (c) 2012 OpenStack Foundation. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -+# implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -diff --git neutron/tests/unit/juniper/test_contrail_plugin.py neutron/tests/unit/juniper/test_contrail_plugin.py -new file mode 100644 -index 0000000..decf79e ---- /dev/null -+++ neutron/tests/unit/juniper/test_contrail_plugin.py -@@ -0,0 +1,998 @@ -+# Copyright (c) 2012 OpenStack Foundation. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. 
-+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -+# implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+import datetime -+import mock -+import neutron.db.api -+from neutron.manager import NeutronManager -+from neutron.tests.unit import test_db_plugin as test_plugin -+import sys -+import uuid -+ -+subnet_obj = {u'subnet': -+ {'name': '', 'enable_dhcp': True, -+ u'network_id': u'b11ffca3-3dfc-435e-ae0e-8f44da7188b7', -+ 'tenant_id': u'8162e75da480419a8b2ae7088dbc14f5', -+ 'dns_nameservers': '', -+ u'contrail:ipam_fq_name': -+ [u'default-domain', u'admin', u'default-network-ipam'], -+ 'allocation_pools': '', 'host_routes': '', u'ip_version': 4, -+ 'gateway_ip': '', u'cidr': u'20.20.1.0/29'}} -+ -+vn_list = [] -+GlobalProjects = [] -+ -+ -+class MockVncApi(mock.MagicMock): -+ def __init__(self, *args, **kwargs): -+ pass -+ -+ def kv_retrieve(self, *args, **kwargs): -+ return [] -+ -+ def kv_store(self, *args, **kwargs): -+ return -+ -+ def kv_delete(self, *args, **kwargs): -+ return -+ -+ def project_read(self, *args, **kwargs): -+ #return MockProject('dummy-proj') -+ return GlobalProjects[0] -+ -+ def virtual_network_create(self, net_obj): -+ net_id = unicode(str(uuid.uuid4())) -+ net_obj.set_uuid(net_id) -+ vn_list.append(net_obj) -+ return net_id -+ -+ def virtual_network_read(self, id, *args, **kwargs): -+ if len(vn_list): -+ for index in range(len(vn_list)): -+ if ((vn_list[index].get_uuid()) == id): -+ return vn_list[index] -+ -+ #return a mock object if it is not created so far -+ return MockVirtualNetwork('dummy-net', MockProject()) -+ -+ def virtual_network_delete(self, *args, **kwargs): -+ return -+ -+ def virtual_network_update(self, *args, **kwargs): -+ return -+ -+ def virtual_networks_list(self, *args, **kwargs): -+ return vn_list -+ -+ -+class MockVncObject(mock.MagicMock): -+ def __init__(self, name=None, parent_obj=None, *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ if not parent_obj: -+ self._fq_name = [name] -+ else: -+ self._fq_name = parent_obj.get_fq_name() + [name] -+ -+ self._ipam_refs = [{'to': [u'default-domain', u'admin', -+ u'default-network-ipam']}] -+ self.uuid = None -+ self.name = name -+ self.network_ipam_refs = [] -+ -+ def set_uuid(self, uuid): -+ self.uuid = uuid -+ -+ def get_uuid(self): -+ return self.uuid -+ -+ def get_fq_name(self): -+ return self._fq_name -+ -+ def get_network_ipam_refs(self): -+ return getattr(self, 'network_ipam_refs', None) -+ -+ def add_network_ipam(self, ref_obj, ref_data): -+ refs = getattr(self, 'network_ipam_refs', []) -+ if not refs: -+ self.network_ipam_refs = [] -+ -+ # if ref already exists, update any attr with it -+ for ref in refs: -+ if ref['to'] == ref_obj.get_fq_name(): -+ ref = {'to': ref_obj.get_fq_name(), 'attr': ref_data} -+ if ref_obj.uuid: -+ ref['uuid'] = ref_obj.uuid -+ return -+ -+ # ref didn't exist before -+ ref_info = {'to': ref_obj.get_fq_name(), 'attr': ref_data} -+ if ref_obj.uuid: -+ ref_info['uuid'] = ref_obj.uuid -+ -+ self.network_ipam_refs.append(ref_info) -+ -+ -+class MockVirtualNetwork(MockVncObject): -+ pass -+ -+ -+class MockSubnetType(mock.MagicMock): -+ def __init__(self, name=None, ip_prefix=None, ip_prefix_len=None, -+ *args, **kwargs): -+ 
super(mock.MagicMock, self).__init__() -+ self.ip_prefix = ip_prefix -+ self.ip_prefix_len = ip_prefix_len -+ -+ def get_ip_prefix(self): -+ return self.ip_prefix -+ -+ def set_ip_prefix(self, ip_prefix): -+ self.ip_prefix = ip_prefix -+ -+ def get_ip_prefix_len(self): -+ return self.ip_prefix_len -+ -+ def set_ip_prefix_len(self, ip_prefix_len): -+ self.ip_prefix_len = ip_prefix_len -+ -+ -+class MockIpamSubnetType(mock.MagicMock): -+ def __init__(self, name=None, subnet=None, default_gateway=None, -+ *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ self.subnet = subnet -+ self.default_gateway = default_gateway -+ -+ def get_subnet(self): -+ return self.subnet -+ -+ def set_subnet(self, subnet): -+ self.subnet = subnet -+ -+ def get_default_gateway(self): -+ return self.default_gateway -+ -+ def set_default_gateway(self, default_gateway): -+ self.default_gateway = default_gateway -+ -+ def validate_IpAddressType(self, value): -+ pass -+ -+ -+class MockVnSubnetsType(mock.MagicMock): -+ def __init__(self, name=None, parent_obj=None, ipam_subnets=None, -+ *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ self.ipam_subnets = [] -+ if ipam_subnets: -+ #self.ipam_subnets = copy.deepcopy(ipam_subnets) -+ self.ipam_subnets = ipam_subnets -+ -+ def get_ipam_subnets(self): -+ return self.ipam_subnets -+ -+ def set_ipam_subnets(self, ipam_subnets): -+ self.ipam_subnets = ipam_subnets -+ -+ def add_ipam_subnets(self, value): -+ self.ipam_subnets.append(value) -+ -+ def insert_ipam_subnets(self, index, value): -+ self.ipam_subnets[index] = value -+ -+ def delete_ipam_subnets(self, value): -+ self.ipam_subnets.remove(value) -+ -+ -+class MockNetworkIpam(mock.MagicMock): -+ def __init__(self, name=None, parent_obj=None, -+ network_ipam_mgmt=None, id_perms=None, -+ *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ self._type = 'default-network-ipam' -+ self.name = name -+ self.uuid = None -+ if parent_obj: -+ self.parent_type = parent_obj._type -+ # copy parent's fq_name -+ self.fq_name = list(parent_obj.fq_name) -+ self.fq_name.append(name) -+ if not parent_obj.get_network_ipams(): -+ parent_obj.network_ipams = [] -+ parent_obj.network_ipams.append(self) -+ else: # No parent obj specified -+ self.parent_type = 'project' -+ self.fq_name = [u'default-domain', u'default-project'] -+ self.fq_name.append(name) -+ -+ # property fields -+ if network_ipam_mgmt: -+ self.network_ipam_mgmt = network_ipam_mgmt -+ if id_perms: -+ self.id_perms = id_perms -+ -+ def get_fq_name(self): -+ return self.fq_name -+ -+ -+class MockProject(mock.MagicMock): -+ def __init__(self, name=None, parent_obj=None, id_perms=None, -+ *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ self._type = 'project' -+ self.uuid = None -+ self.parent_type = 'domain' -+ self.fq_name = [u'default-domain'] -+ self.fq_name.append(name) -+ -+ def get_fq_name(self): -+ return self.fq_name -+ -+ -+def GlobalProjectApi(project_name): -+ if not GlobalProjects: -+ GlobalProjects.append(MockProject(name=project_name)) -+ -+ return GlobalProjects[0] -+ -+ -+# Mock definations for different pkgs, modules and VncApi -+mock_vnc_api_cls = mock.MagicMock(name='MockVncApi', side_effect=MockVncApi) -+mock_vnc_api_mod = mock.MagicMock(name='vnc_api_mock_mod') -+mock_vnc_api_mod.VncApi = mock_vnc_api_cls -+mock_vnc_api_mod.VirtualNetwork = MockVirtualNetwork -+mock_vnc_api_mod.SubnetType = MockSubnetType -+mock_vnc_api_mod.IpamSubnetType = MockIpamSubnetType -+mock_vnc_api_mod.VnSubnetsType = MockVnSubnetsType 
-+mock_vnc_api_mod.NetworkIpam = MockNetworkIpam -+mock_vnc_api_mod.Project = GlobalProjectApi -+ -+mock_vnc_api_pkg = mock.MagicMock(name='vnc_api_mock_pkg') -+mock_vnc_api_pkg.vnc_api = mock_vnc_api_mod -+mock_vnc_common_mod = mock.MagicMock(name='vnc_common_mock_mod') -+mock_vnc_exception_mod = mock.MagicMock(name='vnc_exception_mock_mod') -+sys.modules['neutron.plugins.juniper.contrail.ctdb.vnc_api'] = \ -+ mock_vnc_api_pkg -+sys.modules['neutron.plugins.juniper.contrail.ctdb.vnc_api.vnc_api'] = \ -+ mock_vnc_api_mod -+sys.modules['neutron.plugins.juniper.contrail.ctdb.vnc_api.common'] = \ -+ mock_vnc_common_mod -+sys.modules[('neutron.plugins.juniper.contrail.ctdb.vnc_api.common.' -+ 'exceptions')] = \ -+ mock_vnc_exception_mod -+ -+CONTRAIL_PKG_PATH = "neutron.plugins.juniper.contrail.contrailplugin" -+ -+ -+class RouterInstance(object): -+ def __init__(self): -+ self._name = 'rounter_instance' -+ -+ -+class Context(object): -+ def __init__(self, tenant_id=''): -+ self.read_only = False -+ self.show_deleted = False -+ self.roles = [u'admin', u'KeystoneServiceAdmin', u'KeystoneAdmin'] -+ self._read_deleted = 'no' -+ self.timestamp = datetime.datetime.now() -+ self.auth_token = None -+ self._session = None -+ self._is_admin = True -+ self.admin = uuid.uuid4().hex.decode() -+ self.request_id = 'req-' + str(uuid.uuid4()) -+ self.tenant = tenant_id -+ -+ -+class JVContrailPluginTestCase(test_plugin.NeutronDbPluginV2TestCase): -+ _plugin_name = ('%s.ContrailPlugin' % CONTRAIL_PKG_PATH) -+ -+ def setUp(self): -+ -+ mock_vnc_common_mod.exceptions = mock_vnc_exception_mod -+ -+ mock_vnc_api_mod.common = mock_vnc_common_mod -+ mock_vnc_api_mod.VncApi = mock_vnc_api_cls -+ -+ mock_vnc_api_pkg.vnc_api = mock_vnc_api_mod -+ -+ super(JVContrailPluginTestCase, self).setUp(self._plugin_name) -+ neutron.db.api._ENGINE = mock.MagicMock() -+ -+ def teardown(self): -+ super(JVContrailPluginTestCase, self).setUp(self._plugin_name) -+ -+ -+class TestContrailNetworks(test_plugin.TestNetworksV2, -+ JVContrailPluginTestCase): -+ -+ def test_create_network(self): -+ plugin_obj = NeutronManager.get_plugin() -+ networks_req = {} -+ network = {} -+ router_inst = RouterInstance() -+ network['router:external'] = router_inst -+ network[u'name'] = u'network1' -+ network['admin_state_up'] = 'True' -+ network['tenant_id'] = uuid.uuid4().hex.decode() -+ network['vpc:route_table'] = '' -+ network['shared'] = False -+ network['port_security_enabled'] = True -+ network[u'contrail:policys'] = [] -+ -+ networks_req[u'network'] = network -+ context_obj = Context(network['tenant_id']) -+ -+ #create project -+ if not GlobalProjects: -+ project_name = 'admin' -+ GlobalProjects.append(MockProject(name=project_name)) -+ -+ net = plugin_obj.create_network(context_obj, networks_req) -+ if 'contrail:fq_name' not in net.keys(): -+ assert False -+ else: -+ assert True -+ -+ def test_delete_network(self): -+ # First create the network and request to delete the same -+ plugin_obj = NeutronManager.get_plugin() -+ networks_req = {} -+ network = {} -+ router_inst = RouterInstance() -+ network['router:external'] = router_inst -+ network[u'name'] = u'network1' -+ network['admin_state_up'] = 'True' -+ network['tenant_id'] = uuid.uuid4().hex.decode() -+ network['vpc:route_table'] = '' -+ network['shared'] = False -+ network['port_security_enabled'] = True -+ network[u'contrail:policys'] = [] -+ -+ context_obj = Context(network['tenant_id']) -+ #create project -+ if not GlobalProjects: -+ project_name = 'admin' -+ 
GlobalProjects.append(MockProject(name=project_name)) -+ -+ networks_req[u'network'] = network -+ net_dict = plugin_obj.create_network(context_obj, networks_req) -+ net_id = net_dict.get('id') -+ -+ plugin_obj.delete_network(context_obj, net_id) -+ mock_vnc_api_cls.virtual_network_delete.assert_called_once() -+ -+ def test_update_network(self): -+ plugin_obj = NeutronManager.get_plugin() -+ networks_req = {} -+ network = {} -+ router_inst = RouterInstance() -+ network['router:external'] = router_inst -+ network[u'name'] = u'network1' -+ network['admin_state_up'] = 'True' -+ network['tenant_id'] = uuid.uuid4().hex.decode() -+ network['vpc:route_table'] = '' -+ network['shared'] = False -+ network['port_security_enabled'] = True -+ network[u'contrail:policys'] = [] -+ -+ context_obj = Context(network['tenant_id']) -+ #create project -+ if not GlobalProjects: -+ project_name = 'admin' -+ GlobalProjects.append(MockProject(name=project_name)) -+ -+ networks_req[u'network'] = network -+ net_dict = plugin_obj.create_network(context_obj, networks_req) -+ net_id = net_dict.get('id') -+ # change one of the attribute and update the network -+ network['admin_state_up'] = 'False' -+ new_dict = plugin_obj.update_network(context_obj, net_id, -+ networks_req) -+ if (net_dict.get('admin_state_up') == new_dict.get('admin_state_up')): -+ assert False -+ else: -+ assert True -+ -+ # Not supported test cases in the this TestClass -+ def test_create_networks_bulk_emulated(self): -+ pass -+ -+ def test_create_networks_bulk_emulated_plugin_failure(self): -+ pass -+ -+ def test_create_public_network(self): -+ pass -+ -+ def test_create_networks_bulk_wrong_input(self): -+ pass -+ -+ def test_update_shared_network_noadmin_returns_403(self): -+ pass -+ -+ def test_update_network_set_shared(self): -+ pass -+ -+ def test_update_network_set_not_shared_multi_tenants_returns_409(self): -+ pass -+ -+ def test_update_network_set_not_shared_multi_tenants2_returns_409(self): -+ pass -+ -+ def test_update_network_set_not_shared_single_tenant(self): -+ pass -+ -+ def test_update_network_set_not_shared_other_tenant_returns_409(self): -+ pass -+ -+ def test_update_network_with_subnet_set_shared(self): -+ pass -+ -+ def test_show_network(self): -+ pass -+ -+ def test_show_network_with_subnet(self): -+ pass -+ -+ def test_list_networks(self): -+ pass -+ -+ def test_list_shared_networks_with_non_admin_user(self): -+ pass -+ -+ def test_list_networks_with_parameters(self): -+ pass -+ -+ def test_list_networks_with_fields(self): -+ pass -+ -+ def test_list_networks_with_parameters_invalid_values(self): -+ pass -+ -+ def test_list_networks_with_pagination_emulated(self): -+ pass -+ -+ def test_list_networks_without_pk_in_fields_pagination_emulated(self): -+ pass -+ -+ def test_list_networks_with_sort_emulated(self): -+ pass -+ -+ def test_list_networks_with_pagination_reverse_emulated(self): -+ pass -+ -+ def test_invalid_admin_status(self): -+ pass -+ -+ -+class TestContrailSubnetsV2(test_plugin.TestSubnetsV2, -+ JVContrailPluginTestCase): -+ -+ def test_create_subnet(self): -+ #First create virtual network without subnet and then -+ #create subnet to update given network. 
-+ plugin_obj = NeutronManager.get_plugin() -+ networks_req = {} -+ network = {} -+ router_inst = RouterInstance() -+ network['router:external'] = router_inst -+ network[u'name'] = u'network1' -+ network['admin_state_up'] = 'True' -+ network['tenant_id'] = uuid.uuid4().hex.decode() -+ network['vpc:route_table'] = '' -+ network['shared'] = False -+ network['port_security_enabled'] = True -+ network[u'contrail:policys'] = [] -+ -+ networks_req[u'network'] = network -+ context_obj = Context(network['tenant_id']) -+ #create project -+ if not GlobalProjects: -+ project_name = 'admin' -+ GlobalProjects.append(MockProject(name=project_name)) -+ -+ net = plugin_obj.create_network(context_obj, networks_req) -+ -+ subnet_obj[u'subnet']['network_id'] = net['id'] -+ subnet_dict = plugin_obj.create_subnet(context_obj, subnet_obj) -+ if subnet_dict['cidr'] != subnet_obj['subnet']['cidr']: -+ assert False -+ else: -+ assert True -+ -+ def test_delete_subnet(self): -+ #First create virtual network without subnet and then -+ #create subnet to update given network. -+ plugin_obj = NeutronManager.get_plugin() -+ networks_req = {} -+ network = {} -+ router_inst = RouterInstance() -+ network['router:external'] = router_inst -+ network[u'name'] = u'network1' -+ network['admin_state_up'] = 'True' -+ network['tenant_id'] = uuid.uuid4().hex.decode() -+ network['vpc:route_table'] = '' -+ network['shared'] = False -+ network['port_security_enabled'] = True -+ network[u'contrail:policys'] = [] -+ -+ networks_req[u'network'] = network -+ context_obj = Context(network['tenant_id']) -+ #create project -+ if not GlobalProjects: -+ project_name = 'admin' -+ GlobalProjects.append(MockProject(name=project_name)) -+ -+ net = plugin_obj.create_network(context_obj, networks_req) -+ -+ subnet_obj[u'subnet']['network_id'] = net['id'] -+ subnet_dict = plugin_obj.create_subnet(context_obj, subnet_obj) -+ subnet_id = subnet_dict['id'] -+ plugin_obj.delete_subnet(context_obj, subnet_id) -+ -+ def test_update_subnet_gateway_in_allocation_pool_returns_409(self): -+ pass -+ -+ def test_delete_network(self): -+ pass -+ -+ def test_update_subnet_gw_outside_cidr_force_on_returns_400(self): -+ pass -+ -+ def test_update_subnet_adding_additional_host_routes_and_dns(self): -+ pass -+ -+ def test_update_subnet_no_gateway(self): -+ pass -+ -+ def test_create_subnet_bad_cidr(self): -+ pass -+ -+ def test_create_subnet_gw_of_network_force_on_returns_400(self): -+ pass -+ -+ def test_create_subnet_gw_outside_cidr_force_on_returns_400(self): -+ pass -+ -+ def test_create_two_subnets(self): -+ pass -+ -+ def test_create_two_subnets_same_cidr_returns_400(self): -+ pass -+ -+ def test_create_subnet_bad_V4_cidr(self): -+ pass -+ -+ def test_create_subnet_bad_V6_cidr(self): -+ pass -+ -+ def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self): -+ pass -+ -+ def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self): -+ pass -+ -+ def test_create_subnets_bulk_native(self): -+ pass -+ -+ def test_create_subnets_bulk_emulated(self): -+ pass -+ -+ def test_create_subnets_bulk_emulated_plugin_failure(self): -+ pass -+ -+ def test_create_subnets_bulk_native_plugin_failure(self): -+ pass -+ -+ def test_delete_subnet_port_exists_owned_by_network(self): -+ pass -+ -+ def test_delete_subnet_port_exists_owned_by_other(self): -+ pass -+ -+ def test_create_subnet_bad_tenant(self): -+ pass -+ -+ def test_create_subnet_bad_ip_version(self): -+ pass -+ -+ def test_create_subnet_bad_ip_version_null(self): -+ pass -+ -+ def 
test_create_subnet_bad_uuid(self): -+ pass -+ -+ def test_create_subnet_bad_boolean(self): -+ pass -+ -+ def test_create_subnet_bad_pools(self): -+ pass -+ -+ def test_create_subnet_bad_nameserver(self): -+ pass -+ -+ def test_create_subnet_bad_hostroutes(self): -+ pass -+ -+ def test_create_subnet_defaults(self): -+ pass -+ -+ def test_create_subnet_gw_values(self): -+ pass -+ -+ def test_create_force_subnet_gw_values(self): -+ pass -+ -+ def test_create_subnet_with_allocation_pool(self): -+ pass -+ -+ def test_create_subnet_with_none_gateway(self): -+ pass -+ -+ def test_create_subnet_with_none_gateway_fully_allocated(self): -+ pass -+ -+ def test_subnet_with_allocation_range(self): -+ pass -+ -+ def test_create_subnet_with_none_gateway_allocation_pool(self): -+ pass -+ -+ def test_create_subnet_with_v6_allocation_pool(self): -+ pass -+ -+ def test_create_subnet_with_large_allocation_pool(self): -+ pass -+ -+ def test_create_subnet_multiple_allocation_pools(self): -+ pass -+ -+ def test_create_subnet_with_dhcp_disabled(self): -+ pass -+ -+ def test_create_subnet_default_gw_conflict_allocation_pool_returns_409( -+ self): -+ pass -+ -+ def test_create_subnet_gateway_in_allocation_pool_returns_409(self): -+ pass -+ -+ def test_create_subnet_overlapping_allocation_pools_returns_409(self): -+ pass -+ -+ def test_create_subnet_invalid_allocation_pool_returns_400(self): -+ pass -+ -+ def test_create_subnet_out_of_range_allocation_pool_returns_400(self): -+ pass -+ -+ def test_create_subnet_shared_returns_400(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv6_cidrv4(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv4_cidrv6(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv4_gatewayv6(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv6_gatewayv4(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv6_dns_v4(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self): -+ pass -+ -+ def test_create_subnet_gw_bcast_force_on_returns_400(self): -+ pass -+ -+ def test_update_subnet(self): -+ pass -+ -+ def test_update_subnet_shared_returns_400(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv4_gatewayv6(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv6_gatewayv4(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv4_dns_v6(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): -+ pass -+ -+ def test_show_subnet(self): -+ pass -+ -+ def test_list_subnets(self): -+ pass -+ -+ def test_list_subnets_shared(self): -+ pass -+ -+ def test_list_subnets_with_parameter(self): -+ pass -+ -+ def test_invalid_ip_version(self): -+ pass -+ -+ def test_invalid_subnet(self): -+ pass -+ -+ def test_invalid_ip_address(self): -+ pass -+ -+ def test_invalid_uuid(self): -+ pass -+ -+ def test_create_subnet_with_one_dns(self): -+ pass -+ -+ def test_create_subnet_with_two_dns(self): -+ pass -+ -+ def test_create_subnet_with_too_many_dns(self): -+ pass -+ -+ def test_create_subnet_with_one_host_route(self): -+ pass -+ -+ def test_create_subnet_with_two_host_routes(self): -+ pass -+ -+ def test_create_subnet_with_too_many_routes(self): -+ pass -+ -+ def test_update_subnet_dns(self): -+ pass -+ -+ def test_update_subnet_dns_to_None(self): -+ pass -+ -+ def test_update_subnet_dns_with_too_many_entries(self): -+ pass -+ -+ def 
test_update_subnet_route(self): -+ pass -+ -+ def test_update_subnet_route_to_None(self): -+ pass -+ -+ def test_update_subnet_route_with_too_many_entries(self): -+ pass -+ -+ def test_delete_subnet_with_dns(self): -+ pass -+ -+ def test_delete_subnet_with_route(self): -+ pass -+ -+ def test_delete_subnet_with_dns_and_route(self): -+ pass -+ -+ def test_list_subnets_with_pagination_emulated(self): -+ pass -+ -+ def test_list_subnets_with_pagination_reverse_emulated(self): -+ pass -+ -+ def test_list_subnets_with_sort_emulated(self): -+ pass -+ -+ def test_validate_subnet_host_routes_exhausted(self): -+ pass -+ -+ def test_validate_subnet_dns_nameservers_exhausted(self): -+ pass -+ -+ -+class TestContrailPortsV2(test_plugin.TestPortsV2, -+ JVContrailPluginTestCase): -+ -+ def test_create_port_json(self): -+ pass -+ -+ def test_create_port_bad_tenant(self): -+ pass -+ -+ def test_create_port_public_network(self): -+ pass -+ -+ def test_create_port_public_network_with_ip(self): -+ pass -+ -+ def test_create_ports_bulk_native(self): -+ pass -+ -+ def test_create_ports_bulk_emulated(self): -+ pass -+ -+ def test_create_ports_bulk_wrong_input(self): -+ pass -+ -+ def test_create_ports_bulk_emulated_plugin_failure(self): -+ pass -+ -+ def test_create_ports_bulk_native_plugin_failure(self): -+ pass -+ -+ def test_list_ports(self): -+ pass -+ -+ def test_list_ports_filtered_by_fixed_ip(self): -+ pass -+ -+ def test_list_ports_public_network(self): -+ pass -+ -+ def test_show_port(self): -+ pass -+ -+ def test_delete_port(self): -+ pass -+ -+ def test_delete_port_public_network(self): -+ pass -+ -+ def test_update_port(self): -+ pass -+ -+ def test_update_device_id_null(self): -+ pass -+ -+ def test_delete_network_if_port_exists(self): -+ pass -+ -+ def test_delete_network_port_exists_owned_by_network(self): -+ pass -+ -+ def test_update_port_delete_ip(self): -+ pass -+ -+ def test_no_more_port_exception(self): -+ pass -+ -+ def test_update_port_update_ip(self): -+ pass -+ -+ def test_update_port_update_ip_address_only(self): -+ pass -+ -+ def test_update_port_update_ips(self): -+ pass -+ -+ def test_update_port_add_additional_ip(self): -+ pass -+ -+ def test_requested_duplicate_mac(self): -+ pass -+ -+ def test_mac_generation(self): -+ pass -+ -+ def test_mac_generation_4octet(self): -+ pass -+ -+ def test_bad_mac_format(self): -+ pass -+ -+ def test_mac_exhaustion(self): -+ pass -+ -+ def test_requested_duplicate_ip(self): -+ pass -+ -+ def test_requested_subnet_delete(self): -+ pass -+ -+ def test_requested_subnet_id(self): -+ pass -+ -+ def test_requested_subnet_id_not_on_network(self): -+ pass -+ -+ def test_overlapping_subnets(self): -+ pass -+ -+ def test_requested_subnet_id_v4_and_v6(self): -+ pass -+ -+ def test_range_allocation(self): -+ pass -+ -+ def test_requested_invalid_fixed_ips(self): -+ pass -+ -+ def test_invalid_ip(self): -+ pass -+ -+ def test_requested_split(self): -+ pass -+ -+ def test_duplicate_ips(self): -+ pass -+ -+ def test_fixed_ip_invalid_subnet_id(self): -+ pass -+ -+ def test_fixed_ip_invalid_ip(self): -+ pass -+ -+ def test_requested_ips_only(self): -+ pass -+ -+ def test_recycling(self): -+ pass -+ -+ def test_invalid_admin_state(self): -+ pass -+ -+ def test_invalid_mac_address(self): -+ pass -+ -+ def test_default_allocation_expiration(self): -+ pass -+ -+ def test_update_fixed_ip_lease_expiration(self): -+ pass -+ -+ def test_port_delete_holds_ip(self): -+ pass -+ -+ def test_update_fixed_ip_lease_expiration_invalid_address(self): -+ pass -+ -+ def 
test_hold_ip_address(self): -+ pass -+ -+ def test_recycle_held_ip_address(self): -+ pass -+ -+ def test_recycle_expired_previously_run_within_context(self): -+ pass -+ -+ def test_update_port_not_admin(self): -+ pass -+ -+ def test_list_ports_with_pagination_emulated(self): -+ pass -+ -+ def test_list_ports_with_pagination_reverse_emulated(self): -+ pass -+ -+ def test_list_ports_with_sort_emulated(self): -+ pass -+ -+ def test_max_fixed_ips_exceeded(self): -+ pass -+ -+ def test_update_max_fixed_ips_exceeded(self): -+ pass -+ -+ def test_recycle_ip_address_without_allocation_pool(self): -+ pass -diff --git setup.cfg setup.cfg -index af52a4d..27ef0ce 100644 ---- setup.cfg -+++ setup.cfg -@@ -61,6 +61,7 @@ data_files = - etc/neutron/plugins/openvswitch = etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini - etc/neutron/plugins/plumgrid = etc/neutron/plugins/plumgrid/plumgrid.ini - etc/neutron/plugins/ryu = etc/neutron/plugins/ryu/ryu.ini -+ etc/neutron/plugins/juniper/contrail/ContrailPlugin = etc/neutron/plugins/juniper/contrail/ContrailPlugin.ini - scripts = - bin/quantum-rootwrap - bin/neutron-rootwrap diff --git a/contrail/neutron_v4.patch b/contrail/neutron_v4.patch deleted file mode 100644 index 50bdbc8eba..0000000000 --- a/contrail/neutron_v4.patch +++ /dev/null @@ -1,3931 +0,0 @@ -diff --git etc/neutron/plugins/juniper/contrail/ContrailPlugin.ini etc/neutron/plugins/juniper/contrail/ContrailPlugin.ini -new file mode 100644 -index 0000000..ab1cee6 ---- /dev/null -+++ etc/neutron/plugins/juniper/contrail/ContrailPlugin.ini -@@ -0,0 +1,10 @@ -+ -+[APISERVER] -+api_server_ip=10.84.13.34 -+api_server_port=8082 -+multi_tenancy=False -+ -+[KEYSTONE] -+admin_user=admin -+admin_password=contrail123 -+admin_tenant_name=admin -diff --git neutron/extensions/ipam.py neutron/extensions/ipam.py -new file mode 100644 -index 0000000..5d610b3 ---- /dev/null -+++ neutron/extensions/ipam.py -@@ -0,0 +1,140 @@ -+from abc import abstractmethod -+ -+from neutron.api.v2 import attributes as attr -+from neutron.api.v2 import base -+from neutron.common import exceptions as qexception -+from neutron.api import extensions -+from neutron import manager -+from oslo.config import cfg -+ -+ -+# Ipam Exceptions -+class IpamNotFound(qexception.NotFound): -+ message = _("IPAM %(id)s could not be found") -+ -+# Attribute Map -+RESOURCE_ATTRIBUTE_MAP = { -+ 'ipams': { -+ 'id': {'allow_post': False, 'allow_put': False, -+ 'validate': {'type:regex': attr.UUID_PATTERN}, -+ 'is_visible': True}, -+ 'name': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': ''}, -+ 'fq_name': {'allow_post': False, 'allow_put': False, -+ 'is_visible': True}, -+ 'tenant_id': {'allow_post': True, 'allow_put': False, -+ 'required_by_policy': True, -+ 'is_visible': True}, -+ 'mgmt': {'allow_post': True, 'allow_put': True, -+ 'is_visible': True, 'default': None}, -+ 'nets_using': {'allow_post': False, 'allow_put': False, -+ 'is_visible': True, 'default': ''} -+ }, -+} -+ -+# TODO should this be tied to ipam extension? 
-+EXTENDED_ATTRIBUTES_2_0 = { -+ 'networks': { -+ 'contrail:fq_name': {'allow_post': False, -+ 'allow_put': False, -+ 'is_visible': True}, -+ 'contrail:instance_count': {'allow_post': False, -+ 'allow_put': False, -+ 'is_visible': True}, -+ 'contrail:policys': {'allow_post': True, -+ 'allow_put': True, -+ 'default': '', -+ 'is_visible': True}, -+ 'contrail:subnet_ipam': {'allow_post': False, -+ 'allow_put': False, -+ 'default': '', -+ 'is_visible': True}, -+ }, -+ 'subnets': { -+ 'contrail:instance_count': {'allow_post': False, -+ 'allow_put': False, -+ 'is_visible': True}, -+ 'contrail:ipam_fq_name': {'allow_post': True, -+ 'allow_put': True, -+ 'default': '', -+ 'is_visible': True}, -+ } -+} -+ -+ -+class Ipam(object): -+ -+ @classmethod -+ def get_name(cls): -+ return "Network IP Address Management" -+ -+ @classmethod -+ def get_alias(cls): -+ return "ipam" -+ -+ @classmethod -+ def get_description(cls): -+ return ("Configuration object for holding common to a set of" -+ " IP address blocks") -+ -+ @classmethod -+ def get_namespace(cls): -+ return "http://docs.openstack.org/TODO" -+ -+ @classmethod -+ def get_updated(cls): -+ return "2012-07-20T10:00:00-00:00" -+ -+ @classmethod -+ def get_resources(cls): -+ """ Returns Ext Resources """ -+ exts = [] -+ plugin = manager.NeutronManager.get_plugin() -+ for resource_name in ['ipam']: -+ collection_name = resource_name + "s" -+ params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict()) -+ -+ member_actions = {} -+ -+ controller = base.create_resource(collection_name, -+ resource_name, -+ plugin, params, -+ member_actions=member_actions) -+ -+ ex = extensions.ResourceExtension(collection_name, -+ controller, -+ member_actions=member_actions) -+ exts.append(ex) -+ -+ return exts -+ -+ def get_extended_resources(self, version): -+ if version == "2.0": -+ return EXTENDED_ATTRIBUTES_2_0 -+ else: -+ return {} -+#end class Ipam -+ -+ -+class IpamPluginBase(object): -+ -+ @abstractmethod -+ def create_ipam(self, context, ipam): -+ pass -+ -+ @abstractmethod -+ def update_ipam(self, context, id, ipam): -+ pass -+ -+ @abstractmethod -+ def get_ipam(self, context, id, fields=None): -+ pass -+ -+ @abstractmethod -+ def delete_ipam(self, context, id): -+ pass -+ -+ @abstractmethod -+ def get_ipams(self, context, filters=None, fields=None): -+ pass -+#end class IpamPluginBase -diff --git neutron/extensions/policy.py neutron/extensions/policy.py -new file mode 100644 -index 0000000..59418e4 ---- /dev/null -+++ neutron/extensions/policy.py -@@ -0,0 +1,105 @@ -+from abc import abstractmethod -+ -+from neutron.api.v2 import attributes as attr -+from neutron.api.v2 import base -+from neutron.common import exceptions as qexception -+from neutron.api import extensions -+from neutron import manager -+from oslo.config import cfg -+ -+ -+# Policy Exceptions -+class PolicyNotFound(qexception.NotFound): -+ message = _("Policy %(id)s could not be found") -+ -+# Attribute Map -+RESOURCE_ATTRIBUTE_MAP = { -+ 'policys': { -+ 'id': {'allow_post': False, 'allow_put': False, -+ 'validate': {'type:regex': attr.UUID_PATTERN}, -+ 'is_visible': True}, -+ 'name': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': ''}, -+ 'fq_name': {'allow_post': False, 'allow_put': False, -+ 'is_visible': True}, -+ 'tenant_id': {'allow_post': True, 'allow_put': False, -+ 'required_by_policy': True, -+ 'is_visible': True}, -+ 'entries': {'allow_post': True, 'allow_put': True, -+ 'is_visible': True, 'default': ''}, -+ 'nets_using': {'allow_post': False, 'allow_put': 
False, -+ 'is_visible': True, 'default': ''}, -+ }, -+} -+ -+ -+class Policy(object): -+ -+ @classmethod -+ def get_name(cls): -+ return "Network Policy" -+ -+ @classmethod -+ def get_alias(cls): -+ return "policy" -+ -+ @classmethod -+ def get_description(cls): -+ return ("Configuration object for Network Policies") -+ -+ @classmethod -+ def get_namespace(cls): -+ return "http://docs.openstack.org/TODO" -+ -+ @classmethod -+ def get_updated(cls): -+ return "2012-07-20T10:00:00-00:00" -+ -+ @classmethod -+ def get_resources(cls): -+ """ Returns Ext Resources """ -+ exts = [] -+ plugin = manager.NeutronManager.get_plugin() -+ for resource_name in ['policy']: -+ collection_name = resource_name + "s" -+ params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict()) -+ -+ member_actions = {} -+ -+ controller = base.create_resource(collection_name, -+ resource_name, -+ plugin, params, -+ member_actions=member_actions) -+ -+ ex = extensions.ResourceExtension(collection_name, -+ controller, -+ member_actions=member_actions) -+ exts.append(ex) -+ -+ return exts -+ -+#end class Policy -+ -+ -+class PolicyPluginBase(object): -+ -+ @abstractmethod -+ def create_policy(self, context, policy): -+ pass -+ -+ @abstractmethod -+ def update_policy(self, context, id, policy): -+ pass -+ -+ @abstractmethod -+ def get_policy(self, context, id, fields=None): -+ pass -+ -+ @abstractmethod -+ def delete_policy(self, context, id): -+ pass -+ -+ @abstractmethod -+ def get_policys(self, context, filters=None, fields=None): -+ pass -+#end class PolicyPluginBase -diff --git neutron/extensions/portbindings.py neutron/extensions/portbindings.py -index dbef592..bbed97b 100644 ---- neutron/extensions/portbindings.py -+++ neutron/extensions/portbindings.py -@@ -45,11 +45,12 @@ VIF_TYPE_802_QBG = '802.1qbg' - VIF_TYPE_802_QBH = '802.1qbh' - VIF_TYPE_HYPERV = 'hyperv' - VIF_TYPE_MIDONET = 'midonet' -+VIF_TYPE_VROUTER = 'vrouter' - VIF_TYPE_OTHER = 'other' - VIF_TYPES = [VIF_TYPE_UNBOUND, VIF_TYPE_BINDING_FAILED, VIF_TYPE_OVS, - VIF_TYPE_IVS, VIF_TYPE_BRIDGE, VIF_TYPE_802_QBG, - VIF_TYPE_802_QBH, VIF_TYPE_HYPERV, VIF_TYPE_MIDONET, -- VIF_TYPE_OTHER] -+ VIF_TYPE_VROUTER, VIF_TYPE_OTHER] - - - EXTENDED_ATTRIBUTES_2_0 = { -diff --git neutron/extensions/vpcroutetable.py neutron/extensions/vpcroutetable.py -new file mode 100644 -index 0000000..ec1f720 ---- /dev/null -+++ neutron/extensions/vpcroutetable.py -@@ -0,0 +1,186 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+ -+# Copyright (c) 2012 OpenStack Foundation. -+# All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. 
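
The portbindings hunk above adds VIF_TYPE_VROUTER so the Contrail plugin can advertise its vrouter VIF type through the standard port-binding dict, exactly as contrailplugin.py does further down in this patch. A minimal sketch of that pattern, with constant names taken from neutron.extensions.portbindings:

    from neutron.extensions import portbindings

    def _get_base_binding_dict(self):
        # Report the vrouter VIF type in each port's binding; claim
        # port filtering only when the security-group extension is
        # among the plugin's supported aliases.
        return {
            portbindings.VIF_TYPE: portbindings.VIF_TYPE_VROUTER,
            portbindings.CAPABILITIES: {
                portbindings.CAP_PORT_FILTER:
                    'security-group' in self.supported_extension_aliases}}

Nova's VIF handling keys off the reported vif_type, so a vrouter-aware driver can take over interface plugging instead of the OVS or Linux bridge paths.
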
-+ -+from abc import ABCMeta -+from abc import abstractmethod -+ -+from oslo.config import cfg -+ -+from neutron.api import extensions -+from neutron.api.v2 import attributes as attr -+from neutron.api.v2 import base -+from neutron.common import exceptions as qexception -+from neutron import manager -+from neutron.openstack.common import uuidutils -+from neutron import quota -+ -+ -+# Route table Exceptions -+class RouteTableNotFound(qexception.NotFound): -+ message = _("Route table %(id)s does not exist") -+ -+# Attribute Map -+RESOURCE_ATTRIBUTE_MAP = { -+ 'route_tables': { -+ 'id': {'allow_post': False, 'allow_put': False, -+ 'validate': {'type:uuid': None}, -+ 'is_visible': True, -+ 'primary_key': True}, -+ 'name': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': '', -+ 'validate': {'type:name_not_default': None}}, -+ 'fq_name': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': '', -+ 'validate': {'type:name_not_default': None}}, -+ 'routes': {'allow_post': True, 'allow_put': True, -+ 'is_visible': True, 'default': ''}, -+ 'tenant_id': {'allow_post': True, 'allow_put': False, -+ 'required_by_policy': True, -+ 'is_visible': True}, -+ }, -+ 'nat_instances': { -+ 'id': {'allow_post': False, 'allow_put': False, -+ 'validate': {'type:uuid': None}, -+ 'is_visible': True, -+ 'primary_key': True}, -+ 'name': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': '', -+ 'validate': {'type:name_not_default': None}}, -+ 'fq_name': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': '', -+ 'validate': {'type:name_not_default': None}}, -+ 'internal_net': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': ''}, -+ 'internal_ip': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': ''}, -+ 'external_net': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': ''}, -+ 'external_ip': {'allow_post': True, 'allow_put': False, -+ 'is_visible': True, 'default': ''}, -+ 'tenant_id': {'allow_post': True, 'allow_put': False, -+ 'required_by_policy': True, -+ 'is_visible': True}, -+ }, -+} -+ -+EXTENDED_ATTRIBUTES_2_0 = { -+ 'networks': { -+ 'vpc:route_table': {'allow_post': True, -+ 'allow_put': True, -+ 'default': '', -+ 'is_visible': True}, -+ } -+} -+ -+ -+class Vpcroutetable(extensions.ExtensionDescriptor): -+ """ Route table extension""" -+ -+ @classmethod -+ def get_name(cls): -+ return "route-table" -+ -+ @classmethod -+ def get_alias(cls): -+ return "route-table" -+ -+ @classmethod -+ def get_description(cls): -+ return "VPC route tables extension." 
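
Taken together, the attribute maps above expose route tables and NAT instances as first-class resources and bolt a 'vpc:route_table' attribute onto networks. A hedged sketch of the request bodies a client would send (TENANT_ID and ROUTE_TABLE_ID are placeholders, not values from this patch):

    # Create a route table, then point a network at it via the
    # extended attribute; field names follow RESOURCE_ATTRIBUTE_MAP
    # and EXTENDED_ATTRIBUTES_2_0 above.
    route_table_body = {'route_table': {'name': 'rt-1',
                                        'tenant_id': TENANT_ID}}
    network_update_body = {'network': {'vpc:route_table': ROUTE_TABLE_ID}}

With the extension loaded, these bodies travel over the usual POST /v2.0/route-tables and PUT /v2.0/networks/<id> calls, since get_resources() registers the collection as 'route-tables'.
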
-+ -+ @classmethod -+ def get_namespace(cls): -+ # todo -+ return "http://docs.openstack.org/ext/routetables/api/v2.0" -+ -+ @classmethod -+ def get_updated(cls): -+ return "2013-07-24T10:00:00-00:00" -+ -+ @classmethod -+ def get_resources(cls): -+ """ Returns Ext Resources """ -+ my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] -+ attr.PLURALS.update(dict(my_plurals)) -+ exts = [] -+ plugin = manager.NeutronManager.get_plugin() -+ for resource_name in ['route_table', 'nat_instance']: -+ collection_name = resource_name.replace('_', '-') + "s" -+ params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) -+ quota.QUOTAS.register_resource_by_name(resource_name) -+ controller = base.create_resource(collection_name, -+ resource_name, -+ plugin, params, allow_bulk=True, -+ allow_pagination=True, -+ allow_sorting=True) -+ -+ ex = extensions.ResourceExtension(collection_name, -+ controller, -+ attr_map=params) -+ exts.append(ex) -+ -+ return exts -+ -+ def get_extended_resources(self, version): -+ if version == "2.0": -+ return EXTENDED_ATTRIBUTES_2_0 -+ else: -+ return {} -+ -+ -+class RouteTablePluginBase(object): -+ __metaclass__ = ABCMeta -+ -+ @abstractmethod -+ def create_route_table(self, context, route_table): -+ pass -+ -+ @abstractmethod -+ def delete_route_table(self, context, id): -+ pass -+ -+ @abstractmethod -+ def update_route_table(self, context, id, route_table): -+ pass -+ -+ @abstractmethod -+ def get_route_tables(self, context, filters=None, fields=None, -+ sorts=None, limit=None, marker=None, -+ page_reverse=False): -+ pass -+ -+ @abstractmethod -+ def get_route_table(self, context, id, fields=None): -+ pass -+ -+ @abstractmethod -+ def create_nat_instance(self, context, nat_instance): -+ pass -+ -+ @abstractmethod -+ def delete_nat_instance(self, context, id): -+ pass -+ -+ @abstractmethod -+ def get_nat_instances(self, context, filters=None, fields=None, -+ sorts=None, limit=None, marker=None, -+ page_reverse=False): -+ pass -+ -+ @abstractmethod -+ def get_nat_instance(self, context, id, fields=None): -+ pass -diff --git neutron/plugins/juniper/__init__.py neutron/plugins/juniper/__init__.py -new file mode 100644 -index 0000000..7bc8217 ---- /dev/null -+++ neutron/plugins/juniper/__init__.py -@@ -0,0 +1,17 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay Juniper Networks. -diff --git neutron/plugins/juniper/contrail/__init__.py neutron/plugins/juniper/contrail/__init__.py -new file mode 100644 -index 0000000..7bc8217 ---- /dev/null -+++ neutron/plugins/juniper/contrail/__init__.py -@@ -0,0 +1,17 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. 
You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay Juniper Networks. -diff --git neutron/plugins/juniper/contrail/contrailplugin.py neutron/plugins/juniper/contrail/contrailplugin.py -new file mode 100644 -index 0000000..01f37e2 ---- /dev/null -+++ neutron/plugins/juniper/contrail/contrailplugin.py -@@ -0,0 +1,600 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay, Rudra Rugge, Atul Moghe Juniper Networks. -+ -+import ctdb.config_db -+import httplib2 -+from oslo.config import cfg -+import re -+import string -+ -+from neutron.common import exceptions as exc -+from neutron.db import db_base_plugin_v2 -+from neutron.db import portbindings_base -+from neutron.extensions import l3 -+from neutron.extensions import securitygroup -+from neutron.extensions import portbindings -+from neutron.openstack.common import log as logging -+ -+ -+LOG = logging.getLogger(__name__) -+ -+vnc_opts = [ -+ cfg.StrOpt('api_server_ip', default='127.0.0.1'), -+ cfg.StrOpt('api_server_port', default='8082'), -+] -+ -+ -+def _read_cfg(multi_parser, section, option, default): -+ name_tuple = (section, option) -+ cfg_names = [] -+ cfg_names.append(name_tuple) -+ try: -+ val = multi_parser.get(names=cfg_names, multi=False) -+ except KeyError: -+ val = default -+ -+ return val -+ -+ -+class ContrailPlugin(db_base_plugin_v2.NeutronDbPluginV2, -+ l3.RouterPluginBase, -+ securitygroup.SecurityGroupPluginBase, -+ portbindings_base.PortBindingBaseMixin): -+ -+ # only floatingip part of router extension is supported. 
-+ supported_extension_aliases = ["ipam", "policy", "security-group", -+ "router", "route-table", "port-security", -+ "binding",] -+ __native_bulk_support = False -+ _cfgdb = None -+ _args = None -+ _tenant_id_dict = {} -+ _tenant_name_dict = {} -+ -+ @classmethod -+ def _parse_class_args(cls, multi_parser): -+ read_ok = multi_parser.read(cfg.CONF.config_file) -+ -+ if len(read_ok) != len(cfg.CONF.config_file): -+ raise cfg.Error("Some config files were not parsed properly") -+ -+ cls._multi_tenancy = _read_cfg(multi_parser, 'APISERVER', -+ 'multi_tenancy', False) -+ cls._max_retries = _read_cfg(multi_parser, 'APISERVER', -+ 'max_retries', -1) -+ cls._retry_interval = _read_cfg(multi_parser, 'APISERVER', -+ 'retry_interval', 3) -+ cls._admin_token = _read_cfg(multi_parser, 'KEYSTONE', -+ 'admin_token', '') -+ cls._auth_url = _read_cfg(multi_parser, 'KEYSTONE', 'auth_url', '') -+ cls._admin_user = _read_cfg(multi_parser, 'KEYSTONE', 'admin_user', -+ 'user1') -+ cls._admin_password = _read_cfg(multi_parser, 'KEYSTONE', -+ 'admin_password', 'password1') -+ cls._admin_tenant_name = _read_cfg(multi_parser, 'KEYSTONE', -+ 'admin_tenant_name', -+ 'default-domain') -+ cls._tenants_api = '%s/tenants' % (cls._auth_url) -+ -+ @classmethod -+ def _connect_to_db(cls): -+ """Connection to config db. -+ -+ Many instantiations of plugin (base + extensions) but need to have -+ only one config db conn (else error from ifmap-server) -+ """ -+ cls._cfgdb_map = {} -+ if cls._cfgdb is None: -+ sip = cfg.CONF.APISERVER.api_server_ip -+ sport = cfg.CONF.APISERVER.api_server_port -+ # Initialize connection to DB and add default entries -+ cls._cfgdb = ctdb.config_db.DBInterface(cls._admin_user, -+ cls._admin_password, -+ cls._admin_tenant_name, -+ sip, sport, -+ cls._max_retries, -+ cls._retry_interval) -+ cls._cfgdb.manager = cls -+ -+ @classmethod -+ def _get_user_cfgdb(cls, context): -+ if not cls._multi_tenancy: -+ return cls._cfgdb -+ user_id = context.user_id -+ role = string.join(context.roles, ",") -+ if user_id not in cls._cfgdb_map: -+ cls._cfgdb_map[user_id] = ctdb.config_db.DBInterface( -+ cls._admin_user, cls._admin_password, cls._admin_tenant_name, -+ cfg.CONF.APISERVER.api_server_ip, -+ cfg.CONF.APISERVER.api_server_port, -+ cls._max_retries, cls._retry_interval, -+ user_info={'user_id': user_id, 'role': role}) -+ cls._cfgdb_map[user_id].manager = cls -+ -+ return cls._cfgdb_map[user_id] -+ -+ @classmethod -+ def _tenant_list_from_keystone(cls): -+ # get all tenants -+ hdrs = {'X-Auth-Token': cls._admin_token, -+ 'Content-Type': 'application/json'} -+ try: -+ rsp, content = httplib2.Http().request(cls._tenants_api, -+ method="GET", headers=hdrs) -+ if rsp.status != 200: -+ return -+ except Exception: -+ return -+ -+ # transform needed for python compatibility -+ content = re.sub('true', 'True', content) -+ content = re.sub('null', 'None', content) -+ content = eval(content) -+ -+ # bail if response is unexpected -+ if 'tenants' not in content: -+ return -+ -+ # create a dictionary for id->name and name->id mapping -+ for tenant in content['tenants']: -+ print 'Adding tenant %s:%s to cache' % (tenant['name'], -+ tenant['id']) -+ cls._tenant_id_dict[tenant['id']] = tenant['name'] -+ cls._tenant_name_dict[tenant['name']] = tenant['id'] -+ -+ def update_security_group(self, context, id, security_group): -+ pass -+ -+ def __init__(self): -+ cfg.CONF.register_opts(vnc_opts, 'APISERVER') -+ -+ multi_parser = cfg.MultiConfigParser() -+ ContrailPlugin._parse_class_args(multi_parser) -+ -+ 
ContrailPlugin._connect_to_db() -+ self._cfgdb = ContrailPlugin._cfgdb -+ -+ ContrailPlugin._tenant_list_from_keystone() -+ self.base_binding_dict = self._get_base_binding_dict() -+ portbindings_base.register_port_dict_function() -+ -+ def _get_base_binding_dict(self): -+ binding = { -+ portbindings.VIF_TYPE: portbindings.VIF_TYPE_VROUTER, -+ portbindings.CAPABILITIES: { -+ portbindings.CAP_PORT_FILTER: -+ 'security-group' in self.supported_extension_aliases}} -+ return binding -+ -+ # Network API handlers -+ def create_network(self, context, network): -+ """Creates a new Virtual Network, and assigns it a symbolic name.""" -+ cfgdb = self._get_user_cfgdb(context) -+ net_info = cfgdb.network_create(network['network']) -+ -+ # verify transformation is conforming to api -+ net_dict = self._make_network_dict(net_info['q_api_data'], -+ None, False) -+ -+ LOG.debug(_("create_network(): %r"), net_dict) -+ return net_dict -+ -+ def get_network(self, context, id, fields=None): -+ cfgdb = self._get_user_cfgdb(context) -+ net_info = cfgdb.network_read(id, fields) -+ -+ # verify transformation is conforming to api -+ if not fields: -+ # should return all fields -+ net_dict = self._make_network_dict(net_info['q_api_data'], -+ fields, False) -+ else: -+ net_dict = net_info['q_api_data'] -+ -+ LOG.debug(_("get_network(): %r"), net_dict) -+ return self._fields(net_dict, fields) -+ -+ def update_network(self, context, net_id, network): -+ """Updates the attributes of a particular Virtual Network.""" -+ cfgdb = self._get_user_cfgdb(context) -+ net_info = cfgdb.network_update(net_id, network['network']) -+ -+ # verify transformation is conforming to api -+ net_dict = self._make_network_dict(net_info['q_api_data'], -+ None, False) -+ -+ LOG.debug(_("update_network(): %r"), net_dict) -+ return net_dict -+ -+ def delete_network(self, context, net_id): -+ """Network delete operation. -+ -+ Deletes the network with the specified network identifier -+ belonging to the specified tenant. 
-+ """ -+ cfgdb = self._get_user_cfgdb(context) -+ cfgdb.network_delete(net_id) -+ LOG.debug(_("delete_network(): %r"), net_id) -+ -+ def get_networks(self, context, filters=None, fields=None): -+ cfgdb = self._get_user_cfgdb(context) -+ nets_info = cfgdb.network_list(filters) -+ -+ nets_dicts = [] -+ for n_info in nets_info: -+ # verify transformation is conforming to api -+ n_dict = self._make_network_dict(n_info['q_api_data'], fields, -+ False) -+ -+ nets_dicts.append(n_dict) -+ -+ LOG.debug( -+ "get_networks(): filters: %r data: %r", filters, nets_dicts) -+ return nets_dicts -+ -+ def get_networks_count(self, context, filters=None): -+ cfgdb = self._get_user_cfgdb(context) -+ nets_count = cfgdb.network_count(filters) -+ LOG.debug(_("get_networks_count(): %r"), str(nets_count)) -+ return nets_count -+ -+ def create_subnet(self, context, subnet): -+ cfgdb = self._get_user_cfgdb(context) -+ subnet_info = cfgdb.subnet_create(subnet['subnet']) -+ -+ # verify transformation is conforming to api -+ subnet_dict = self._make_subnet_dict(subnet_info['q_api_data']) -+ -+ LOG.debug(_("create_subnet(): %r"), subnet_dict) -+ return subnet_dict -+ -+ def get_subnet(self, context, subnet_id, fields=None): -+ cfgdb = self._get_user_cfgdb(context) -+ subnet_info = cfgdb.subnet_read(subnet_id) -+ -+ # verify transformation is conforming to api -+ subnet_dict = self._make_subnet_dict(subnet_info['q_api_data'], -+ fields) -+ -+ LOG.debug(_("get_subnet(): %r"), subnet_dict) -+ return self._fields(subnet_dict, fields) -+ -+ def update_subnet(self, context, subnet_id, subnet): -+ cfgdb = self._get_user_cfgdb(context) -+ subnet_info = cfgdb.subnet_update(subnet_id, subnet['subnet']) -+ -+ # verify transformation is conforming to api -+ subnet_dict = self._make_subnet_dict(subnet_info['q_api_data']) -+ -+ LOG.debug(_("update_subnet(): %r"), subnet_dict) -+ return subnet_dict -+ -+ def delete_subnet(self, context, subnet_id): -+ cfgdb = self._get_user_cfgdb(context) -+ cfgdb.subnet_delete(subnet_id) -+ -+ LOG.debug(_("delete_subnet(): %r"), subnet_id) -+ -+ def get_subnets(self, context, filters=None, fields=None): -+ """Called from Neutron API -> get_.""" -+ cfgdb = self._get_user_cfgdb(context) -+ subnets_info = cfgdb.subnets_list(filters) -+ -+ subnets_dicts = [] -+ for sn_info in subnets_info: -+ # verify transformation is conforming to api -+ sn_dict = self._make_subnet_dict(sn_info['q_api_data'], fields) -+ -+ subnets_dicts.append(sn_dict) -+ -+ LOG.debug( -+ "get_subnets(): filters: %r data: %r", filters, subnets_dicts) -+ return subnets_dicts -+ -+ def get_subnets_count(self, context, filters=None): -+ cfgdb = self._get_user_cfgdb(context) -+ subnets_count = cfgdb.subnets_count(filters) -+ LOG.debug(_("get_subnets_count(): %r"), str(subnets_count)) -+ return subnets_count -+ -+ def _make_floatingip_dict(self, floatingip, fields=None): -+ res = {'id': floatingip['id'], -+ 'tenant_id': floatingip['tenant_id'], -+ 'floating_ip_address': floatingip['floating_ip_address'], -+ 'floating_network_id': floatingip['floating_network_id'], -+ 'router_id': floatingip['router_id'], -+ 'port_id': floatingip['fixed_port_id'], -+ 'fixed_ip_address': floatingip['fixed_ip_address']} -+ return self._fields(res, fields) -+ -+ def create_floatingip(self, context, floatingip): -+ cfgdb = self._get_user_cfgdb(context) -+ fip_info = cfgdb.floatingip_create(floatingip['floatingip']) -+ -+ # verify transformation is conforming to api -+ fip_dict = self._make_floatingip_dict(fip_info['q_api_data']) -+ -+ 
LOG.debug(_("create_floatingip(): %r"), fip_dict) -+ return fip_dict -+ -+ def update_floatingip(self, context, fip_id, floatingip): -+ cfgdb = self._get_user_cfgdb(context) -+ fip_info = cfgdb.floatingip_update(fip_id, -+ floatingip['floatingip']) -+ -+ # verify transformation is conforming to api -+ fip_dict = self._make_floatingip_dict(fip_info['q_api_data']) -+ -+ LOG.debug(_("update_floatingip(): %r"), fip_dict) -+ return fip_dict -+ -+ def get_floatingip(self, context, id, fields=None): -+ cfgdb = self._get_user_cfgdb(context) -+ fip_info = cfgdb.floatingip_read(id) -+ -+ # verify transformation is conforming to api -+ fip_dict = self._make_floatingip_dict(fip_info['q_api_data']) -+ -+ LOG.debug(_("get_floatingip(): %r"), fip_dict) -+ return fip_dict -+ -+ def delete_floatingip(self, context, fip_id): -+ cfgdb = self._get_user_cfgdb(context) -+ cfgdb.floatingip_delete(fip_id) -+ LOG.debug(_("delete_floating(): %r"), fip_id) -+ -+ def get_floatingips(self, context, filters=None, fields=None): -+ cfgdb = self._get_user_cfgdb(context) -+ fips_info = cfgdb.floatingip_list(filters) -+ -+ fips_dicts = [] -+ for fip_info in fips_info: -+ # verify transformation is conforming to api -+ fip_dict = self._make_floatingip_dict(fip_info['q_api_data']) -+ -+ fips_dicts.append(fip_dict) -+ -+ LOG.debug(_("get_floatingips(): %r"), fips_dicts) -+ return fips_dicts -+ -+ def get_floatingips_count(self, context, filters=None): -+ cfgdb = self._get_user_cfgdb(context) -+ floatingips_count = cfgdb.floatingip_count(filters) -+ LOG.debug(_("get_floatingips_count(): %r"), str(floatingips_count)) -+ return floatingips_count -+ -+ def create_port(self, context, port): -+ """Creates a port on the specified Virtual Network.""" -+ cfgdb = self._get_user_cfgdb(context) -+ port_info = cfgdb.port_create(port['port']) -+ -+ # verify transformation is conforming to api -+ port_dict = self._make_port_dict(port_info['q_api_data']) -+ self._process_portbindings_create_and_update(context, -+ port['port'], -+ port_dict) -+ -+ LOG.debug(_("create_port(): %r"), port_dict) -+ return port_dict -+ -+ def get_port(self, context, port_id, fields=None): -+ cfgdb = self._get_user_cfgdb(context) -+ port_info = cfgdb.port_read(port_id) -+ -+ # verify transformation is conforming to api -+ port_dict = self._make_port_dict(port_info['q_api_data'], fields) -+ self._process_portbindings_create_and_update(context, -+ port_info, -+ port_dict) -+ -+ LOG.debug(_("get_port(): %r"), port_dict) -+ return self._fields(port_dict, fields) -+ -+ def update_port(self, context, port_id, port): -+ """Port update on a virtual network. -+ -+ Updates the attributes of a port on the specified Virtual Network. -+ """ -+ cfgdb = self._get_user_cfgdb(context) -+ port_info = cfgdb.port_update(port_id, port['port']) -+ self._process_portbindings_create_and_update(context, -+ port['port'], -+ port_info) -+ -+ # verify transformation is conforming to api -+ port_dict = self._make_port_dict(port_info['q_api_data']) -+ -+ LOG.debug(_("update_port(): %r"), port_dict) -+ return port_dict -+ -+ def delete_port(self, context, port_id): -+ """port delete on a virtual network. -+ -+ Deletes a port on a specified Virtual Network, -+ if the port contains a remote interface attachment, -+ the remote interface is first un-plugged and then the port -+ is deleted. 
-+ """ -+ cfgdb = self._get_user_cfgdb(context) -+ cfgdb.port_delete(port_id) -+ LOG.debug(_("delete_port(): %r"), port_id) -+ -+ def get_ports(self, context, filters=None, fields=None): -+ """Get all port identifiers in the specified Virtual Network.""" -+ cfgdb = self._get_user_cfgdb(context) -+ ports_info = cfgdb.port_list(filters) -+ -+ ports_dicts = [] -+ for p_info in ports_info: -+ # verify transformation is conforming to api -+ p_dict = self._make_port_dict(p_info['q_api_data'], fields) -+ self._process_portbindings_create_and_update(context, -+ p_info, -+ p_dict) -+ -+ ports_dicts.append(p_dict) -+ -+ LOG.debug( -+ "get_ports(): filter: %r data: %r", filters, ports_dicts) -+ return ports_dicts -+ -+ def get_ports_count(self, context, filters=None): -+ cfgdb = self._get_user_cfgdb(context) -+ ports_count = cfgdb.port_count(filters) -+ LOG.debug(_("get_ports_count(): %r"), str(ports_count)) -+ return ports_count -+ -+ def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id): -+ """Plug in a remote interface. -+ -+ Attaches a remote interface to the specified port on the -+ specified Virtual Network. -+ """ -+ port = self._get_port(tenant_id, net_id, port_id) -+ # Validate attachment -+ self._validate_attachment(tenant_id, net_id, port_id, -+ remote_interface_id) -+ if port['interface_id']: -+ raise exc.PortInUse(net_id=net_id, port_id=port_id, -+ att_id=port['interface_id']) -+ -+ def unplug_interface(self, tenant_id, net_id, port_id): -+ """Unplug a remote interface. -+ -+ Detaches a remote interface from the specified port on the -+ specified Virtual Network. -+ """ -+ self._get_port(tenant_id, net_id, port_id) -+ -+ # Security Group handlers -+ def _make_security_group_rule_dict(self, security_group_rule, fields=None): -+ res = {'id': security_group_rule['id'], -+ 'tenant_id': security_group_rule['tenant_id'], -+ 'security_group_id': security_group_rule['security_group_id'], -+ 'ethertype': security_group_rule['ethertype'], -+ 'direction': security_group_rule['direction'], -+ 'protocol': security_group_rule['protocol'], -+ 'port_range_min': security_group_rule['port_range_min'], -+ 'port_range_max': security_group_rule['port_range_max'], -+ 'remote_ip_prefix': security_group_rule['remote_ip_prefix'], -+ 'remote_group_id': security_group_rule['remote_group_id']} -+ -+ return self._fields(res, fields) -+ -+ def _make_security_group_dict(self, security_group, fields=None): -+ res = {'id': security_group['id'], -+ 'name': security_group['name'], -+ 'tenant_id': security_group['tenant_id'], -+ 'description': security_group['description']} -+ res['security_group_rules'] = [self._make_security_group_rule_dict(r) -+ for r in security_group['rules']] -+ return self._fields(res, fields) -+ -+ def create_security_group(self, context, security_group): -+ cfgdb = self._get_user_cfgdb(context) -+ sg_info = cfgdb.security_group_create( -+ security_group['security_group']) -+ -+ # verify transformation is conforming to api -+ sg_dict = self._make_security_group_dict(sg_info['q_api_data']) -+ -+ LOG.debug(_("create_security_group(): %r"), sg_dict) -+ return sg_dict -+ -+ def delete_security_group(self, context, id): -+ cfgdb = self._get_user_cfgdb(context) -+ cfgdb.security_group_delete(id) -+ LOG.debug(_("delete_security_group(): %r"), id) -+ -+ def get_security_groups(self, context, filters=None, fields=None, -+ sorts=None, limit=None, marker=None, -+ page_reverse=False): -+ cfgdb = self._get_user_cfgdb(context) -+ security_groups_info = cfgdb.security_group_list(context, 
filters) -+ -+ security_groups_dicts = [] -+ for sg_info in security_groups_info: -+ # verify transformation is conforming to api -+ sg_dict = self._make_security_group_dict(sg_info['q_api_data'], -+ fields) -+ -+ security_groups_dicts.append(sg_dict) -+ -+ LOG.debug( -+ "get_security_groups(): filter: %r data: %r", -+ filters, security_groups_dicts) -+ return security_groups_dicts -+ -+ def get_security_group(self, context, id, fields=None): -+ cfgdb = self._get_user_cfgdb(context) -+ sg_info = cfgdb.security_group_read(id) -+ -+ # verify transformation is conforming to api -+ sg_dict = self._make_security_group_dict(sg_info['q_api_data'], -+ fields) -+ -+ LOG.debug(_("get_security_group(): %r"), sg_dict) -+ return self._fields(sg_dict, fields) -+ -+ def create_security_group_rule(self, context, security_group_rule): -+ cfgdb = self._get_user_cfgdb(context) -+ sgr_info = cfgdb.security_group_rule_create( -+ security_group_rule['security_group_rule']) -+ -+ # verify transformation is conforming to api -+ sgr_dict = self._make_security_group_rule_dict(sgr_info['q_api_data']) -+ -+ LOG.debug(_("create_security_group_rule(): %r"), sgr_dict) -+ return sgr_dict -+ -+ def delete_security_group_rule(self, context, id): -+ cfgdb = self._get_user_cfgdb(context) -+ cfgdb.security_group_rule_delete(id) -+ LOG.debug(_("delete_security_group_rule(): %r"), id) -+ -+ def get_security_group_rules(self, context, filters=None, fields=None, -+ sorts=None, limit=None, marker=None, -+ page_reverse=False): -+ cfgdb = self._get_user_cfgdb(context) -+ security_group_rules_info = cfgdb.security_group_rule_list(filters) -+ -+ security_group_rules_dicts = [] -+ for sgr_info in security_group_rules_info: -+ for sgr in sgr_info: -+ # verify transformation is conforming to api -+ sgr_dict = self._make_security_group_rule_dict( -+ sgr['q_api_data'], fields) -+ security_group_rules_dicts.append(sgr_dict) -+ -+ LOG.debug( -+ "get_security_group_rules(): filter: %r data: %r ", -+ filters, security_group_rules_dicts) -+ return security_group_rules_dicts -+ -+ def get_security_group_rule(self, context, id, fields=None): -+ cfgdb = self._get_user_cfgdb(context) -+ sgr_info = cfgdb.security_group_rule_read(id) -+ -+ # verify transformation is conforming to api -+ sgr_dict = {} -+ if sgr_info != {}: -+ sgr_dict = self._make_security_group_rule_dict( -+ sgr_info['q_api_data'], fields) -+ -+ LOG.debug(_("get_security_group_rule(): %r"), sgr_dict) -+ return self._fields(sgr_dict, fields) -diff --git neutron/plugins/juniper/contrail/ctdb/__init__.py neutron/plugins/juniper/contrail/ctdb/__init__.py -new file mode 100644 -index 0000000..7bc8217 ---- /dev/null -+++ neutron/plugins/juniper/contrail/ctdb/__init__.py -@@ -0,0 +1,17 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay Juniper Networks. 
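
The tenant cache warm-up in _tenant_list_from_keystone() above rewrites the
raw HTTP body with regexes ('true' -> 'True', 'null' -> 'None') and then
eval()s it. A minimal sketch of the same id<->name cache fill done with
json.loads instead; fill_tenant_cache and the two dicts are illustrative
names, not part of the plugin:

    import json

    def fill_tenant_cache(body, id_to_name, name_to_id):
        # json.loads maps JSON true/null to Python True/None natively,
        # so no regex rewriting or eval() is required
        content = json.loads(body)
        for tenant in content.get('tenants', []):
            id_to_name[tenant['id']] = tenant['name']
            name_to_id[tenant['name']] = tenant['id']

    # usage with a canned Keystone-style response
    raw = '{"tenants": [{"id": "ab12", "name": "demo", "enabled": true}]}'
    id_to_name, name_to_id = {}, {}
    fill_tenant_cache(raw, id_to_name, name_to_id)
    assert name_to_id['demo'] == 'ab12'
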
-diff --git neutron/plugins/juniper/contrail/ctdb/config_db.py neutron/plugins/juniper/contrail/ctdb/config_db.py -new file mode 100644 -index 0000000..efe281c ---- /dev/null -+++ neutron/plugins/juniper/contrail/ctdb/config_db.py -@@ -0,0 +1,1708 @@ -+# vim: tabstop=4 shiftwidth=4 softtabstop=4 -+# -+# Copyright 2013 Juniper Networks. All rights reserved. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); you may -+# not use this file except in compliance with the License. You may obtain -+# a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -+# License for the specific language governing permissions and limitations -+# under the License. -+# -+# @author: Hampapur Ajay, Rudra Rugge, Atul Moghe Juniper Networks. -+ -+import json -+import netaddr -+import re -+import requests -+import socket -+import time -+import uuid -+ -+from cfgm_common import exceptions as vnc_exc -+from vnc_api import vnc_api -+ -+from neutron.api.v2 import attributes as attr -+from neutron.common import constants -+from neutron.common import exceptions -+from neutron.extensions import portbindings -+from neutron.openstack.common import log as logging -+ -+ -+LOG = logging.getLogger(__name__) -+ -+_DEFAULT_HEADERS = { -+ 'Content-type': 'application/json; charset="UTF-8"', } -+ -+CREATE = 1 -+READ = 2 -+UPDATE = 3 -+DELETE = 4 -+ -+ -+class DBInterface(object): -+ """An instance of this class forwards requests to vnc cfg api server""" -+ Q_URL_PREFIX = '/extensions/ct' -+ -+ def __init__(self, admin_name, admin_password, admin_tenant_name, -+ api_server_ip, api_server_port, max_retries, retry_interval, -+ user_info=None): -+ self._api_server_ip = api_server_ip -+ self._api_server_port = api_server_port -+ self._max_retries = max_retries -+ self._retry_interval = retry_interval -+ -+ self._db_cache = { -+ 'q_networks': {}, -+ 'q_subnets': {}, -+ 'q_subnet_maps': {}, -+ 'q_policies': {}, -+ 'q_ipams': {}, -+ 'q_floatingips': {}, -+ 'q_ports': {}, -+ 'q_fixed_ip_to_subnet': {}, -+ # obj-uuid to tenant-uuid mapping -+ 'q_obj_to_tenant': {}, -+ # port count per tenant-id -+ 'q_tenant_port_count': {}, -+ 'vnc_networks': {}, -+ 'vnc_ports': {}, -+ 'vnc_projects': {}, -+ 'vnc_instance_ips': {}, -+ } -+ -+ # Retry till a api-server is up or up to max_retries -+ connected = False -+ remaining = self._max_retries -+ if remaining == -1: -+ remaining = 'infinite' -+ msg = _('api-server connection failed. %s attempts left.') -+ -+ while not connected: -+ try: -+ self._vnc_lib = vnc_api.VncApi( -+ admin_name, admin_password, -+ admin_tenant_name, api_server_ip, -+ api_server_port, '/', user_info=user_info) -+ connected = True -+ except requests.exceptions.RequestException: -+ LOG.warn(msg % remaining) -+ if (remaining != 'infinite' and remaining == 0): -+ raise -+ if (remaining != 'infinite'): -+ remaining -= 1 -+ -+ time.sleep(self._retry_interval) -+ -+ # changes 'net_fq_name_str pfx/len' key to 'net_id pfx/len' key -+ # eg. 
domain1:project1:net1 1.1.1.0/24 becomes 1.1.1.0/24 -+ subnet_map = self._vnc_lib.kv_retrieve(key=None) -+ for kv_dict in subnet_map: -+ key = kv_dict['key'] -+ if len(key.split()) == 1: -+ subnet_id = key -+ # uuid key, fixup value portion to 'net_id pfx/len' format -+ # if not already so -+ if len(kv_dict['value'].split(':')) == 1: -+ # new format already, skip -+ continue -+ -+ net_fq_name = kv_dict['value'].split()[0].split(':') -+ try: -+ net_obj = self._virtual_network_read(fq_name=net_fq_name) -+ except vnc_exc.NoIdError: -+ LOG.warning(_("No network: %r"), net_fq_name) -+ self._vnc_lib.kv_delete(subnet_id) -+ continue -+ -+ new_subnet_key = '%s %s' % (net_obj.uuid, -+ kv_dict['value'].split()[1]) -+ self._vnc_lib.kv_store(subnet_id, new_subnet_key) -+ else: # subnet key -+ if len(key.split()[0].split(':')) == 1: -+ # new format already, skip -+ continue -+ -+ # delete old key, convert to new key format and save -+ old_subnet_key = key -+ self._vnc_lib.kv_delete(old_subnet_key) -+ -+ subnet_id = kv_dict['value'] -+ net_fq_name = key.split()[0].split(':') -+ try: -+ net_obj = self._virtual_network_read(fq_name=net_fq_name) -+ except vnc_exc.NoIdError: -+ LOG.warning(_("No network: %r"), net_fq_name) -+ continue -+ -+ new_subnet_key = '%s %s' % (net_obj.uuid, key.split()[1]) -+ self._vnc_lib.kv_store(new_subnet_key, subnet_id) -+ -+ def _request_api_server(self, url, method, data=None, headers=None): -+ return requests.request(method, url=url, data=data, headers=headers) -+ -+ def _relay_request(self, request): -+ """Send received request to api server""" -+ # chop neutron parts of url and add api server address -+ url_path = re.sub(self.Q_URL_PREFIX, '', request.environ['PATH_INFO']) -+ url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, -+ url_path) -+ -+ return self._request_api_server( -+ url, request.environ['REQUEST_METHOD'], -+ request.body, {'Content-type': request.environ['CONTENT_TYPE']}) -+ -+ def _obj_to_dict(self, obj): -+ return self._vnc_lib.obj_to_dict(obj) -+ #end _obj_to_dict -+ -+ def _ensure_instance_exists(self, instance_id): -+ instance_name = instance_id -+ instance_obj = vnc_api.VirtualMachine(instance_name) -+ try: -+ id = self._vnc_lib.obj_to_id(instance_obj) -+ instance_obj = self._vnc_lib.virtual_machine_read(id=id) -+ except vnc_exc.NoIdError: # instance doesn't exist, create it -+ instance_obj.uuid = instance_id -+ self._vnc_lib.virtual_machine_create(instance_obj) -+ -+ return instance_obj -+ -+ def _ensure_default_security_group_exists(self, proj_id): -+ proj_obj = self._vnc_lib.project_read(id=proj_id) -+ sg_groups = proj_obj.get_security_groups() -+ for sg_group in sg_groups or []: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_group['uuid']) -+ if sg_obj.name == 'default': -+ return -+ -+ sg_obj = vnc_api.SecurityGroup(name='default', parent_obj=proj_obj) -+ self._vnc_lib.security_group_create(sg_obj) -+ -+ #allow all egress traffic -+ def_rule = { -+ 'port_range_min': 0, -+ 'port_range_max': 65535, -+ 'direction': 'egress', -+ 'remote_ip_prefix': None, -+ 'remote_group_id': None, -+ 'protocol': 'any', -+ } -+ rule = self._security_group_rule_neutron_to_vnc(def_rule, CREATE) -+ self._security_group_rule_create(sg_obj.uuid, rule) -+ -+ #allow ingress traffic from within default security group -+ def_rule = { -+ 'port_range_min': 0, -+ 'port_range_max': 65535, -+ 'direction': 'ingress', -+ 'remote_ip_prefix': None, -+ 'remote_group_id': None, -+ 'protocol': 'any', -+ } -+ rule = self._security_group_rule_neutron_to_vnc(def_rule, 
CREATE) -+ self._security_group_rule_create(sg_obj.uuid, rule) -+ -+ def _get_obj_tenant_id(self, q_type, obj_uuid): -+ # Get the mapping from cache, else seed cache and return -+ try: -+ return self._db_cache['q_obj_to_tenant'][obj_uuid] -+ except KeyError: -+ # Seed the cache and return -+ if q_type == 'port': -+ port_obj = self._virtual_machine_interface_read(obj_uuid) -+ net_id = port_obj.get_virtual_network_refs()[0]['uuid'] -+ # recurse up type-hierarchy -+ tenant_id = self._get_obj_tenant_id('network', net_id) -+ self._set_obj_tenant_id(obj_uuid, tenant_id) -+ return tenant_id -+ -+ if q_type == 'network': -+ net_obj = self._virtual_network_read(net_id=obj_uuid) -+ tenant_id = net_obj.parent_uuid.replace('-', '') -+ self._set_obj_tenant_id(obj_uuid, tenant_id) -+ return tenant_id -+ -+ def _set_obj_tenant_id(self, obj_uuid, tenant_uuid): -+ self._db_cache['q_obj_to_tenant'][obj_uuid] = tenant_uuid -+ -+ def _del_obj_tenant_id(self, obj_uuid): -+ try: -+ del self._db_cache['q_obj_to_tenant'][obj_uuid] -+ except Exception: -+ pass -+ -+ def _project_read(self, proj_id=None, fq_name=None): -+ if proj_id: -+ proj_obj = self._vnc_lib.project_read(id=proj_id) -+ fq_name_str = json.dumps(proj_obj.get_fq_name()) -+ self._db_cache['vnc_projects'][proj_id] = proj_obj -+ self._db_cache['vnc_projects'][fq_name_str] = proj_obj -+ return proj_obj -+ -+ if fq_name: -+ fq_name_str = json.dumps(fq_name) -+ proj_obj = self._vnc_lib.project_read(fq_name=fq_name) -+ self._db_cache['vnc_projects'][fq_name_str] = proj_obj -+ self._db_cache['vnc_projects'][proj_obj.uuid] = proj_obj -+ return proj_obj -+ -+ def _security_group_rule_create(self, sg_id, sg_rule): -+ sg_vnc = self._vnc_lib.security_group_read(id=sg_id) -+ rules = sg_vnc.get_security_group_entries() -+ if rules is None: -+ rules = vnc_api.PolicyEntriesType([sg_rule]) -+ else: -+ rules.add_policy_rule(sg_rule) -+ -+ sg_vnc.set_security_group_entries(rules) -+ self._vnc_lib.security_group_update(sg_vnc) -+ -+ def _security_group_rule_find(self, sgr_id): -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ project_sgs = self._security_group_list_project(proj_id) -+ -+ for sg in project_sgs: -+ sg_obj = self._vnc_lib.security_group_read(id=sg['uuid']) -+ sgr_entries = sg_obj.get_security_group_entries() -+ if sgr_entries is None: -+ continue -+ -+ for sg_rule in sgr_entries.get_policy_rule(): -+ if sg_rule.get_rule_uuid() == sgr_id: -+ return sg_obj, sg_rule -+ -+ return None, None -+ -+ def _security_group_rule_delete(self, sg_obj, sg_rule): -+ rules = sg_obj.get_security_group_entries() -+ rules.get_policy_rule().remove(sg_rule) -+ sg_obj.set_security_group_entries(rules) -+ self._vnc_lib.security_group_update(sg_obj) -+ -+ def _security_group_create(self, sg_obj): -+ sg_uuid = self._vnc_lib.security_group_create(sg_obj) -+ return sg_uuid -+ -+ def _security_group_delete(self, sg_id): -+ self._vnc_lib.security_group_delete(id=sg_id) -+ -+ def _virtual_network_create(self, net_obj): -+ net_uuid = self._vnc_lib.virtual_network_create(net_obj) -+ -+ return net_uuid -+ -+ def _virtual_network_read(self, net_id=None, fq_name=None): -+ if net_id: -+ net_obj = self._vnc_lib.virtual_network_read(id=net_id) -+ fq_name_str = json.dumps(net_obj.get_fq_name()) -+ self._db_cache['vnc_networks'][net_id] = net_obj -+ self._db_cache['vnc_networks'][fq_name_str] = net_obj -+ return net_obj -+ -+ if fq_name: -+ fq_name_str = json.dumps(fq_name) -+ net_obj = 
self._vnc_lib.virtual_network_read(fq_name=fq_name) -+ self._db_cache['vnc_networks'][fq_name_str] = net_obj -+ self._db_cache['vnc_networks'][net_obj.uuid] = net_obj -+ return net_obj -+ -+ def _virtual_network_update(self, net_obj): -+ self._vnc_lib.virtual_network_update(net_obj) -+ # read back to get subnet gw allocated by api-server -+ net_obj = self._vnc_lib.virtual_network_read(id=net_obj.uuid) -+ fq_name_str = json.dumps(net_obj.get_fq_name()) -+ -+ self._db_cache['vnc_networks'][net_obj.uuid] = net_obj -+ self._db_cache['vnc_networks'][fq_name_str] = net_obj -+ -+ def _virtual_network_delete(self, net_id): -+ fq_name_str = None -+ try: -+ net_obj = self._db_cache['vnc_networks'][net_id] -+ fq_name_str = json.dumps(net_obj.get_fq_name()) -+ except KeyError: -+ pass -+ -+ self._vnc_lib.virtual_network_delete(id=net_id) -+ -+ try: -+ del self._db_cache['vnc_networks'][net_id] -+ if fq_name_str: -+ del self._db_cache['vnc_networks'][fq_name_str] -+ except KeyError: -+ pass -+ -+ def _virtual_machine_interface_create(self, port_obj): -+ port_uuid = self._vnc_lib.virtual_machine_interface_create(port_obj) -+ -+ return port_uuid -+ -+ def _virtual_machine_interface_read(self, port_id=None, fq_name=None): -+ if port_id: -+ port_obj = self._vnc_lib.virtual_machine_interface_read(id=port_id) -+ fq_name_str = json.dumps(port_obj.get_fq_name()) -+ self._db_cache['vnc_ports'][port_id] = port_obj -+ self._db_cache['vnc_ports'][fq_name_str] = port_obj -+ return port_obj -+ -+ if fq_name: -+ fq_name_str = json.dumps(fq_name) -+ port_obj = self._vnc_lib.virtual_machine_interface_read( -+ fq_name=fq_name) -+ self._db_cache['vnc_ports'][fq_name_str] = port_obj -+ self._db_cache['vnc_ports'][port_obj.uuid] = port_obj -+ return port_obj -+ -+ def _virtual_machine_interface_update(self, port_obj): -+ self._vnc_lib.virtual_machine_interface_update(port_obj) -+ fq_name_str = json.dumps(port_obj.get_fq_name()) -+ -+ self._db_cache['vnc_ports'][port_obj.uuid] = port_obj -+ self._db_cache['vnc_ports'][fq_name_str] = port_obj -+ -+ def _virtual_machine_interface_delete(self, port_id): -+ fq_name_str = None -+ try: -+ port_obj = self._db_cache['vnc_ports'][port_id] -+ fq_name_str = json.dumps(port_obj.get_fq_name()) -+ except KeyError: -+ pass -+ -+ self._vnc_lib.virtual_machine_interface_delete(id=port_id) -+ -+ try: -+ del self._db_cache['vnc_ports'][port_id] -+ if fq_name_str: -+ del self._db_cache['vnc_ports'][fq_name_str] -+ except KeyError: -+ pass -+ -+ def _instance_ip_create(self, iip_obj): -+ iip_uuid = self._vnc_lib.instance_ip_create(iip_obj) -+ -+ return iip_uuid -+ -+ def _instance_ip_read(self, instance_ip_id=None, fq_name=None): -+ if instance_ip_id: -+ iip_obj = self._vnc_lib.instance_ip_read(id=instance_ip_id) -+ fq_name_str = json.dumps(iip_obj.get_fq_name()) -+ self._db_cache['vnc_instance_ips'][instance_ip_id] = iip_obj -+ self._db_cache['vnc_instance_ips'][fq_name_str] = iip_obj -+ return iip_obj -+ -+ if fq_name: -+ fq_name_str = json.dumps(fq_name) -+ iip_obj = self._vnc_lib.instance_ip_read(fq_name=fq_name) -+ self._db_cache['vnc_instance_ips'][fq_name_str] = iip_obj -+ self._db_cache['vnc_instance_ips'][iip_obj.uuid] = iip_obj -+ return iip_obj -+ -+ def _instance_ip_update(self, iip_obj): -+ self._vnc_lib.instance_ip_update(iip_obj) -+ fq_name_str = json.dumps(iip_obj.get_fq_name()) -+ -+ self._db_cache['vnc_instance_ips'][iip_obj.uuid] = iip_obj -+ self._db_cache['vnc_instance_ips'][fq_name_str] = iip_obj -+ -+ def _instance_ip_delete(self, instance_ip_id): -+ fq_name_str = None -+ 
try: -+ iip_obj = self._db_cache['vnc_instance_ips'][instance_ip_id] -+ fq_name_str = json.dumps(iip_obj.get_fq_name()) -+ except KeyError: -+ pass -+ -+ self._vnc_lib.instance_ip_delete(id=instance_ip_id) -+ -+ try: -+ del self._db_cache['vnc_instance_ips'][instance_ip_id] -+ if fq_name_str: -+ del self._db_cache['vnc_instance_ips'][fq_name_str] -+ except KeyError: -+ pass -+ -+ # find projects on a given domain -+ def _project_list_domain(self, domain_id): -+ fq_name = ['default-domain'] -+ resp_dict = self._vnc_lib.projects_list(parent_fq_name=fq_name) -+ -+ return resp_dict['projects'] -+ -+ # find network ids on a given project -+ def _network_list_project(self, project_id): -+ try: -+ project_uuid = str(uuid.UUID(project_id)) -+ except Exception: -+ LOG.warning(_("Error in converting uuid: %r"), project_id) -+ -+ resp_dict = self._vnc_lib.virtual_networks_list(parent_id=project_uuid) -+ -+ return resp_dict['virtual-networks'] -+ -+ def _security_group_list_project(self, project_id): -+ try: -+ project_uuid = str(uuid.UUID(project_id)) -+ except Exception: -+ LOG.warning(_("Error in converting uuid: %r"), project_id) -+ -+ self._ensure_default_security_group_exists(project_uuid) -+ -+ resp_dict = self._vnc_lib.security_groups_list(parent_id=project_uuid) -+ -+ return resp_dict['security-groups'] -+ -+ def _security_group_entries_list_sg(self, sg_id): -+ try: -+ sg_uuid = str(uuid.UUID(sg_id)) -+ except Exception: -+ LOG.warning(_("Error in converting SG uuid: %r"), sg_id) -+ -+ resp_dict = self._vnc_lib.security_groups_list(parent_id=sg_uuid) -+ -+ return resp_dict['security-groups'] -+ -+ # find floating ip pools a project has access to -+ def _fip_pool_refs_project(self, project_id): -+ project_uuid = str(uuid.UUID(project_id)) -+ project_obj = self._project_read(proj_id=project_uuid) -+ -+ return project_obj.get_floating_ip_pool_refs() -+ -+ # find networks of floating ip pools project has access to -+ def _fip_pool_ref_networks(self, project_id): -+ ret_nets = [] -+ -+ proj_fip_pool_refs = self._fip_pool_refs_project(project_id) -+ if not proj_fip_pool_refs: -+ return ret_nets -+ -+ for fip_pool_ref in proj_fip_pool_refs: -+ fip_uuid = fip_pool_ref['uuid'] -+ fip_pool_obj = self._vnc_lib.floating_ip_pool_read(id=fip_uuid) -+ net_uuid = fip_pool_obj.parent_uuid -+ net_obj = self._virtual_network_read(net_id=net_uuid) -+ ret_nets.append({'uuid': net_obj.uuid, -+ 'fq_name': net_obj.get_fq_name()}) -+ -+ return ret_nets -+ -+ # find floating ip pools defined by network -+ def _fip_pool_list_network(self, net_id): -+ resp_dict = self._vnc_lib.floating_ip_pools_list(parent_id=net_id) -+ -+ return resp_dict['floating-ip-pools'] -+ -+ # find port ids on a given network -+ def _port_list_network(self, network_id): -+ ret_list = [] -+ -+ try: -+ net_obj = self._virtual_network_read(net_id=network_id) -+ except vnc_exc.NoIdError: -+ return ret_list -+ -+ port_back_refs = net_obj.get_virtual_machine_interface_back_refs() -+ if port_back_refs: -+ ret_list = [{'id': port_back_ref['uuid']} -+ for port_back_ref in port_back_refs] -+ -+ return ret_list -+ -+ # find port ids on a given project -+ def _port_list_project(self, project_id): -+ ret_list = [] -+ project_nets = self._network_list_project(project_id) -+ for net in project_nets: -+ net_ports = self._port_list_network(net['uuid']) -+ ret_list.extend(net_ports) -+ -+ return ret_list -+ -+ def _filters_is_present(self, filters, key_name, match_value): -+ """Check if filters present or not. 
-+ -+ Returns True if no filter is specified -+ OR search-param is not present in filters -+ OR (search-param is present in filters AND -+ resource matches param-list AND -+ shared parameter in filters is False) -+ """ -+ if filters: -+ if key_name in filters: -+ try: -+ filters[key_name].index(match_value) -+ if ('shared' in filters and filters['shared'][0]): -+ # yuck, q-api has shared as list always of 1 elem -+ return False # no shared-resource support -+ except ValueError: # not in requested list -+ return False -+ elif len(filters.keys()) == 1: -+ shared_val = filters.get('shared') -+ if shared_val and shared_val[0]: -+ return False -+ -+ return True -+ -+ def _network_read(self, net_uuid): -+ net_obj = self._virtual_network_read(net_id=net_uuid) -+ return net_obj -+ -+ def _subnet_vnc_create_mapping(self, subnet_id, subnet_key): -+ self._vnc_lib.kv_store(subnet_id, subnet_key) -+ self._vnc_lib.kv_store(subnet_key, subnet_id) -+ self._db_cache['q_subnet_maps'][subnet_id] = subnet_key -+ self._db_cache['q_subnet_maps'][subnet_key] = subnet_id -+ -+ def _subnet_vnc_read_mapping(self, id=None, key=None): -+ if id: -+ try: -+ return self._db_cache['q_subnet_maps'][id] -+ except KeyError: -+ subnet_key = self._vnc_lib.kv_retrieve(id) -+ self._db_cache['q_subnet_maps'][id] = subnet_key -+ return subnet_key -+ if key: -+ try: -+ return self._db_cache['q_subnet_maps'][key] -+ except KeyError: -+ subnet_id = self._vnc_lib.kv_retrieve(key) -+ self._db_cache['q_subnet_maps'][key] = subnet_id -+ return subnet_id -+ -+ def _subnet_vnc_read_or_create_mapping(self, id=None, key=None): -+ if id: -+ return self._subnet_vnc_read_mapping(id=id) -+ -+ # if subnet was created outside of neutron handle it and create -+ # neutron representation now (lazily) -+ try: -+ return self._subnet_vnc_read_mapping(key=key) -+ except vnc_exc.NoIdError: -+ subnet_id = str(uuid.uuid4()) -+ self._subnet_vnc_create_mapping(subnet_id, key) -+ return self._subnet_vnc_read_mapping(key=key) -+ -+ def _subnet_vnc_delete_mapping(self, subnet_id, subnet_key): -+ self._vnc_lib.kv_delete(subnet_id) -+ self._vnc_lib.kv_delete(subnet_key) -+ try: -+ del self._db_cache['q_subnet_maps'][subnet_id] -+ except KeyError: -+ pass -+ try: -+ del self._db_cache['q_subnet_maps'][subnet_key] -+ except KeyError: -+ pass -+ -+ def _subnet_vnc_get_key(self, subnet_vnc, net_id): -+ pfx = subnet_vnc.subnet.get_ip_prefix() -+ pfx_len = subnet_vnc.subnet.get_ip_prefix_len() -+ -+ return '%s %s/%s' % (net_id, pfx, pfx_len) -+ -+ def _subnet_read(self, net_uuid, subnet_key): -+ try: -+ net_obj = self._virtual_network_read(net_id=net_uuid) -+ except vnc_exc.NoIdError: -+ return -+ -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if not ipam_refs: -+ return -+ -+ for ipam_ref in ipam_refs: -+ subnet_vncs = ipam_ref['attr'].get_ipam_subnets() -+ for subnet_vnc in subnet_vncs: -+ if self._subnet_vnc_get_key(subnet_vnc, -+ net_uuid) == subnet_key: -+ return subnet_vnc -+ -+ return -+ -+ def _ip_address_to_subnet_id(self, ip_addr, net_obj): -+ # find subnet-id for ip-addr, called when instance-ip created -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ subnet_vncs = ipam_ref['attr'].get_ipam_subnets() -+ for subnet_vnc in subnet_vncs: -+ cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(), -+ subnet_vnc.subnet.get_ip_prefix_len()) -+ if netaddr.IPAddress(ip_addr) in netaddr.IPSet([cidr]): -+ subnet_key = self._subnet_vnc_get_key(subnet_vnc, -+ net_obj.uuid) -+ subnet_id = self._subnet_vnc_read_mapping( -+ 
key=subnet_key) -+ return subnet_id -+ -+ def _security_group_vnc_to_neutron(self, sg_obj): -+ sg_q_dict = self._obj_to_dict(sg_obj) -+ -+ # replace field names -+ sg_q_dict['id'] = sg_obj.uuid -+ sg_q_dict['tenant_id'] = sg_obj.parent_uuid.replace('-', '') -+ sg_q_dict['name'] = sg_obj.name -+ sg_q_dict['description'] = sg_obj.get_id_perms().get_description() -+ -+ # get security group rules -+ sg_q_dict['rules'] = [] -+ rule_list = self.security_group_rules_read(sg_obj.uuid) -+ if rule_list: -+ sg_q_dict['rules'] = [rule['q_api_data'] -+ for rule in rule_list] -+ -+ return {'q_api_data': sg_q_dict} -+ -+ def _security_group_neutron_to_vnc(self, sg_q, operator): -+ if operator == CREATE: -+ project_id = str(uuid.UUID(sg_q['tenant_id'])) -+ project_obj = self._project_read(proj_id=project_id) -+ id_perms = vnc_api.IdPermsType( -+ enable=True, description=sg_q['description']) -+ sg_vnc = vnc_api.SecurityGroup( -+ name=sg_q['name'], parent_obj=project_obj, -+ id_perms=id_perms) -+ -+ return sg_vnc -+ -+ def _security_group_rule_vnc_to_neutron(self, sg_id, sg_rule): -+ sgr_q_dict = {} -+ if sg_id is None: -+ return {'q_api_data': sgr_q_dict} -+ -+ try: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_id) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=sg_id) -+ -+ direction = 'egress' -+ if sg_rule.get_direction() == '<': -+ direction = 'ingress' -+ -+ remote_cidr = '' -+ remote_sg_uuid = '' -+ if direction == 'ingress': -+ addr = sg_rule.get_src_addresses()[0] -+ else: -+ addr = sg_rule.get_dst_addresses()[0] -+ -+ if addr.get_subnet(): -+ remote_cidr = '%s/%s' % (addr.get_subnet().get_ip_prefix(), -+ addr.get_subnet().get_ip_prefix_len()) -+ elif addr.get_security_group(): -+ if (addr.get_security_group() != 'any') and \ -+ (addr.get_security_group() != 'local'): -+ remote_sg = addr.get_security_group() -+ try: -+ remote_sg_obj = self._vnc_lib.security_group_read( -+ fq_name_str=remote_sg) -+ remote_sg_uuid = remote_sg_obj.uuid -+ except vnc_exc.NoIdError: -+ pass -+ -+ sgr_q_dict['id'] = sg_rule.get_rule_uuid() -+ sgr_q_dict['tenant_id'] = sg_obj.parent_uuid.replace('-', '') -+ sgr_q_dict['security_group_id'] = sg_obj.uuid -+ sgr_q_dict['ethertype'] = 'IPv4' -+ sgr_q_dict['direction'] = direction -+ sgr_q_dict['protocol'] = sg_rule.get_protocol() -+ sgr_q_dict['port_range_min'] = sg_rule.get_dst_ports()[0].\ -+ get_start_port() -+ sgr_q_dict['port_range_max'] = sg_rule.get_dst_ports()[0].\ -+ get_end_port() -+ sgr_q_dict['remote_ip_prefix'] = remote_cidr -+ sgr_q_dict['remote_group_id'] = remote_sg_uuid -+ -+ return {'q_api_data': sgr_q_dict} -+ -+ def _security_group_rule_neutron_to_vnc(self, sgr_q, operator): -+ if operator == CREATE: -+ port_min = 0 -+ port_max = 65535 -+ if sgr_q['port_range_min']: -+ port_min = sgr_q['port_range_min'] -+ if sgr_q['port_range_max']: -+ port_max = sgr_q['port_range_max'] -+ -+ endpt = [vnc_api.AddressType(security_group='any')] -+ if sgr_q['remote_ip_prefix']: -+ cidr = sgr_q['remote_ip_prefix'].split('/') -+ pfx = cidr[0] -+ pfx_len = int(cidr[1]) -+ endpt = [vnc_api.AddressType( -+ subnet=vnc_api.SubnetType(pfx, pfx_len))] -+ elif sgr_q['remote_group_id']: -+ sg_obj = self._vnc_lib.security_group_read( -+ id=sgr_q['remote_group_id']) -+ endpt = [vnc_api.AddressType( -+ security_group=sg_obj.get_fq_name_str())] -+ -+ if sgr_q['direction'] == 'ingress': -+ dir = '<' -+ local = endpt -+ remote = [vnc_api.AddressType(security_group='local')] -+ else: -+ dir = '>' -+ remote = endpt -+ local = 
[vnc_api.AddressType(security_group='local')] -+ -+ if not sgr_q['protocol']: -+ sgr_q['protocol'] = 'any' -+ -+ sgr_uuid = str(uuid.uuid4()) -+ -+ rule = vnc_api.PolicyRuleType( -+ rule_uuid=sgr_uuid, -+ direction=dir, -+ protocol=sgr_q['protocol'], -+ src_addresses=local, -+ src_ports=[vnc_api.PortType(0, 65535)], -+ dst_addresses=remote, -+ dst_ports=[vnc_api.PortType(port_min, port_max)]) -+ return rule -+ -+ def _network_neutron_to_vnc(self, network_q, operator): -+ net_name = network_q.get('name', None) -+ if operator == CREATE: -+ project_id = str(uuid.UUID(network_q['tenant_id'])) -+ project_obj = self._project_read(proj_id=project_id) -+ id_perms = vnc_api.IdPermsType(enable=True) -+ net_obj = vnc_api.VirtualNetwork( -+ net_name, project_obj, id_perms=id_perms) -+ else: # READ/UPDATE/DELETE -+ net_obj = self._virtual_network_read(net_id=network_q['id']) -+ -+ id_perms = net_obj.get_id_perms() -+ if 'admin_state_up' in network_q: -+ id_perms.enable = network_q['admin_state_up'] -+ net_obj.set_id_perms(id_perms) -+ -+ if 'contrail:policys' in network_q: -+ policy_fq_names = network_q['contrail:policys'] -+ # reset and add with newly specified list -+ net_obj.set_network_policy_list([], []) -+ seq = 0 -+ for p_fq_name in policy_fq_names: -+ domain_name, project_name, policy_name = p_fq_name -+ -+ domain_obj = vnc_api.Domain(domain_name) -+ project_obj = vnc_api.Project(project_name, domain_obj) -+ policy_obj = vnc_api.NetworkPolicy(policy_name, project_obj) -+ -+ net_obj.add_network_policy( -+ policy_obj, -+ vnc_api.VirtualNetworkPolicyType( -+ sequence=vnc_api.SequenceType(seq, 0))) -+ seq = seq + 1 -+ -+ if 'vpc:route_table' in network_q: -+ rt_fq_name = network_q['vpc:route_table'] -+ if rt_fq_name: -+ try: -+ rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name) -+ net_obj.set_route_table(rt_obj) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=net_obj.uuid) -+ -+ return net_obj -+ -+ def _network_vnc_to_neutron(self, net_obj, net_repr='SHOW'): -+ net_q_dict = {} -+ -+ net_q_dict['id'] = net_obj.uuid -+ net_q_dict['name'] = net_obj.name -+ net_q_dict['tenant_id'] = net_obj.parent_uuid.replace('-', '') -+ net_q_dict['admin_state_up'] = net_obj.get_id_perms().enable -+ net_q_dict['shared'] = False -+ net_q_dict['status'] = constants.NET_STATUS_ACTIVE -+ -+ ipam_refs = net_obj.get_network_ipam_refs() -+ net_q_dict['subnets'] = [] -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ subnets = ipam_ref['attr'].get_ipam_subnets() -+ for subnet in subnets: -+ sn_info = self._subnet_vnc_to_neutron(subnet, net_obj, -+ ipam_ref['to']) -+ sn_dict = sn_info['q_api_data'] -+ net_q_dict['subnets'].append(sn_dict) -+ sn_ipam = {} -+ sn_ipam['subnet_cidr'] = sn_dict['cidr'] -+ sn_ipam['ipam_fq_name'] = ipam_ref['to'] -+ -+ return {'q_api_data': net_q_dict} -+ -+ def _subnet_neutron_to_vnc(self, subnet_q): -+ cidr = subnet_q['cidr'].split('/') -+ pfx = cidr[0] -+ pfx_len = int(cidr[1]) -+ if subnet_q['gateway_ip'] != attr.ATTR_NOT_SPECIFIED: -+ default_gw = subnet_q['gateway_ip'] -+ else: -+ # Assigned by address manager -+ default_gw = None -+ sub_net = vnc_api.SubnetType(ip_prefix=pfx, -+ ip_prefix_len=pfx_len) -+ subnet_vnc = vnc_api.IpamSubnetType(subnet=sub_net, -+ default_gateway=default_gw) -+ return subnet_vnc -+ -+ def _subnet_vnc_to_neutron(self, subnet_vnc, net_obj, ipam_fq_name): -+ sn_q_dict = { -+ 'name': '', -+ 'tenant_id': net_obj.parent_uuid.replace('-', ''), -+ 'network_id': net_obj.uuid, -+ 'ip_version': 4, -+ } -+ -+ cidr = '%s/%s' % 
(subnet_vnc.subnet.get_ip_prefix(), -+ subnet_vnc.subnet.get_ip_prefix_len()) -+ sn_q_dict['cidr'] = cidr -+ -+ subnet_key = self._subnet_vnc_get_key(subnet_vnc, net_obj.uuid) -+ sn_id = self._subnet_vnc_read_or_create_mapping(key=subnet_key) -+ -+ sn_q_dict['id'] = sn_id -+ -+ sn_q_dict['gateway_ip'] = subnet_vnc.default_gateway -+ -+ first_ip = str(netaddr.IPNetwork(cidr).network + 1) -+ last_ip = str(netaddr.IPNetwork(cidr).broadcast - 2) -+ sn_q_dict['allocation_pools'] = [{'id': 'TODO-allocation_pools-id', -+ 'subnet_id': sn_id, -+ 'first_ip': first_ip, -+ 'last_ip': last_ip, -+ 'available_ranges': {}}] -+ -+ sn_q_dict['enable_dhcp'] = False -+ sn_q_dict['dns_nameservers'] = [{'address': '169.254.169.254', -+ 'subnet_id': sn_id}] -+ -+ sn_q_dict['routes'] = [{'destination': 'TODO-destination', -+ 'nexthop': 'TODO-nexthop', -+ 'subnet_id': sn_id}] -+ -+ sn_q_dict['shared'] = False -+ -+ return {'q_api_data': sn_q_dict} -+ -+ def _floatingip_neutron_to_vnc(self, fip_q, operator): -+ if operator == CREATE: -+ # use first available pool on net -+ net_id = fip_q['floating_network_id'] -+ fq_name = self._fip_pool_list_network(net_id)[0]['fq_name'] -+ fip_pool_obj = self._vnc_lib.floating_ip_pool_read(fq_name=fq_name) -+ fip_name = str(uuid.uuid4()) -+ fip_obj = vnc_api.FloatingIp(fip_name, fip_pool_obj) -+ fip_obj.uuid = fip_name -+ -+ proj_id = str(uuid.UUID(fip_q['tenant_id'])) -+ proj_obj = self._project_read(proj_id=proj_id) -+ fip_obj.set_project(proj_obj) -+ else: # READ/UPDATE/DELETE -+ fip_obj = self._vnc_lib.floating_ip_read(id=fip_q['id']) -+ -+ if fip_q['port_id']: -+ port_obj = self._virtual_machine_interface_read( -+ port_id=fip_q['port_id']) -+ fip_obj.set_virtual_machine_interface(port_obj) -+ else: -+ fip_obj.set_virtual_machine_interface_list([]) -+ -+ return fip_obj -+ -+ def _floatingip_vnc_to_neutron(self, fip_obj): -+ fip_pool_obj = self._vnc_lib.floating_ip_pool_read( -+ id=fip_obj.parent_uuid) -+ net_obj = self._virtual_network_read(net_id=fip_pool_obj.parent_uuid) -+ -+ tenant_id = fip_obj.get_project_refs()[0]['uuid'].replace('-', '') -+ -+ port_id = None -+ port_refs = fip_obj.get_virtual_machine_interface_refs() -+ if port_refs: -+ port_id = fip_obj.get_virtual_machine_interface_refs()[0]['uuid'] -+ -+ fip_q_dict = { -+ 'id': fip_obj.uuid, -+ 'tenant_id': tenant_id, -+ 'floating_ip_address': fip_obj.get_floating_ip_address(), -+ 'floating_network_id': net_obj.uuid, -+ 'router_id': None, -+ 'fixed_port_id': port_id, -+ 'fixed_ip_address': None, -+ } -+ -+ return {'q_api_data': fip_q_dict} -+ -+ def _port_neutron_to_vnc(self, port_q, net_obj, operator): -+ # if name not passed in use name = uuid = -+ if 'name' in port_q and port_q['name'] != '': -+ port_name = port_q['name'] -+ port_uuid = None -+ else: -+ port_name = str(uuid.uuid4()) -+ port_uuid = port_name -+ -+ if operator == CREATE: -+ instance_name = port_q['device_id'] -+ instance_obj = vnc_api.VirtualMachine(instance_name) -+ -+ id_perms = vnc_api.IdPermsType(enable=True) -+ port_obj = vnc_api.VirtualMachineInterface(port_name, instance_obj, -+ id_perms=id_perms) -+ port_obj.uuid = port_uuid -+ port_obj.set_virtual_network(net_obj) -+ -+ else: # READ/UPDATE/DELETE -+ port_obj = self._virtual_machine_interface_read( -+ port_id=port_q['id']) -+ -+ port_obj.set_security_group_list([]) -+ if ('security_groups' in port_q and -+ port_q['security_groups'].__class__ is not object): -+ for sg_id in port_q['security_groups']: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_id) -+ 
port_obj.add_security_group(sg_obj) -+ -+ id_perms = port_obj.get_id_perms() -+ if 'admin_state_up' in port_q: -+ id_perms.enable = port_q['admin_state_up'] -+ port_obj.set_id_perms(id_perms) -+ -+ return port_obj -+ -+ def _port_vnc_to_neutron(self, port_obj, net_obj=None): -+ port_q_dict = {} -+ port_q_dict['name'] = port_obj.name -+ port_q_dict['id'] = port_obj.uuid -+ port_q_dict[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_VROUTER -+ -+ if not net_obj: -+ net_refs = port_obj.get_virtual_network_refs() -+ if net_refs: -+ net_id = net_refs[0]['uuid'] -+ else: -+ net_id = self._vnc_lib.obj_to_id(vnc_api.VirtualNetwork()) -+ -+ proj_id = None -+ # not in cache, get by reading VN obj, and populate cache -+ net_obj = self._virtual_network_read(net_id=net_id) -+ proj_id = net_obj.parent_uuid.replace('-', '') -+ self._set_obj_tenant_id(port_obj.uuid, proj_id) -+ else: -+ net_id = net_obj.uuid -+ proj_id = net_obj.parent_uuid.replace('-', '') -+ -+ port_q_dict['tenant_id'] = proj_id -+ port_q_dict['network_id'] = net_id -+ -+ port_q_dict['mac_address'] = '' -+ mac_refs = port_obj.get_virtual_machine_interface_mac_addresses() -+ if mac_refs: -+ port_q_dict['mac_address'] = mac_refs.mac_address[0] -+ -+ port_q_dict['fixed_ips'] = [] -+ ip_back_refs = port_obj.get_instance_ip_back_refs() -+ if ip_back_refs: -+ for ip_back_ref in ip_back_refs: -+ try: -+ ip_obj = self._instance_ip_read( -+ instance_ip_id=ip_back_ref['uuid']) -+ except vnc_exc.NoIdError: -+ continue -+ -+ ip_addr = ip_obj.get_instance_ip_address() -+ -+ ip_q_dict = {} -+ ip_q_dict['port_id'] = port_obj.uuid -+ ip_q_dict['ip_address'] = ip_addr -+ ip_q_dict['subnet_id'] = self._ip_address_to_subnet_id(ip_addr, -+ net_obj) -+ ip_q_dict['net_id'] = net_id -+ -+ port_q_dict['fixed_ips'].append(ip_q_dict) -+ -+ sg_dict = {'port_security_enabled': True} -+ sg_dict['security_groups'] = [] -+ sg_refs = port_obj.get_security_group_refs() -+ for sg_ref in sg_refs or []: -+ sg_dict['security_groups'].append(sg_ref['uuid']) -+ -+ port_q_dict['admin_state_up'] = port_obj.get_id_perms().enable -+ port_q_dict['status'] = constants.PORT_STATUS_ACTIVE -+ port_q_dict['device_id'] = port_obj.parent_name -+ port_q_dict['device_owner'] = 'TODO-device-owner' -+ -+ return {'q_api_data': port_q_dict} -+ -+ def network_create(self, network_q): -+ net_obj = self._network_neutron_to_vnc(network_q, CREATE) -+ net_uuid = self._virtual_network_create(net_obj) -+ -+ ret_network_q = self._network_vnc_to_neutron(net_obj, net_repr='SHOW') -+ self._db_cache['q_networks'][net_uuid] = ret_network_q -+ -+ return ret_network_q -+ -+ def network_read(self, net_uuid, fields=None): -+ # see if we can return fast... 
-+ if fields and (len(fields) == 1) and fields[0] == 'tenant_id': -+ tenant_id = self._get_obj_tenant_id('network', net_uuid) -+ return {'q_api_data': {'id': net_uuid, 'tenant_id': tenant_id}} -+ -+ try: -+ net_obj = self._network_read(net_uuid) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=net_uuid) -+ -+ return self._network_vnc_to_neutron(net_obj, net_repr='SHOW') -+ -+ def network_update(self, net_id, network_q): -+ network_q['id'] = net_id -+ net_obj = self._network_neutron_to_vnc(network_q, UPDATE) -+ self._virtual_network_update(net_obj) -+ -+ ret_network_q = self._network_vnc_to_neutron(net_obj, net_repr='SHOW') -+ self._db_cache['q_networks'][net_id] = ret_network_q -+ -+ return ret_network_q -+ -+ def network_delete(self, net_id): -+ self._virtual_network_delete(net_id=net_id) -+ try: -+ del self._db_cache['q_networks'][net_id] -+ except KeyError: -+ pass -+ -+ def network_list(self, filters=None): -+ ret_list = [] -+ -+ if filters and 'shared' in filters: -+ if filters['shared'][0]: -+ # no support for shared networks -+ return ret_list -+ -+ # collect phase -+ all_nets = [] # all networks in all projects -+ if filters and 'tenant_id' in filters: -+ # project-id is present -+ if 'id' in filters: -+ # required networks are also specified, -+ # just read and populate ret_list -+ # prune is skipped because all_nets is empty -+ for net_id in filters['id']: -+ net_obj = self._network_read(net_id) -+ net_info = self._network_vnc_to_neutron(net_obj, -+ net_repr='LIST') -+ ret_list.append(net_info) -+ else: -+ # read all networks in project, and prune below -+ project_ids = filters['tenant_id'] -+ for p_id in project_ids: -+ if 'router:external' in filters: -+ all_nets.append(self._fip_pool_ref_networks(p_id)) -+ else: -+ project_nets = self._network_list_project(p_id) -+ all_nets.append(project_nets) -+ elif filters and 'id' in filters: -+ # required networks are specified, just read and populate ret_list -+ # prune is skipped because all_nets is empty -+ for net_id in filters['id']: -+ net_obj = self._network_read(net_id) -+ net_info = self._network_vnc_to_neutron(net_obj, -+ net_repr='LIST') -+ ret_list.append(net_info) -+ else: -+ # read all networks in all projects -+ dom_projects = self._project_list_domain(None) -+ for project in dom_projects: -+ proj_id = project['uuid'] -+ if filters and 'router:external' in filters: -+ all_nets.append(self._fip_pool_ref_networks(proj_id)) -+ else: -+ project_nets = self._network_list_project(proj_id) -+ all_nets.append(project_nets) -+ -+ # prune phase -+ for project_nets in all_nets: -+ for proj_net in project_nets: -+ proj_net_id = proj_net['uuid'] -+ if not self._filters_is_present(filters, 'id', proj_net_id): -+ continue -+ -+ proj_net_fq_name = unicode(proj_net['fq_name']) -+ if not self._filters_is_present(filters, 'contrail:fq_name', -+ proj_net_fq_name): -+ continue -+ -+ try: -+ net_obj = self._network_read(proj_net['uuid']) -+ net_info = self._network_vnc_to_neutron(net_obj, -+ net_repr='LIST') -+ except vnc_exc.NoIdError: -+ continue -+ ret_list.append(net_info) -+ -+ return ret_list -+ -+ def network_count(self, filters=None): -+ nets_info = self.network_list(filters) -+ return len(nets_info) -+ -+ def subnet_create(self, subnet_q): -+ net_id = subnet_q['network_id'] -+ net_obj = self._virtual_network_read(net_id=net_id) -+ -+ ipam_fq_name = subnet_q['contrail:ipam_fq_name'] -+ if ipam_fq_name != '': -+ domain_name, project_name, ipam_name = ipam_fq_name -+ -+ project_obj = vnc_api.Project(project_name) -+ 
netipam_obj = vnc_api.NetworkIpam(ipam_name, project_obj) -+ else: # link subnet with default ipam -+ project_obj = vnc_api.Project(net_obj.parent_name) -+ netipam_obj = vnc_api.NetworkIpam(project_obj=project_obj) -+ ipam_fq_name = netipam_obj.get_fq_name() -+ -+ subnet_vnc = self._subnet_neutron_to_vnc(subnet_q) -+ subnet_key = self._subnet_vnc_get_key(subnet_vnc, net_id) -+ -+ # Locate list of subnets to which this subnet has to be appended -+ net_ipam_ref = None -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ if ipam_ref['to'] == ipam_fq_name: -+ net_ipam_ref = ipam_ref -+ break -+ -+ if not net_ipam_ref: -+ # First link from net to this ipam -+ vnsn_data = vnc_api.VnSubnetsType(ipam_subnets=[subnet_vnc]) -+ net_obj.add_network_ipam(netipam_obj, vnsn_data) -+ else: # virtual-network already linked to this ipam -+ for subnet in net_ipam_ref['attr'].get_ipam_subnets(): -+ if subnet_key == self._subnet_vnc_get_key(subnet, net_id): -+ # duplicate !! -+ subnet_info = self._subnet_vnc_to_neutron(subnet, -+ net_obj, -+ ipam_fq_name) -+ return subnet_info -+ vnsn_data = net_ipam_ref['attr'] -+ vnsn_data.ipam_subnets.append(subnet_vnc) -+ -+ self._virtual_network_update(net_obj) -+ -+ # allocate an id to the subnet and store mapping with -+ # api-server -+ subnet_id = str(uuid.uuid4()) -+ self._subnet_vnc_create_mapping(subnet_id, subnet_key) -+ -+ # Read in subnet from server to get updated values for gw etc. -+ subnet_vnc = self._subnet_read(net_obj.uuid, subnet_key) -+ subnet_info = self._subnet_vnc_to_neutron(subnet_vnc, net_obj, -+ ipam_fq_name) -+ -+ #self._db_cache['q_subnets'][subnet_id] = subnet_info -+ -+ return subnet_info -+ -+ def subnet_read(self, subnet_id): -+ subnet_key = self._subnet_vnc_read_mapping(id=subnet_id) -+ net_id = subnet_key.split()[0] -+ -+ net_obj = self._network_read(net_id) -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ subnet_vncs = ipam_ref['attr'].get_ipam_subnets() -+ for subnet_vnc in subnet_vncs: -+ if self._subnet_vnc_get_key(subnet_vnc, -+ net_id) == subnet_key: -+ ret_subnet_q = self._subnet_vnc_to_neutron( -+ subnet_vnc, net_obj, ipam_ref['to']) -+ self._db_cache['q_subnets'][subnet_id] = ret_subnet_q -+ return ret_subnet_q -+ -+ return {} -+ -+ def subnet_update(self, subnet_id, subnet_q): -+ ret_subnet_q = self.subnet_read(subnet_id) -+ if 'name' in subnet_q: -+ ret_subnet_q['q_api_data']['name'] = subnet_q['name'] -+ return ret_subnet_q -+ -+ def subnet_delete(self, subnet_id): -+ subnet_key = self._subnet_vnc_read_mapping(id=subnet_id) -+ net_id = subnet_key.split()[0] -+ -+ net_obj = self._network_read(net_id) -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ orig_subnets = ipam_ref['attr'].get_ipam_subnets() -+ new_subnets = [subnet_vnc for subnet_vnc in orig_subnets -+ if self._subnet_vnc_get_key(subnet_vnc, net_id) -+ != subnet_key] -+ if len(orig_subnets) != len(new_subnets): -+ # matched subnet to be deleted -+ ipam_ref['attr'].set_ipam_subnets(new_subnets) -+ self._virtual_network_update(net_obj) -+ self._subnet_vnc_delete_mapping(subnet_id, subnet_key) -+ try: -+ del self._db_cache['q_subnets'][subnet_id] -+ except KeyError: -+ pass -+ -+ def subnets_list(self, filters=None): -+ ret_subnets = [] -+ -+ if filters and 'id' in filters: -+ # required subnets are specified, -+ # just read in corresponding net_ids -+ net_ids = set([]) -+ for subnet_id in filters['id']: -+ subnet_key = 
self._subnet_vnc_read_mapping(id=subnet_id) -+ net_id = subnet_key.split()[0] -+ net_ids.add(net_id) -+ else: -+ nets_info = self.network_list() -+ net_ids = [n_info['q_api_data']['id'] for n_info in nets_info] -+ -+ for net_id in net_ids: -+ net_obj = self._network_read(net_id) -+ ipam_refs = net_obj.get_network_ipam_refs() -+ if ipam_refs: -+ for ipam_ref in ipam_refs: -+ subnet_vncs = ipam_ref['attr'].get_ipam_subnets() -+ for subnet_vnc in subnet_vncs: -+ sn_info = self._subnet_vnc_to_neutron(subnet_vnc, -+ net_obj, -+ ipam_ref['to']) -+ sn_id = sn_info['q_api_data']['id'] -+ sn_proj_id = sn_info['q_api_data']['tenant_id'] -+ sn_net_id = sn_info['q_api_data']['network_id'] -+ -+ if filters: -+ if not self._filters_is_present(filters, 'id', -+ sn_id): -+ continue -+ if not self._filters_is_present(filters, -+ 'tenant_id', -+ sn_proj_id): -+ continue -+ if not self._filters_is_present(filters, -+ 'network_id', -+ sn_net_id): -+ continue -+ -+ ret_subnets.append(sn_info) -+ -+ return ret_subnets -+ -+ def subnets_count(self, filters=None): -+ subnets_info = self.subnets_list(filters) -+ return len(subnets_info) -+ -+ # floatingip api handlers -+ def floatingip_create(self, fip_q): -+ fip_obj = self._floatingip_neutron_to_vnc(fip_q, CREATE) -+ fip_uuid = self._vnc_lib.floating_ip_create(fip_obj) -+ fip_obj = self._vnc_lib.floating_ip_read(id=fip_uuid) -+ -+ return self._floatingip_vnc_to_neutron(fip_obj) -+ -+ def floatingip_read(self, fip_uuid): -+ fip_obj = self._vnc_lib.floating_ip_read(id=fip_uuid) -+ -+ return self._floatingip_vnc_to_neutron(fip_obj) -+ -+ def floatingip_update(self, fip_id, fip_q): -+ fip_q['id'] = fip_id -+ fip_obj = self._floatingip_neutron_to_vnc(fip_q, UPDATE) -+ self._vnc_lib.floating_ip_update(fip_obj) -+ -+ return self._floatingip_vnc_to_neutron(fip_obj) -+ -+ def floatingip_delete(self, fip_id): -+ self._vnc_lib.floating_ip_delete(id=fip_id) -+ -+ def floatingip_list(self, filters=None): -+ # Find networks, get floatingip backrefs and return -+ ret_list = [] -+ -+ if filters: -+ if 'tenant_id' in filters: -+ proj_ids = [str(uuid.UUID(id)) for id in filters['tenant_id']] -+ elif 'port_id' in filters: -+ # required ports are specified, just read and populate ret_list -+ # prune is skipped because proj_objs is empty -+ proj_ids = [] -+ for port_id in filters['port_id']: -+ port_obj = self._virtual_machine_interface_read( -+ port_id=port_id) -+ fip_back_refs = port_obj.get_floating_ip_back_refs() -+ if not fip_back_refs: -+ continue -+ for fip_back_ref in fip_back_refs: -+ fip_obj = self._vnc_lib.floating_ip_read( -+ id=fip_back_ref['uuid']) -+ ret_list.append(self._floatingip_vnc_to_neutron( -+ fip_obj)) -+ else: # no filters -+ dom_projects = self._project_list_domain(None) -+ proj_ids = [proj['uuid'] for proj in dom_projects] -+ -+ proj_objs = [self._project_read(proj_id=id) for id in proj_ids] -+ -+ for proj_obj in proj_objs: -+ fip_back_refs = proj_obj.get_floating_ip_back_refs() -+ if not fip_back_refs: -+ continue -+ for fip_back_ref in fip_back_refs: -+ fip_obj = self._vnc_lib.floating_ip_read( -+ id=fip_back_ref['uuid']) -+ ret_list.append(self._floatingip_vnc_to_neutron(fip_obj)) -+ -+ return ret_list -+ -+ def floatingip_count(self, filters=None): -+ floatingip_info = self.floatingip_list(filters) -+ return len(floatingip_info) -+ -+ # port api handlers -+ def port_create(self, port_q): -+ net_id = port_q['network_id'] -+ net_obj = self._network_read(net_id) -+ proj_id = net_obj.parent_uuid -+ -+ self._ensure_instance_exists(port_q['device_id']) -+ -+ 
# initialize port object -+ port_obj = self._port_neutron_to_vnc(port_q, net_obj, CREATE) -+ -+ # if ip address passed then use it -+ ip_addr = None -+ ip_obj = None -+ if port_q['fixed_ips'].__class__ is not object: -+ ip_addr = port_q['fixed_ips'][0]['ip_address'] -+ ip_name = '%s %s' % (net_id, ip_addr) -+ try: -+ ip_obj = self._instance_ip_read(fq_name=[ip_name]) -+ #ip_id = ip_obj.uuid -+ except Exception as e: -+ ip_obj = None -+ -+ # create the object -+ port_id = self._virtual_machine_interface_create(port_obj) -+ -+ # initialize ip object -+ if ip_obj is None: -+ # Allocate an IP address only if there is a defined subnet -+ if net_obj.get_network_ipam_refs(): -+ ip_name = str(uuid.uuid4()) -+ ip_obj = vnc_api.InstanceIp(name=ip_name) -+ ip_obj.uuid = ip_name -+ ip_obj.set_virtual_machine_interface(port_obj) -+ ip_obj.set_virtual_network(net_obj) -+ if ip_addr: -+ ip_obj.set_instance_ip_address(ip_addr) -+ try: -+ self._instance_ip_create(ip_obj) -+ except Exception as e: -+ # ResourceExhaustionError, resources are not available -+ self._virtual_machine_interface_delete(port_id=port_id) -+ raise e -+ # shared ip address -+ else: -+ if ip_addr == ip_obj.get_instance_ip_address(): -+ ip_obj.add_virtual_machine_interface(port_obj) -+ self._instance_ip_update(ip_obj) -+ -+ port_obj = self._virtual_machine_interface_read(port_id=port_id) -+ -+ ret_port_q = self._port_vnc_to_neutron(port_obj, net_obj) -+ #self._db_cache['q_ports'][port_id] = ret_port_q -+ self._set_obj_tenant_id(port_id, proj_id) -+ -+ # update cache on successful creation -+ tenant_id = proj_id.replace('-', '') -+ if tenant_id not in self._db_cache['q_tenant_port_count']: -+ ncurports = self.port_count({'tenant_id': tenant_id}) -+ else: -+ ncurports = self._db_cache['q_tenant_port_count'][tenant_id] -+ -+ self._db_cache['q_tenant_port_count'][tenant_id] = ncurports + 1 -+ -+ return ret_port_q -+ -+ def port_read(self, port_id): -+ port_obj = self._virtual_machine_interface_read(port_id=port_id) -+ -+ ret_port_q = self._port_vnc_to_neutron(port_obj) -+ self._db_cache['q_ports'][port_id] = ret_port_q -+ -+ return ret_port_q -+ -+ def port_update(self, port_id, port_q): -+ port_q['id'] = port_id -+ port_obj = self._port_neutron_to_vnc(port_q, None, UPDATE) -+ self._virtual_machine_interface_update(port_obj) -+ -+ ret_port_q = self._port_vnc_to_neutron(port_obj) -+ self._db_cache['q_ports'][port_id] = ret_port_q -+ -+ return ret_port_q -+ -+ def port_delete(self, port_id): -+ port_obj = self._port_neutron_to_vnc({'id': port_id}, None, READ) -+ instance_id = port_obj.parent_uuid -+ -+ # release instance IP address -+ iip_back_refs = port_obj.get_instance_ip_back_refs() -+ if iip_back_refs: -+ for iip_back_ref in iip_back_refs: -+ # if name contains IP address then this is shared ip -+ iip_obj = self._vnc_lib.instance_ip_read( -+ id=iip_back_ref['uuid']) -+ name = iip_obj.name -+ if len(name.split(' ')) > 1: -+ name = name.split(' ')[1] -+ -+ # in case of shared ip only delete the link to the VMI -+ try: -+ socket.inet_aton(name) -+ iip_obj.del_virtual_machine_interface(port_obj) -+ self._instance_ip_update(iip_obj) -+ except socket.error: -+ self._instance_ip_delete( -+ instance_ip_id=iip_back_ref['uuid']) -+ -+ # disassociate any floating IP used by instance -+ fip_back_refs = port_obj.get_floating_ip_back_refs() -+ if fip_back_refs: -+ for fip_back_ref in fip_back_refs: -+ fip_obj = self._vnc_lib.floating_ip_read( -+ id=fip_back_ref['uuid']) -+ self.floatingip_update(fip_obj.uuid, {'port_id': None}) -+ -+ 
self._virtual_machine_interface_delete(port_id=port_id) -+ -+ # delete instance if this was the last port -+ inst_obj = self._vnc_lib.virtual_machine_read(id=instance_id) -+ inst_intfs = inst_obj.get_virtual_machine_interfaces() -+ if not inst_intfs: -+ self._vnc_lib.virtual_machine_delete(id=inst_obj.uuid) -+ -+ try: -+ del self._db_cache['q_ports'][port_id] -+ except KeyError: -+ pass -+ -+ # update cache on successful deletion -+ try: -+ tenant_id = self._get_obj_tenant_id('port', port_id) -+ self._db_cache['q_tenant_port_count'][tenant_id] = \ -+ self._db_cache['q_tenant_port_count'][tenant_id] - 1 -+ except KeyError: -+ pass -+ -+ self._del_obj_tenant_id(port_id) -+ -+ def port_list(self, filters=None): -+ ret_q_ports = [] -+ all_project_ids = [] -+ -+ if 'device_owner' in filters: -+ return ret_q_ports -+ -+ if 'device_id' not in filters: -+ # Listing from back references -+ if not filters: -+ # no filters => return all ports! -+ all_projects = self._project_list_domain(None) -+ all_project_ids = [project['uuid'] for project in all_projects] -+ elif 'tenant_id' in filters: -+ all_project_ids = filters.get('tenant_id') -+ -+ for proj_id in all_project_ids: -+ proj_ports = self._port_list_project(proj_id) -+ for port in proj_ports: -+ try: -+ port_info = self.port_read(port['id']) -+ except vnc_exc.NoIdError: -+ continue -+ ret_q_ports.append(port_info) -+ -+ for net_id in filters.get('network_id', []): -+ net_ports = self._port_list_network(net_id) -+ for port in net_ports: -+ port_info = self.port_read(port['id']) -+ ret_q_ports.append(port_info) -+ -+ return ret_q_ports -+ -+ # Listing from parent to children -+ virtual_machine_ids = filters['device_id'] -+ for vm_id in virtual_machine_ids: -+ resp_dict = self._vnc_lib.virtual_machine_interfaces_list( -+ parent_id=vm_id) -+ vm_intf_ids = resp_dict['virtual-machine-interfaces'] -+ for vm_intf in vm_intf_ids: -+ try: -+ port_info = self.port_read(vm_intf['uuid']) -+ except vnc_exc.NoIdError: -+ continue -+ ret_q_ports.append(port_info) -+ -+ return ret_q_ports -+ -+ def port_count(self, filters=None): -+ if 'device_owner' in filters: -+ return 0 -+ -+ if 'tenant_id' in filters: -+ project_id = filters['tenant_id'][0] -+ try: -+ return self._db_cache['q_tenant_port_count'][project_id] -+ except KeyError: -+ # do it the hard way but remember for next time -+ nports = len(self._port_list_project(project_id)) -+ self._db_cache['q_tenant_port_count'][project_id] = nports -+ else: -+ # across all projects -+ # get only a count from api-server! 
-+ nports = len(self.port_list(filters)) -+ -+ return nports -+ -+ # security group api handlers -+ def security_group_create(self, sg_q): -+ sg_obj = self._security_group_neutron_to_vnc(sg_q, CREATE) -+ sg_uuid = self._security_group_create(sg_obj) -+ -+ #allow all egress traffic -+ def_rule = { -+ 'port_range_min': 0, -+ 'port_range_max': 65535, -+ 'direction': 'egress', -+ 'remote_ip_prefix': None, -+ 'remote_group_id': None, -+ 'protocol': 'any', -+ } -+ rule = self._security_group_rule_neutron_to_vnc(def_rule, CREATE) -+ self._security_group_rule_create(sg_uuid, rule) -+ -+ ret_sg_q = self._security_group_vnc_to_neutron(sg_obj) -+ return ret_sg_q -+ -+ def security_group_read(self, sg_id): -+ try: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_id) -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=sg_id) -+ -+ return self._security_group_vnc_to_neutron(sg_obj) -+ -+ def security_group_delete(self, sg_id): -+ self._security_group_delete(sg_id) -+ -+ def security_group_list(self, context, filters=None): -+ ret_list = [] -+ -+ # all_sgs[] all sgs in all projects -+ # collect phase -+ if filters and 'tenant_id' in filters: -+ project_ids = filters['tenant_id'] -+ all_sgs = [self._security_group_list_project(p_id) for -+ p_id in project_ids] -+ elif filters and 'name' in filters: -+ all_sgs = [self._security_group_list_project( -+ str(uuid.UUID(context.tenant)))] -+ else: # no filters -+ dom_projects = self._project_list_domain(None) -+ all_sgs = [self._security_group_list_project(project['uuid']) for -+ project in dom_projects] -+ -+ # prune phase -+ for project_sgs in all_sgs: -+ for proj_sg in project_sgs: -+ proj_sg_id = proj_sg['uuid'] -+ if not self._filters_is_present(filters, 'id', proj_sg_id): -+ continue -+ sg_info = self.security_group_read(proj_sg_id) -+ if not self._filters_is_present(filters, 'name', -+ sg_info['q_api_data']['name']): -+ continue -+ ret_list.append(sg_info) -+ -+ return ret_list -+ -+ def security_group_rule_create(self, sgr_q): -+ sg_id = sgr_q['security_group_id'] -+ sg_rule = self._security_group_rule_neutron_to_vnc(sgr_q, CREATE) -+ self._security_group_rule_create(sg_id, sg_rule) -+ ret_sg_rule_q = self._security_group_rule_vnc_to_neutron(sg_id, -+ sg_rule) -+ -+ return ret_sg_rule_q -+ -+ def security_group_rule_read(self, sgr_id): -+ sg_obj, sg_rule = self._security_group_rule_find(sgr_id) -+ if sg_obj and sg_rule: -+ return self._security_group_rule_vnc_to_neutron(sg_obj.uuid, -+ sg_rule) -+ -+ return {} -+ -+ def security_group_rule_delete(self, sgr_id): -+ sg_obj, sg_rule = self._security_group_rule_find(sgr_id) -+ if sg_obj and sg_rule: -+ return self._security_group_rule_delete(sg_obj, sg_rule) -+ -+ def security_group_rules_read(self, sg_id): -+ try: -+ sg_obj = self._vnc_lib.security_group_read(id=sg_id) -+ sgr_entries = sg_obj.get_security_group_entries() -+ if sgr_entries is None: -+ return -+ -+ sg_rules = [self._security_group_rule_vnc_to_neutron( -+ sg_obj.uuid, sg_rule) for -+ sg_rule in sgr_entries.get_policy_rule()] -+ except vnc_exc.NoIdError: -+ raise exceptions.NetworkNotFound(net_id=sg_id) -+ -+ return sg_rules -+ -+ def security_group_rule_list(self, filters=None): -+ ret_list = [] -+ -+ # collect phase -+ if filters and 'tenant_id' in filters: -+ project_ids = filters['tenant_id'] -+ all_sgs = [self._security_group_list_project(p_id) for -+ p_id in project_ids] -+ else: # no filters -+ dom_projects = self._project_list_domain(None) -+ all_sgs = [self._security_group_list_project(project['uuid']) -+ for 
project in dom_projects] -+ -+ # prune phase -+ for project_sgs in all_sgs: -+ ret_list.extend( -+ self.security_group_rules_read(proj_sg['uuid']) -+ for proj_sg in project_sgs -+ if self._filters_is_present(filters, 'id', proj_sg['uuid']) -+ ) -+ -+ return ret_list -diff --git neutron/tests/unit/juniper/__init__.py neutron/tests/unit/juniper/__init__.py -new file mode 100644 -index 0000000..72bebec ---- /dev/null -+++ neutron/tests/unit/juniper/__init__.py -@@ -0,0 +1,14 @@ -+# Copyright (c) 2012 OpenStack Foundation. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -+# implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -diff --git neutron/tests/unit/juniper/test_contrail_plugin.py neutron/tests/unit/juniper/test_contrail_plugin.py -new file mode 100644 -index 0000000..3984971 ---- /dev/null -+++ neutron/tests/unit/juniper/test_contrail_plugin.py -@@ -0,0 +1,1021 @@ -+# Copyright (c) 2012 OpenStack Foundation. -+# -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -+# implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. 
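The list handlers earlier in this patch (network_list, security_group_list, security_group_rule_list) all share a collect/prune shape: first gather candidate objects per project, then drop whatever the Neutron filters exclude. Below is a condensed, self-contained sketch of that pattern; the `projects` dict and the `filters_is_present` helper are illustrative stand-ins for the plugin's api-server list calls and its `_filters_is_present`, not the plugin's actual API.

    # Condensed collect/prune listing in the style of the plugin handlers.
    # `projects` maps project-id -> list of {'uuid': ...} summaries; in the
    # plugin those lists come from api-server list calls.
    def filters_is_present(filters, key, value):
        # An absent filter matches everything.
        if not filters or key not in filters:
            return True
        return value in filters[key]

    def list_objects(projects, filters=None):
        # collect phase: restrict to the requested projects, else use all
        if filters and 'tenant_id' in filters:
            project_ids = filters['tenant_id']
        else:
            project_ids = list(projects)
        all_objs = [projects.get(p_id, []) for p_id in project_ids]

        # prune phase: keep only objects that pass the remaining filters
        ret_list = []
        for project_objs in all_objs:
            for obj in project_objs:
                if filters_is_present(filters, 'id', obj['uuid']):
                    ret_list.append(obj)
        return ret_list

    demo = {'p1': [{'uuid': 'n1'}, {'uuid': 'n2'}], 'p2': [{'uuid': 'n3'}]}
    assert list_objects(demo, {'tenant_id': ['p1'], 'id': ['n2']}) == [{'uuid': 'n2'}]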
-+ -+import datetime -+import mock -+import sys -+import uuid -+ -+import neutron.db.api -+from neutron.manager import NeutronManager -+from neutron.tests.unit import test_db_plugin as test_plugin -+ -+ -+subnet_obj = {u'subnet': -+ {'name': '', 'enable_dhcp': True, -+ u'network_id': u'b11ffca3-3dfc-435e-ae0e-8f44da7188b7', -+ 'tenant_id': u'8162e75da480419a8b2ae7088dbc14f5', -+ 'dns_nameservers': '', -+ u'contrail:ipam_fq_name': -+ [u'default-domain', u'admin', u'default-network-ipam'], -+ 'allocation_pools': '', 'host_routes': '', u'ip_version': 4, -+ 'gateway_ip': '', u'cidr': u'20.20.1.0/29'}} -+ -+vn_list = [] -+GlobalProjects = [] -+ -+ -+class MockVncApi(mock.MagicMock): -+ def __init__(self, *args, **kwargs): -+ pass -+ -+ def kv_retrieve(self, *args, **kwargs): -+ return [] -+ -+ def kv_store(self, *args, **kwargs): -+ return -+ -+ def kv_delete(self, *args, **kwargs): -+ return -+ -+ def project_read(self, *args, **kwargs): -+ return GlobalProjects[0] -+ -+ def virtual_network_create(self, net_obj): -+ net_id = unicode(str(uuid.uuid4())) -+ net_obj.set_uuid(net_id) -+ vn_list.append(net_obj) -+ return net_id -+ -+ def virtual_network_read(self, id, *args, **kwargs): -+ if len(vn_list): -+ for index in range(len(vn_list)): -+ if ((vn_list[index].get_uuid()) == id): -+ return vn_list[index] -+ -+ #return a mock object if it is not created so far -+ return MockVirtualNetwork('dummy-net', MockProject()) -+ -+ def virtual_network_delete(self, *args, **kwargs): -+ return -+ -+ def virtual_network_update(self, *args, **kwargs): -+ return -+ -+ def virtual_networks_list(self, *args, **kwargs): -+ return vn_list -+ -+ -+class MockVncObject(mock.MagicMock): -+ def __init__(self, name=None, parent_obj=None, *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ if not parent_obj: -+ self._fq_name = [name] -+ else: -+ self._fq_name = parent_obj.get_fq_name() + [name] -+ -+ self._ipam_refs = [{'to': [u'default-domain', u'admin', -+ u'default-network-ipam']}] -+ self.uuid = None -+ self.name = name -+ self.network_ipam_refs = [] -+ -+ def set_uuid(self, uuid): -+ self.uuid = uuid -+ -+ def get_uuid(self): -+ return self.uuid -+ -+ def get_fq_name(self): -+ return self._fq_name -+ -+ def get_network_ipam_refs(self): -+ return getattr(self, 'network_ipam_refs', None) -+ -+ def add_network_ipam(self, ref_obj, ref_data): -+ refs = getattr(self, 'network_ipam_refs', []) -+ if not refs: -+ self.network_ipam_refs = [] -+ -+ # if ref already exists, update any attr with it -+ for ref in refs: -+ if ref['to'] == ref_obj.get_fq_name(): -+ ref = {'to': ref_obj.get_fq_name(), 'attr': ref_data} -+ if ref_obj.uuid: -+ ref['uuid'] = ref_obj.uuid -+ return -+ -+ # ref didn't exist before -+ ref_info = {'to': ref_obj.get_fq_name(), 'attr': ref_data} -+ if ref_obj.uuid: -+ ref_info['uuid'] = ref_obj.uuid -+ -+ self.network_ipam_refs.append(ref_info) -+ -+ -+class MockVirtualNetwork(MockVncObject): -+ pass -+ -+ -+class MockSubnetType(mock.MagicMock): -+ def __init__(self, name=None, ip_prefix=None, ip_prefix_len=None, -+ *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ self.ip_prefix = ip_prefix -+ self.ip_prefix_len = ip_prefix_len -+ -+ def get_ip_prefix(self): -+ return self.ip_prefix -+ -+ def set_ip_prefix(self, ip_prefix): -+ self.ip_prefix = ip_prefix -+ -+ def get_ip_prefix_len(self): -+ return self.ip_prefix_len -+ -+ def set_ip_prefix_len(self, ip_prefix_len): -+ self.ip_prefix_len = ip_prefix_len -+ -+ -+class MockIpamSubnetType(mock.MagicMock): -+ def __init__(self, name=None, 
subnet=None, default_gateway=None, -+ *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ self.subnet = subnet -+ self.default_gateway = default_gateway -+ -+ def get_subnet(self): -+ return self.subnet -+ -+ def set_subnet(self, subnet): -+ self.subnet = subnet -+ -+ def get_default_gateway(self): -+ return self.default_gateway -+ -+ def set_default_gateway(self, default_gateway): -+ self.default_gateway = default_gateway -+ -+ def validate_IpAddressType(self, value): -+ pass -+ -+ -+class MockVnSubnetsType(mock.MagicMock): -+ def __init__(self, name=None, parent_obj=None, ipam_subnets=None, -+ *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ self.ipam_subnets = [] -+ if ipam_subnets: -+ #self.ipam_subnets = copy.deepcopy(ipam_subnets) -+ self.ipam_subnets = ipam_subnets -+ -+ def get_ipam_subnets(self): -+ return self.ipam_subnets -+ -+ def set_ipam_subnets(self, ipam_subnets): -+ self.ipam_subnets = ipam_subnets -+ -+ def add_ipam_subnets(self, value): -+ self.ipam_subnets.append(value) -+ -+ def insert_ipam_subnets(self, index, value): -+ self.ipam_subnets[index] = value -+ -+ def delete_ipam_subnets(self, value): -+ self.ipam_subnets.remove(value) -+ -+ -+class MockNetworkIpam(mock.MagicMock): -+ def __init__(self, name=None, parent_obj=None, -+ network_ipam_mgmt=None, id_perms=None, -+ *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ self._type = 'default-network-ipam' -+ self.name = name -+ self.uuid = None -+ if parent_obj: -+ self.parent_type = parent_obj._type -+ # copy parent's fq_name -+ self.fq_name = list(parent_obj.fq_name) -+ self.fq_name.append(name) -+ if not parent_obj.get_network_ipams(): -+ parent_obj.network_ipams = [] -+ parent_obj.network_ipams.append(self) -+ else: # No parent obj specified -+ self.parent_type = 'project' -+ self.fq_name = [u'default-domain', u'default-project'] -+ self.fq_name.append(name) -+ -+ # property fields -+ if network_ipam_mgmt: -+ self.network_ipam_mgmt = network_ipam_mgmt -+ if id_perms: -+ self.id_perms = id_perms -+ -+ def get_fq_name(self): -+ return self.fq_name -+ -+ -+class MockProject(mock.MagicMock): -+ def __init__(self, name=None, parent_obj=None, id_perms=None, -+ *args, **kwargs): -+ super(mock.MagicMock, self).__init__() -+ self._type = 'project' -+ self.uuid = None -+ self.parent_type = 'domain' -+ self.fq_name = [u'default-domain'] -+ self.fq_name.append(name) -+ -+ def get_fq_name(self): -+ return self.fq_name -+ -+ -+def GlobalProjectApi(project_name): -+ if not GlobalProjects: -+ GlobalProjects.append(MockProject(name=project_name)) -+ -+ return GlobalProjects[0] -+ -+ -+# Mock definations for different pkgs, modules and VncApi -+mock_vnc_api_cls = mock.MagicMock(name='MockVncApi', side_effect=MockVncApi) -+mock_vnc_api_mod = mock.MagicMock(name='vnc_api_mock_mod') -+mock_vnc_api_mod.VncApi = mock_vnc_api_cls -+mock_vnc_api_mod.VirtualNetwork = MockVirtualNetwork -+mock_vnc_api_mod.SubnetType = MockSubnetType -+mock_vnc_api_mod.IpamSubnetType = MockIpamSubnetType -+mock_vnc_api_mod.VnSubnetsType = MockVnSubnetsType -+mock_vnc_api_mod.NetworkIpam = MockNetworkIpam -+mock_vnc_api_mod.Project = GlobalProjectApi -+ -+mock_vnc_api_pkg = mock.MagicMock(name='vnc_api_mock_pkg') -+mock_vnc_api_pkg.vnc_api = mock_vnc_api_mod -+mock_vnc_common_mod = mock.MagicMock(name='vnc_common_mock_mod') -+mock_vnc_exception_mod = mock.MagicMock(name='vnc_exception_mock_mod') -+sys.modules['neutron.plugins.juniper.contrail.ctdb.vnc_api'] = \ -+ mock_vnc_api_pkg 
-+sys.modules['neutron.plugins.juniper.contrail.ctdb.vnc_api.vnc_api'] = \
-+    mock_vnc_api_mod
-+sys.modules['neutron.plugins.juniper.contrail.ctdb.vnc_api.common'] = \
-+    mock_vnc_common_mod
-+sys.modules[('neutron.plugins.juniper.contrail.ctdb.vnc_api.common.'
-+             'exceptions')] = \
-+    mock_vnc_exception_mod
-+
-+CONTRAIL_PKG_PATH = "neutron.plugins.juniper.contrail.contrailplugin"
-+
-+
-+class RouterInstance(object):
-+    def __init__(self):
-+        self._name = 'router_instance'
-+
-+
-+class Context(object):
-+    def __init__(self, tenant_id=''):
-+        self.read_only = False
-+        self.show_deleted = False
-+        self.roles = [u'admin', u'KeystoneServiceAdmin', u'KeystoneAdmin']
-+        self._read_deleted = 'no'
-+        self.timestamp = datetime.datetime.now()
-+        self.auth_token = None
-+        self._session = None
-+        self._is_admin = True
-+        self.admin = uuid.uuid4().hex.decode()
-+        self.request_id = 'req-' + str(uuid.uuid4())
-+        self.tenant = tenant_id
-+
-+
-+class JVContrailPluginTestCase(test_plugin.NeutronDbPluginV2TestCase):
-+    _plugin_name = ('%s.ContrailPlugin' % CONTRAIL_PKG_PATH)
-+
-+    def setUp(self):
-+        mock_vnc_common_mod.exceptions = mock_vnc_exception_mod
-+
-+        mock_vnc_api_mod.common = mock_vnc_common_mod
-+        mock_vnc_api_mod.VncApi = mock_vnc_api_cls
-+
-+        mock_vnc_api_pkg.vnc_api = mock_vnc_api_mod
-+
-+        super(JVContrailPluginTestCase, self).setUp(self._plugin_name)
-+        neutron.db.api._ENGINE = mock.MagicMock()
-+
-+    def tearDown(self):
-+        super(JVContrailPluginTestCase, self).tearDown()
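The setUp above only works because the fake vnc_api modules were planted in sys.modules before the plugin module is imported: the plugin's own imports then resolve to MagicMock objects, and the suite runs without the Contrail client installed. Here is the same technique in isolation, assuming a made-up `vendor_sdk` package standing in for the unavailable dependency:

    import sys
    import mock  # the same third-party mock library these tests use

    # Register a fake module under the dotted name the code under test
    # imports; this must happen before that import runs. 'vendor_sdk'
    # is a hypothetical name, not a real package.
    fake_mod = mock.MagicMock(name='fake_vendor_sdk')
    fake_mod.connect.return_value = True
    sys.modules['vendor_sdk'] = fake_mod

    import vendor_sdk  # resolved from sys.modules, no real install needed
    assert vendor_sdk.connect() is True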
-+
-+
-+class TestContrailNetworks(test_plugin.TestNetworksV2,
-+                           JVContrailPluginTestCase):
-+
-+    def test_create_network(self):
-+        plugin_obj = NeutronManager.get_plugin()
-+        networks_req = {}
-+        router_inst = RouterInstance()
-+        network = {
-+            'router:external': router_inst,
-+            u'name': u'network1',
-+            'admin_state_up': 'True',
-+            'tenant_id': uuid.uuid4().hex.decode(),
-+            'vpc:route_table': '',
-+            'shared': False,
-+            'port_security_enabled': True,
-+            u'contrail:policys': [],
-+        }
-+
-+        networks_req[u'network'] = network
-+        context_obj = Context(network['tenant_id'])
-+
-+        #create project
-+        if not GlobalProjects:
-+            project_name = 'admin'
-+            GlobalProjects.append(MockProject(name=project_name))
-+
-+        net = plugin_obj.create_network(context_obj, networks_req)
-+        if 'contrail:fq_name' not in net.keys():
-+            assert False
-+        else:
-+            assert True
-+
-+    def test_delete_network(self):
-+        # First create the network, then request deletion of the same
-+        plugin_obj = NeutronManager.get_plugin()
-+        networks_req = {}
-+        router_inst = RouterInstance()
-+        network = {
-+            'router:external': router_inst,
-+            u'name': u'network1',
-+            'admin_state_up': 'True',
-+            'tenant_id': uuid.uuid4().hex.decode(),
-+            'vpc:route_table': '',
-+            'shared': False,
-+            'port_security_enabled': True,
-+            u'contrail:policys': [],
-+        }
-+
-+        context_obj = Context(network['tenant_id'])
-+        #create project
-+        if not GlobalProjects:
-+            project_name = 'admin'
-+            GlobalProjects.append(MockProject(name=project_name))
-+
-+        networks_req[u'network'] = network
-+        net_dict = plugin_obj.create_network(context_obj, networks_req)
-+        net_id = net_dict.get('id')
-+
-+        plugin_obj.delete_network(context_obj, net_id)
-+        mock_vnc_api_cls.virtual_network_delete.assert_called_once()
-+
-+    def test_update_network(self):
-+        plugin_obj = NeutronManager.get_plugin()
-+        networks_req = {}
-+        router_inst = RouterInstance()
-+        network = {
-+            'router:external': router_inst,
-+            u'name': u'network1',
-+            'admin_state_up': 'True',
-+            'tenant_id': uuid.uuid4().hex.decode(),
-+            'vpc:route_table': '',
-+            'shared': False,
-+            'port_security_enabled': True,
-+            u'contrail:policys': [],
-+        }
-+
-+        context_obj = Context(network['tenant_id'])
-+        #create project
-+        if not GlobalProjects:
-+            project_name = 'admin'
-+            GlobalProjects.append(MockProject(name=project_name))
-+
-+        networks_req[u'network'] = network
-+        net_dict = plugin_obj.create_network(context_obj, networks_req)
-+        net_id = net_dict.get('id')
-+        # change one of the attributes and update the network
-+        network['admin_state_up'] = 'False'
-+        new_dict = plugin_obj.update_network(context_obj, net_id,
-+                                             networks_req)
-+        self.assertNotEqual(net_dict.get('admin_state_up'),
-+                            new_dict.get('admin_state_up'))
-+
-+    # Test cases not supported in this TestClass
-+    def test_create_networks_bulk_emulated(self):
-+        pass
-+
-+    def test_create_networks_bulk_emulated_plugin_failure(self):
-+        pass
-+
-+    def test_create_public_network(self):
-+        pass
-+
-+    def test_create_networks_bulk_wrong_input(self):
-+        pass
-+
-+    def test_update_shared_network_noadmin_returns_403(self):
-+        pass
-+
-+    def test_update_network_set_shared(self):
-+        pass
-+
-+    def test_update_network_set_not_shared_multi_tenants_returns_409(self):
-+        pass
-+
-+    def test_update_network_set_not_shared_multi_tenants2_returns_409(self):
-+        pass
-+
-+    def test_update_network_set_not_shared_single_tenant(self):
-+        pass
-+
-+    def test_update_network_set_not_shared_other_tenant_returns_409(self):
-+        pass
-+
-+    def test_update_network_with_subnet_set_shared(self):
-+        pass
-+
-+    def test_show_network(self):
-+        pass
-+
-+    def test_show_network_with_subnet(self):
-+        pass
-+
-+    def test_list_networks(self):
-+        pass
-+
-+    def test_list_shared_networks_with_non_admin_user(self):
-+        pass
-+
-+    def test_list_networks_with_parameters(self):
-+        pass
-+
-+    def test_list_networks_with_fields(self):
-+        pass
-+
-+    def test_list_networks_with_parameters_invalid_values(self):
-+        pass
-+
-+    def test_list_networks_with_pagination_emulated(self):
-+        pass
-+
-+    def test_list_networks_without_pk_in_fields_pagination_emulated(self):
-+        pass
-+
-+    def test_list_networks_with_sort_emulated(self):
-+        pass
-+
-+    def test_list_networks_with_pagination_reverse_emulated(self):
-+        pass
-+
-+    def test_invalid_admin_status(self):
-+        pass
-+
-+
-+class TestContrailSubnetsV2(test_plugin.TestSubnetsV2,
-+                            JVContrailPluginTestCase):
-+
-+    def test_create_subnet(self):
-+        #First create a virtual network without a subnet, then
-+        #create a subnet to update the given network.
-+        plugin_obj = NeutronManager.get_plugin()
-+        networks_req = {}
-+        router_inst = RouterInstance()
-+        network = {
-+            'router:external': router_inst,
-+            u'name': u'network1',
-+            'admin_state_up': 'True',
-+            'tenant_id': uuid.uuid4().hex.decode(),
-+            'vpc:route_table': '',
-+            'shared': False,
-+            'port_security_enabled': True,
-+            u'contrail:policys': [],
-+        }
-+
-+        networks_req[u'network'] = network
-+        context_obj = Context(network['tenant_id'])
-+        #create project
-+        if not GlobalProjects:
-+            project_name = 'admin'
-+            GlobalProjects.append(MockProject(name=project_name))
-+
-+        net = plugin_obj.create_network(context_obj, networks_req)
-+
-+        subnet_obj[u'subnet']['network_id'] = net['id']
-+        subnet_dict = plugin_obj.create_subnet(context_obj, subnet_obj)
-+        self.assertEqual(subnet_dict['cidr'],
-+                         subnet_obj['subnet']['cidr'])
-+
-+    def test_delete_subnet(self):
-+        #First create a virtual network without a subnet, then
-+        #create a subnet to update the given network.
-+ plugin_obj = NeutronManager.get_plugin() -+ networks_req = {} -+ router_inst = RouterInstance() -+ network = { -+ 'router:external': router_inst, -+ u'name': u'network1', -+ 'admin_state_up': 'True', -+ 'tenant_id': uuid.uuid4().hex.decode(), -+ 'vpc:route_table': '', -+ 'shared': False, -+ 'port_security_enabled': True, -+ u'contrail:policys': [], -+ } -+ -+ networks_req[u'network'] = network -+ context_obj = Context(network['tenant_id']) -+ #create project -+ if not GlobalProjects: -+ project_name = 'admin' -+ GlobalProjects.append(MockProject(name=project_name)) -+ -+ net = plugin_obj.create_network(context_obj, networks_req) -+ -+ subnet_obj[u'subnet']['network_id'] = net['id'] -+ subnet_dict = plugin_obj.create_subnet(context_obj, subnet_obj) -+ subnet_id = subnet_dict['id'] -+ plugin_obj.delete_subnet(context_obj, subnet_id) -+ -+ def test_update_subnet_gateway_in_allocation_pool_returns_409(self): -+ pass -+ -+ def test_delete_network(self): -+ pass -+ -+ def test_update_subnet_gw_outside_cidr_force_on_returns_400(self): -+ pass -+ -+ def test_update_subnet_adding_additional_host_routes_and_dns(self): -+ pass -+ -+ def test_update_subnet_no_gateway(self): -+ pass -+ -+ def test_create_subnet_bad_cidr(self): -+ pass -+ -+ def test_create_subnet_gw_of_network_force_on_returns_400(self): -+ pass -+ -+ def test_create_subnet_gw_outside_cidr_force_on_returns_400(self): -+ pass -+ -+ def test_create_two_subnets(self): -+ pass -+ -+ def test_create_two_subnets_same_cidr_returns_400(self): -+ pass -+ -+ def test_create_subnet_bad_V4_cidr(self): -+ pass -+ -+ def test_create_subnet_bad_V6_cidr(self): -+ pass -+ -+ def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self): -+ pass -+ -+ def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self): -+ pass -+ -+ def test_create_subnets_bulk_native(self): -+ pass -+ -+ def test_create_subnets_bulk_emulated(self): -+ pass -+ -+ def test_create_subnets_bulk_emulated_plugin_failure(self): -+ pass -+ -+ def test_create_subnets_bulk_native_plugin_failure(self): -+ pass -+ -+ def test_delete_subnet_port_exists_owned_by_network(self): -+ pass -+ -+ def test_delete_subnet_port_exists_owned_by_other(self): -+ pass -+ -+ def test_create_subnet_bad_tenant(self): -+ pass -+ -+ def test_create_subnet_bad_ip_version(self): -+ pass -+ -+ def test_create_subnet_bad_ip_version_null(self): -+ pass -+ -+ def test_create_subnet_bad_uuid(self): -+ pass -+ -+ def test_create_subnet_bad_boolean(self): -+ pass -+ -+ def test_create_subnet_bad_pools(self): -+ pass -+ -+ def test_create_subnet_bad_nameserver(self): -+ pass -+ -+ def test_create_subnet_bad_hostroutes(self): -+ pass -+ -+ def test_create_subnet_defaults(self): -+ pass -+ -+ def test_create_subnet_gw_values(self): -+ pass -+ -+ def test_create_force_subnet_gw_values(self): -+ pass -+ -+ def test_create_subnet_with_allocation_pool(self): -+ pass -+ -+ def test_create_subnet_with_none_gateway(self): -+ pass -+ -+ def test_create_subnet_with_none_gateway_fully_allocated(self): -+ pass -+ -+ def test_subnet_with_allocation_range(self): -+ pass -+ -+ def test_create_subnet_with_none_gateway_allocation_pool(self): -+ pass -+ -+ def test_create_subnet_with_v6_allocation_pool(self): -+ pass -+ -+ def test_create_subnet_with_large_allocation_pool(self): -+ pass -+ -+ def test_create_subnet_multiple_allocation_pools(self): -+ pass -+ -+ def test_create_subnet_with_dhcp_disabled(self): -+ pass -+ -+ def test_create_subnet_default_gw_conflict_allocation_pool_returns_409( -+ self): -+ pass -+ 
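The subnet tests above drive create_subnet/delete_subnet, which (in the plugin code earlier in this patch) allocate a synthetic subnet UUID and map it to a Contrail-side key of the form "<network-uuid> <cidr>" (see _subnet_vnc_create_mapping and the subnet_key.split()[0] lookups). A toy, dict-backed version of that bidirectional mapping is sketched below; the real plugin persists it in the api-server key/value store (the kv_store/kv_retrieve calls mocked above), not in process memory.

    import uuid

    class SubnetKeyMap(object):
        """Toy id<->key mapping standing in for the api-server kv store."""
        def __init__(self):
            self._id_to_key = {}
            self._key_to_id = {}

        def create(self, net_id, cidr):
            subnet_id = str(uuid.uuid4())
            key = '%s %s' % (net_id, cidr)  # "<network-uuid> <cidr>"
            self._id_to_key[subnet_id] = key
            self._key_to_id[key] = subnet_id
            return subnet_id

        def net_id_for(self, subnet_id):
            # same trick as subnet_key.split()[0] in the plugin handlers
            return self._id_to_key[subnet_id].split()[0]

        def delete(self, subnet_id):
            key = self._id_to_key.pop(subnet_id)
            self._key_to_id.pop(key, None)

    m = SubnetKeyMap()
    sid = m.create(u'b11ffca3-3dfc-435e-ae0e-8f44da7188b7', u'20.20.1.0/29')
    assert m.net_id_for(sid) == u'b11ffca3-3dfc-435e-ae0e-8f44da7188b7'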
-+ def test_create_subnet_gateway_in_allocation_pool_returns_409(self): -+ pass -+ -+ def test_create_subnet_overlapping_allocation_pools_returns_409(self): -+ pass -+ -+ def test_create_subnet_invalid_allocation_pool_returns_400(self): -+ pass -+ -+ def test_create_subnet_out_of_range_allocation_pool_returns_400(self): -+ pass -+ -+ def test_create_subnet_shared_returns_400(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv6_cidrv4(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv4_cidrv6(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv4_gatewayv6(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv6_gatewayv4(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv6_dns_v4(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self): -+ pass -+ -+ def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self): -+ pass -+ -+ def test_create_subnet_gw_bcast_force_on_returns_400(self): -+ pass -+ -+ def test_update_subnet(self): -+ pass -+ -+ def test_update_subnet_shared_returns_400(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv4_gatewayv6(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv6_gatewayv4(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv4_dns_v6(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): -+ pass -+ -+ def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): -+ pass -+ -+ def test_show_subnet(self): -+ pass -+ -+ def test_list_subnets(self): -+ pass -+ -+ def test_list_subnets_shared(self): -+ pass -+ -+ def test_list_subnets_with_parameter(self): -+ pass -+ -+ def test_invalid_ip_version(self): -+ pass -+ -+ def test_invalid_subnet(self): -+ pass -+ -+ def test_invalid_ip_address(self): -+ pass -+ -+ def test_invalid_uuid(self): -+ pass -+ -+ def test_create_subnet_with_one_dns(self): -+ pass -+ -+ def test_create_subnet_with_two_dns(self): -+ pass -+ -+ def test_create_subnet_with_too_many_dns(self): -+ pass -+ -+ def test_create_subnet_with_one_host_route(self): -+ pass -+ -+ def test_create_subnet_with_two_host_routes(self): -+ pass -+ -+ def test_create_subnet_with_too_many_routes(self): -+ pass -+ -+ def test_create_subnet_as_admin(self): -+ pass -+ -+ def test_update_subnet_dns(self): -+ pass -+ -+ def test_update_subnet_dns_to_None(self): -+ pass -+ -+ def test_update_subnet_dns_with_too_many_entries(self): -+ pass -+ -+ def test_update_subnet_route(self): -+ pass -+ -+ def test_update_subnet_route_to_None(self): -+ pass -+ -+ def test_update_subnet_route_with_too_many_entries(self): -+ pass -+ -+ def test_delete_subnet_with_dns(self): -+ pass -+ -+ def test_delete_subnet_with_route(self): -+ pass -+ -+ def test_delete_subnet_with_dns_and_route(self): -+ pass -+ -+ def test_list_subnets_with_pagination_emulated(self): -+ pass -+ -+ def test_list_subnets_with_pagination_reverse_emulated(self): -+ pass -+ -+ def test_list_subnets_with_sort_emulated(self): -+ pass -+ -+ def test_validate_subnet_host_routes_exhausted(self): -+ pass -+ -+ def test_validate_subnet_dns_nameservers_exhausted(self): -+ pass -+ -+ def test_update_subnet_gw_ip_in_use_returns_409(self): -+ pass -+ -+ -+class TestContrailPortsV2(test_plugin.TestPortsV2, -+ JVContrailPluginTestCase): -+ -+ def test_create_port(self): -+ pass -+ -+ def test_create_port_json(self): -+ pass -+ -+ def test_create_port_bad_tenant(self): -+ pass -+ -+ def test_create_port_public_network(self): -+ pass -+ -+ def test_create_port_public_network_with_ip(self): -+ pass -+ -+ 
def test_create_ports_bulk_native(self): -+ pass -+ -+ def test_create_ports_bulk_emulated(self): -+ pass -+ -+ def test_create_ports_bulk_wrong_input(self): -+ pass -+ -+ def test_create_ports_bulk_emulated_plugin_failure(self): -+ pass -+ -+ def test_create_ports_bulk_native_plugin_failure(self): -+ pass -+ -+ def test_create_port_as_admin(self): -+ pass -+ -+ def test_list_ports(self): -+ pass -+ -+ def test_list_ports_filtered_by_fixed_ip(self): -+ pass -+ -+ def test_list_ports_public_network(self): -+ pass -+ -+ def test_show_port(self): -+ pass -+ -+ def test_delete_port(self): -+ pass -+ -+ def test_delete_port_public_network(self): -+ pass -+ -+ def test_update_port(self): -+ pass -+ -+ def test_update_device_id_null(self): -+ pass -+ -+ def test_delete_network_if_port_exists(self): -+ pass -+ -+ def test_delete_network_port_exists_owned_by_network(self): -+ pass -+ -+ def test_update_port_delete_ip(self): -+ pass -+ -+ def test_no_more_port_exception(self): -+ pass -+ -+ def test_update_port_update_ip(self): -+ pass -+ -+ def test_update_port_update_ip_address_only(self): -+ pass -+ -+ def test_update_port_update_ips(self): -+ pass -+ -+ def test_update_port_add_additional_ip(self): -+ pass -+ -+ def test_requested_duplicate_mac(self): -+ pass -+ -+ def test_mac_generation(self): -+ pass -+ -+ def test_mac_generation_4octet(self): -+ pass -+ -+ def test_bad_mac_format(self): -+ pass -+ -+ def test_mac_exhaustion(self): -+ pass -+ -+ def test_requested_duplicate_ip(self): -+ pass -+ -+ def test_requested_subnet_delete(self): -+ pass -+ -+ def test_requested_subnet_id(self): -+ pass -+ -+ def test_requested_subnet_id_not_on_network(self): -+ pass -+ -+ def test_overlapping_subnets(self): -+ pass -+ -+ def test_requested_subnet_id_v4_and_v6(self): -+ pass -+ -+ def test_range_allocation(self): -+ pass -+ -+ def test_requested_invalid_fixed_ips(self): -+ pass -+ -+ def test_invalid_ip(self): -+ pass -+ -+ def test_requested_split(self): -+ pass -+ -+ def test_duplicate_ips(self): -+ pass -+ -+ def test_fixed_ip_invalid_subnet_id(self): -+ pass -+ -+ def test_fixed_ip_invalid_ip(self): -+ pass -+ -+ def test_requested_ips_only(self): -+ pass -+ -+ def test_recycling(self): -+ pass -+ -+ def test_invalid_admin_state(self): -+ pass -+ -+ def test_invalid_mac_address(self): -+ pass -+ -+ def test_default_allocation_expiration(self): -+ pass -+ -+ def test_update_fixed_ip_lease_expiration(self): -+ pass -+ -+ def test_port_delete_holds_ip(self): -+ pass -+ -+ def test_update_fixed_ip_lease_expiration_invalid_address(self): -+ pass -+ -+ def test_hold_ip_address(self): -+ pass -+ -+ def test_recycle_held_ip_address(self): -+ pass -+ -+ def test_recycle_expired_previously_run_within_context(self): -+ pass -+ -+ def test_update_port_not_admin(self): -+ pass -+ -+ def test_list_ports_with_pagination_emulated(self): -+ pass -+ -+ def test_list_ports_with_pagination_reverse_emulated(self): -+ pass -+ -+ def test_list_ports_with_sort_emulated(self): -+ pass -+ -+ def test_max_fixed_ips_exceeded(self): -+ pass -+ -+ def test_update_max_fixed_ips_exceeded(self): -+ pass -+ -+ def test_recycle_ip_address_without_allocation_pool(self): -+ pass -+ -+ def test_recycle_ip_address_on_exhausted_allocation_pool(self): -+ pass -+ -+ def test_recycle_ip_address_outside_allocation_pool(self): -+ pass -+ -+ def test_recycle_ip_address_in_allocation_pool(self): -+ pass -diff --git setup.cfg setup.cfg -index bae3ffd..6c2ea4f 100644 ---- setup.cfg -+++ setup.cfg -@@ -63,6 +63,7 @@ data_files = - 
etc/neutron/plugins/openvswitch = etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini - etc/neutron/plugins/plumgrid = etc/neutron/plugins/plumgrid/plumgrid.ini - etc/neutron/plugins/ryu = etc/neutron/plugins/ryu/ryu.ini -+ etc/neutron/plugins/juniper/contrail/ContrailPlugin = etc/neutron/plugins/juniper/contrail/ContrailPlugin.ini - scripts = - bin/quantum-rootwrap - bin/neutron-rootwrap diff --git a/contrail/nova_v3.patch b/contrail/nova_v3.patch deleted file mode 100644 index a766f0afd8..0000000000 --- a/contrail/nova_v3.patch +++ /dev/null @@ -1,3252 +0,0 @@ -diff --git nova/network/model.py nova/network/model.py -index e11bfeb..a004cb4 100644 ---- nova/network/model.py -+++ nova/network/model.py -@@ -38,6 +38,7 @@ VIF_TYPE_802_QBG = '802.1qbg' - VIF_TYPE_802_QBH = '802.1qbh' - VIF_TYPE_MLNX_DIRECT = 'mlnx_direct' - VIF_TYPE_MIDONET = 'midonet' -+VIF_TYPE_CONTRAIL = 'contrail' - VIF_TYPE_OTHER = 'other' - - # Constant for max length of network interface names -diff --git nova/virt/libvirt/vif.py nova/virt/libvirt/vif.py -index 0e38345..32ef4bb 100644 ---- nova/virt/libvirt/vif.py -+++ nova/virt/libvirt/vif.py -@@ -312,6 +312,17 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - - return conf - -+ def get_config_contrail(self, instance, vif, image_meta, -+ inst_type): -+ conf = super(LibvirtGenericVIFDriver, -+ self).get_config(instance, vif, -+ image_meta, inst_type) -+ -+ dev = self.get_vif_devname(vif) -+ designer.set_vif_host_backend_ethernet_config(conf, dev) -+ -+ return conf -+ - def get_config_mlnx_direct(self, instance, vif, image_meta, - inst_type): - conf = super(LibvirtGenericVIFDriver, -@@ -375,6 +386,11 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - vif, - image_meta, - inst_type) -+ elif vif_type == network_model.VIF_TYPE_CONTRAIL: -+ return self.get_config_contrail(instance, -+ vif, -+ image_meta, -+ inst_type) - else: - raise exception.NovaException( - _("Unexpected vif_type=%s") % vif_type) -@@ -567,6 +583,31 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - except processutils.ProcessExecutionError: - LOG.exception(_("Failed while plugging vif"), instance=instance) - -+ def plug_contrail(self, instance, vif): -+ """Plug using Contrail Driver -+ """ -+ super(LibvirtGenericVIFDriver, -+ self).plug(instance, vif) -+ dev = self.get_vif_devname(vif) -+ iface_id = vif['id'] -+ from nova.virt import netutils -+ net, prefix_len=netutils.get_net_and_prefixlen(vif['network']['subnets'][0]['cidr']) -+ try: -+ linux_net.create_tap_dev(dev) -+ utils.execute('config_parser', 'create', -+ '--port_id', vif['id'], -+ '--tap_name', dev, -+ '--ip_address', vif['network']['subnets'][0]['ips'][0]['address'], -+ '--instance_id', instance['uuid'], -+ '--vn_id', vif['network']['id'], -+ '--mac_address', vif['address'], -+ '--display_name', instance['display_name'], -+ '--hostname', instance['hostname'], -+ '--host', instance['host'], -+ '--prefix_len', prefix_len) -+ except processutils.ProcessExecutionError: -+ LOG.exception(_("Failed while plugging vif"), instance=instance) -+ - def plug(self, instance, vif): - vif_type = vif['type'] - -@@ -574,7 +615,6 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - 'vif=%(vif)s'), - {'vif_type': vif_type, 'instance': instance, - 'vif': vif}) -- - if vif_type is None: - raise exception.NovaException( - _("vif_type parameter must be present " -@@ -595,6 +635,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - self.plug_mlnx_direct(instance, vif) - elif vif_type == network_model.VIF_TYPE_MIDONET: - 
self.plug_midonet(instance, vif) -+ elif vif_type == network_model.VIF_TYPE_CONTRAIL: -+ self.plug_contrail(instance, vif) - else: - raise exception.NovaException( - _("Unexpected vif_type=%s") % vif_type) -@@ -746,6 +788,19 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - except processutils.ProcessExecutionError: - LOG.exception(_("Failed while unplugging vif"), instance=instance) - -+ def unplug_contrail(self, instance, vif): -+ """Unplug using Contrail Driver -+ """ -+ super(LibvirtGenericVIFDriver, -+ self).unplug(instance, vif) -+ dev = self.get_vif_devname(vif) -+ try: -+ utils.execute('config_parser', 'delete', -+ '--port_id', vif['id']) -+ linux_net.delete_net_dev(dev) -+ except processutils.ProcessExecutionError: -+ LOG.exception(_("Failed while unplugging vif"), instance=instance) -+ - def unplug(self, instance, vif): - vif_type = vif['type'] - -@@ -770,6 +825,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - self.unplug_ivs(instance, vif) - elif vif_type == network_model.VIF_TYPE_IOVISOR: - self.unplug_iovisor(instance, vif) -+ elif vif_type == network_model.VIF_TYPE_CONTRAIL: -+ self.unplug_contrail(instance, vif) - elif vif_type == network_model.VIF_TYPE_MLNX_DIRECT: - self.unplug_mlnx_direct(instance, vif) - elif vif_type == network_model.VIF_TYPE_MIDONET: -diff --git plugins/contrail/config_parser.py plugins/contrail/config_parser.py -new file mode 100755 -index 0000000..acac9fb ---- /dev/null -+++ plugins/contrail/config_parser.py -@@ -0,0 +1,126 @@ -+#! /usr/bin/env python -+import os -+import sys -+import cgitb -+import argparse -+ -+VIF_DIR = '/opt/stack/nova/plugins/contrail/vif/' -+ -+class ContrailVifDelete(object): -+ def __init__(self, port_id): -+ if os.path.exists(VIF_DIR+port_id): -+ os.remove(VIF_DIR+port_id) -+ #end __init__ -+# end ContrailVifDelete -+ -+class ContrailVifUpdate(object): -+ def __init__(self, port_id, tap_name, ip_address, instance_id, vn_id, mac_address, display_name, -+ hostname, host, prefix_len): -+ try: -+ os.makedirs(VIF_DIR) -+ except OSError: -+ if os.path.exists(VIF_DIR): -+ pass -+ else: -+ raise -+ self.__update_vif(port_id, tap_name, -+ ip_address, instance_id, -+ vn_id, mac_address, display_name, -+ hostname, host, prefix_len) -+ -+ # end __init__ -+ -+ def __update_vif(self, port_id, tap_name, -+ ip_address, instance_id, -+ vn_id, mac_address, -+ display_name, -+ hostname, -+ host, prefix_len): -+ if (port_id and tap_name and -+ ip_address and instance_id and -+ vn_id and mac_address and -+ host and prefix_len): -+ import ConfigParser -+ config = ConfigParser.RawConfigParser() -+ config.add_section('Vif') -+ config.set('Vif', 'port_id', port_id) -+ config.set('Vif', 'tap_name', tap_name) -+ config.set('Vif', 'ip_address', ip_address) -+ config.set('Vif', 'instance_id', instance_id) -+ config.set('Vif', 'vn_id', vn_id) -+ config.set('Vif', 'mac_address', mac_address) -+ config.set('Vif', 'display_name', display_name) -+ config.set('Vif', 'hostname', hostname) -+ config.set('Vif', 'host', host) -+ config.set('Vif', 'prefix_len', prefix_len) -+ with open(VIF_DIR + port_id, 'wb') as configfile: -+ config.write(configfile) -+ -+ # end __update_vif -+# end ContrailVifUpdate -+ -+def update_vif_file(args): -+ if args.which is 'create': -+ if (args.port_id and args.tap_name and -+ args.ip_address and args.instance_id and -+ args.vn_id and args.mac_address and -+ args.host and args.prefix_len): -+ ContrailVifUpdate(args.port_id, args.tap_name, -+ args.ip_address, args.instance_id, -+ args.vn_id, args.mac_address, 
args.display_name,
-+                              args.hostname, args.host, args.prefix_len)
-+    elif args.which == 'delete':
-+        if args.port_id is not None:
-+            ContrailVifDelete(args.port_id)
-+
-+# end update_vif_file
-+
-+def parse_args(args_str):
-+    '''Parse the create/delete sub-commands and their options.
-+    '''
-+    # Source any specified config/ini file
-+    # Turn off help, so we print all options in response to -h
-+    conf_parser = argparse.ArgumentParser(add_help=False)
-+    args, remaining_argv = conf_parser.parse_known_args(args_str.split())
-+    parser = argparse.ArgumentParser(
-+        # Inherit options from config_parser
-+        parents=[conf_parser],
-+        # print script description with -h/--help
-+        description=__doc__,
-+        # Don't mess with format of description
-+        formatter_class=argparse.RawDescriptionHelpFormatter,
-+    )
-+    #defaults.update(secopts)
-+    #defaults.update(ksopts)
-+    #parser.set_defaults(**defaults)
-+
-+    subparsers = parser.add_subparsers(help='commands')
-+    create_parser = subparsers.add_parser('create', help='Create/Modify vif')
-+    create_parser.set_defaults(which='create')
-+    create_parser.add_argument("--port_id", help="port id")
-+    create_parser.add_argument("--tap_name", help="tap_name")
-+    create_parser.add_argument("--ip_address", help="ip_address")
-+    create_parser.add_argument("--instance_id", help="instance_id")
-+    create_parser.add_argument("--vn_id", help="vn_id")
-+    create_parser.add_argument("--mac_address", help="mac_address")
-+    create_parser.add_argument("--display_name", help="display_name")
-+    create_parser.add_argument("--hostname", help="hostname")
-+    create_parser.add_argument("--host", help="host")
-+    create_parser.add_argument("--prefix_len", help="prefix_len")
-+    delete_parser = subparsers.add_parser('delete', help='Delete vif')
-+    delete_parser.set_defaults(which='delete')
-+    delete_parser.add_argument("--port_id", help="port id")
-+    print parser.parse_args()
-+    args = parser.parse_args(remaining_argv)
-+    update_vif_file(args)
-+# end parse_args
-+
-+def main(args_str=None):
-+    if not args_str:
-+        args_str = ' '.join(sys.argv[1:])
-+    args = parse_args(args_str)
-+# end main
-+
-+if __name__ == '__main__':
-+    cgitb.enable(format='text')
-+    main()
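config_parser.py above persists one ini file per port under VIF_DIR, and contrail_vif.py below watches that directory and replays the files to the agent over thrift. A minimal round-trip of the `[Vif]` file format the two sides share, using an in-memory buffer instead of a file under VIF_DIR; every field value here is a made-up example.

    import ConfigParser  # Python 2 stdlib module, as used throughout the patch
    import StringIO

    # Write side: the same '[Vif]' section ContrailVifUpdate writes out.
    config = ConfigParser.RawConfigParser()
    config.add_section('Vif')
    config.set('Vif', 'port_id', '11111111-2222-3333-4444-555555555555')
    config.set('Vif', 'tap_name', 'tap11111111-22')
    config.set('Vif', 'ip_address', '10.0.0.3')
    config.set('Vif', 'prefix_len', '24')
    buf = StringIO.StringIO()
    config.write(buf)

    # Read side: what _read_port_info_from_file does on the agent side.
    buf.seek(0)
    read_back = ConfigParser.ConfigParser()
    read_back.readfp(buf)
    assert read_back.get('Vif', 'ip_address') == '10.0.0.3'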
-diff --git plugins/contrail/contrail_vif.py plugins/contrail/contrail_vif.py
-new file mode 100644
-index 0000000..7d004a0
---- /dev/null
-+++ plugins/contrail/contrail_vif.py
-@@ -0,0 +1,298 @@
-+#
-+# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-+#
-+
-+"""
-+contrail vif plug: communicates with the contrail agent
-+"""
-+import os
-+import pyinotify
-+import sys
-+import socket
-+import cgitb
-+from nova import utils
-+from nova import exception
-+from nova.openstack.common import log as logging
-+from pyinotify import WatchManager, Notifier, ThreadedNotifier, EventsCodes, ProcessEvent
-+import gevent
-+from gevent import monkey; monkey.patch_time()
-+
-+LOG = logging.getLogger(__name__)
-+
-+'''
-+struct Port {
-+    1:required tuuid port_id,
-+    2:required tuuid instance_id,
-+    3:required string tap_name,
-+    4:required string ip_address,
-+    5:required tuuid vn_id,
-+    6:required string mac_address,
-+    7:optional string display_name,
-+    8:optional string hostname,
-+    9:optional string host;
-+    10: optional byte prefix_len;
-+}
-+'''
-+
-+class ContrailVifPlug(object):
-+    """Tracks vif ports and keeps the local contrail agent in sync."""
-+    def __init__(self):
-+        self._agent_alive = False
-+        self._agent_connected = False
-+        self._port_dict = {}
-+        self._protocol = None
-+        self._notifier = None
-+    #end __init__
-+
-+    def _agent_connect(self, protocol):
-+        # Agent connect for the first time
-+        if protocol != None:
-+            from instance_service import InstanceService
-+            service = InstanceService.Client(protocol)
-+            return service.Connect()
-+        else:
-+            return False
-+    #end _agent_connect
-+
-+    def _keep_alive(self):
-+        try:
-+            if self._agent_alive == False:
-+                self._protocol = self._agent_conn_open()
-+                if self._protocol == None:
-+                    return
-+            from instance_service import InstanceService
-+            service = InstanceService.Client(self._protocol)
-+            aa_latest = service.KeepAliveCheck()
-+            if self._agent_alive == False and aa_latest == True:
-+                port_l = [v for k, v in self._port_dict.iteritems()]
-+                service.AddPort(port_l)
-+                self._agent_alive = True
-+                return
-+            if self._agent_alive == True and aa_latest == False:
-+                self._agent_alive = False
-+                return
-+        except:
-+            self._agent_alive = False
-+    #end _keep_alive
-+
-+    def _agent_conn_open(self):
-+        import socket
-+        import sys
-+        import uuid
-+        from thrift.transport import TTransport, TSocket
-+        from thrift.transport.TTransport import TTransportException
-+        from thrift.protocol import TBinaryProtocol, TProtocol
-+        from instance_service import InstanceService
-+        from instance_service import ttypes
-+        try:
-+            socket = TSocket.TSocket("127.0.0.1", 9090)
-+            transport = TTransport.TFramedTransport(socket)
-+            transport.open()
-+            protocol = TBinaryProtocol.TBinaryProtocol(transport)
-+            self._agent_connected = self._agent_connect(protocol)
-+            return protocol
-+        except TTransportException:
-+            return None
-+    #end _agent_conn_open
-+
-+    def get_dev_name(self, iface_id):
-+        return "tap" + iface_id[0:11]
-+    #end get_dev_name
-+
-+    def _convert_to_bl(self, id):
-+        import uuid
-+        hexstr = uuid.UUID(id).hex
-+        return [int(hexstr[i:i+2], 16) for i in range(32) if i%2 == 0]
-+    #end _convert_to_bl
-+
-+    def _agent_inform_port_add(self, port, port_id):
-+        # First add to the port list
-+        self._port_dict[port_id] = port
-+        if not self._agent_alive:
-+            return
-+        from instance_service import InstanceService
-+        import socket
-+        try:
-+            service = InstanceService.Client(self._protocol)
-+            service.AddPort([port])
-+        except:
-+            self._agent_alive = False
-+    #end _agent_inform_port_add
-+
-+    def _agent_inform_port_delete(self, port_id):
-+        # First remove from the port list
-+        if port_id in self._port_dict:
-+            del_port_id = self._port_dict[port_id].port_id
-+            del self._port_dict[port_id]
-+        if not self._agent_alive:
-+            return
-+        from instance_service import InstanceService
-+        import socket
-+        try:
-+            service = InstanceService.Client(self._protocol)
-+            service.DeletePort(del_port_id)
-+        except:
-+            self._agent_alive = False
-+    #end _agent_inform_port_delete
-+
-+
-+#end class ContrailVifPlug
-+
-+def launch_interval_looping(contrail_vif_plug):
-+    while True:
-+        contrail_vif_plug._keep_alive()
-+        gevent.sleep(2)
-+#end launch_interval_looping
-+
-+
-+class ContrailVifDir(ProcessEvent):
-+    def __init__(self, contrail_vif, vif_dir):
-+        self._create_port_list(contrail_vif, vif_dir)
-+        self._contrail_vif_plug = contrail_vif
-+    #end __init__
-+
-+    def _convert_to_bl(self, id):
-+        import uuid
-+        hexstr = uuid.UUID(id).hex
-+        return [int(hexstr[i:i+2], 16) for i in range(32) if i%2 == 0]
-+    #end _convert_to_bl
-+
-+    def _create_port_list(self, contrail_vif, vif_dir):
-+        import os
-+        files = [f for f in os.listdir(vif_dir) if os.path.isfile(os.path.join(vif_dir,f))]
-+        for f in files:
-+            print f
-+            file_name = os.path.join(vif_dir, f)
-+            port, port_id = self._read_port_info_from_file(file_name)
-+            if port is not None:
-+                contrail_vif._agent_inform_port_add(port, port_id)
-+    #end _create_port_list
-+
-+    def _get_port_info(self, config):
-+        import ConfigParser
-+        #import pdb; pdb.set_trace()
-+        from instance_service import ttypes
-+        try:
-+            vif = 'Vif'
-+            port = ttypes.Port(self._convert_to_bl(config.get(vif, 'port_id')),
-+                               self._convert_to_bl(config.get(vif, 'instance_id')),
-+                               config.get(vif, 'tap_name'),
-+                               config.get(vif, 'ip_address'),
-+                               self._convert_to_bl(config.get(vif, 'vn_id')),
-+                               config.get(vif, 'mac_address'),
-+                               config.get(vif, 'display_name'),
-+                               config.get(vif, 'hostname'),
-+                               config.get(vif, 'host'))
-+            #print config.get(vif, 'prefix_len')
-+            return port
-+        except:
-+            return None
-+    #end _get_port_info
-+
-+    def _print_port_info(self, config):
-+        import ConfigParser
-+        try:
-+            vif = 'Vif'
-+            print config.get(vif, 'port_id')
-+            print config.get(vif, 'instance_id')
-+            print config.get(vif, 'tap_name')
-+            print config.get(vif, 'ip_address')
-+            print config.get(vif, 'vn_id')
-+            print config.get(vif, 'mac_address')
-+            print config.get(vif, 'display_name')
-+            print config.get(vif, 'hostname')
-+            print config.get(vif, 'host')
-+            print config.get(vif, 'prefix_len')
-+        except:
-+            return
-+    #end _print_port_info
-+
-+    def _read_port_info_from_file(self, file_name):
-+        import ConfigParser
-+        config = ConfigParser.ConfigParser()
-+        config.read(file_name)
-+        self._print_port_info(config)
-+        port = self._get_port_info(config)
-+        if port is not None:
-+            return port, config.get('Vif', 'port_id')
-+        else:
-+            return None, None
-+    #end _read_port_info_from_file
-+
-+    def _is_allowed(self, file_name):
-+        ret = True
-+        if (file_name[0] == '.'
or file_name[-1] == '~'): -+ ret = False -+ return ret -+ #end _is_allowed -+ -+ def process_IN_CREATE(self, event): -+ file_name = os.path.join(event.path, event.name) -+ if not self._is_allowed(event.name): -+ return -+ print "Create: %s" % file_name -+ port, port_id = self._read_port_info_from_file(file_name) -+ if port is not None: -+ print "In create: %s" % port.tap_name -+ self._contrail_vif_plug._agent_inform_port_add(port, port_id) -+ -+ #end process_IN_CREATE -+ -+ def process_IN_DELETE(self, event): -+ if not self._is_allowed(event.name): -+ return -+ print "Remove: %s" % os.path.join(event.path, event.name) -+ file_name = os.path.join(event.path, event.name) -+ #import pdb; pdb.set_trace() -+ if self._is_allowed(event.name): -+ self._contrail_vif_plug._agent_inform_port_delete(event.name) -+ return -+ -+ #end process_IN_DELETE -+ -+ def process_IN_MODIFY(self, event): -+ if not self._is_allowed(event.name): -+ return -+ file_name = os.path.join(event.path, event.name) -+ print "Modify: %s" % file_name -+ port, port_id = self._read_port_info_from_file(file_name) -+ if port is not None: -+ print "In modify %s" % port.tap_name -+ self._contrail_vif_plug._agent_inform_port_add(port, port_id) -+ #end process_IN_MODIFY -+#end ContrilVifDir -+ -+ -+VIF_DIR = '/opt/stack/nova/plugins/contrail/vif' -+def contrail_vif_dir_monitor(contrail_vif_plug): -+ #import pdb; pdb.set_trace() -+ wm = WatchManager() -+ notifier = pyinotify.ThreadedNotifier(wm, ContrailVifDir(contrail_vif_plug, VIF_DIR)) -+ contrail_vif_plug._notifier = notifier -+ # watched events -+ mask = pyinotify.IN_DELETE | \ -+ pyinotify.IN_CREATE | \ -+ pyinotify.IN_MODIFY | \ -+ pyinotify.IN_ISDIR -+ wm.add_watch(VIF_DIR, mask, quiet=False) -+ notifier.start() -+#end contrail_vif_dir_monitor -+ -+def main(args_str = None): -+ try: -+ os.makedirs(VIF_DIR) -+ except OSError: -+ if os.path.exists(VIF_DIR): -+ pass -+ else: -+ raise -+ contrail_vif_plug = ContrailVifPlug() -+ contrail_vif_dir_monitor_task = gevent.spawn(contrail_vif_dir_monitor, contrail_vif_plug) -+ #import pdb; pdb.set_trace() -+ contrail_timer_task = gevent.spawn(launch_interval_looping, contrail_vif_plug) -+ gevent.joinall([contrail_timer_task, contrail_vif_dir_monitor_task]) -+#end main -+ -+if __name__ == '__main__': -+ cgitb.enable(format='text') -+ main() -diff --git plugins/contrail/instance_service/InstanceService-remote plugins/contrail/instance_service/InstanceService-remote -new file mode 100644 -index 0000000..76626d4 ---- /dev/null -+++ plugins/contrail/instance_service/InstanceService-remote -@@ -0,0 +1,165 @@ -+#!/usr/bin/env python -+# -+# Autogenerated by Thrift Compiler (0.8.0) -+# -+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -+# -+# options string: py -+# -+ -+import sys -+import pprint -+from urlparse import urlparse -+from thrift.transport import TTransport -+from thrift.transport import TSocket -+from thrift.transport import THttpClient -+from thrift.protocol import TBinaryProtocol -+ -+import InstanceService -+from ttypes import * -+ -+if len(sys.argv) <= 1 or sys.argv[1] == '--help': -+ print '' -+ print 'Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] function [arg1 [arg2...]]' -+ print '' -+ print 'Functions:' -+ print ' bool AddPort(PortList port_list)' -+ print ' bool KeepAliveCheck()' -+ print ' bool Connect()' -+ print ' bool DeletePort(tuuid port_id)' -+ print ' bool TunnelNHEntryAdd(string src_ip, string dst_ip, string vrf_name)' -+ print ' bool TunnelNHEntryDelete(string src_ip, string 
-+  print '  bool RouteEntryAdd(string ip_address, string gw_ip, string vrf_name, string label)'
-+  print '  bool RouteEntryDelete(string ip_address, string vrf_name)'
-+  print '  bool AddHostRoute(string ip_address, string vrf_name)'
-+  print '  bool AddLocalVmRoute(string ip_address, string intf_uuid, string vrf_name, string label)'
-+  print '  bool AddRemoteVmRoute(string ip_address, string gw_ip, string vrf_name, string label)'
-+  print '  bool CreateVrf(string vrf_name)'
-+  print ''
-+  sys.exit(0)
-+
-+pp = pprint.PrettyPrinter(indent = 2)
-+host = 'localhost'
-+port = 9090
-+uri = ''
-+framed = False
-+http = False
-+argi = 1
-+
-+if sys.argv[argi] == '-h':
-+  parts = sys.argv[argi+1].split(':')
-+  host = parts[0]
-+  if len(parts) > 1:
-+    port = int(parts[1])
-+  argi += 2
-+
-+if sys.argv[argi] == '-u':
-+  url = urlparse(sys.argv[argi+1])
-+  parts = url[1].split(':')
-+  host = parts[0]
-+  if len(parts) > 1:
-+    port = int(parts[1])
-+  else:
-+    port = 80
-+  uri = url[2]
-+  if url[4]:
-+    uri += '?%s' % url[4]
-+  http = True
-+  argi += 2
-+
-+if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
-+  framed = True
-+  argi += 1
-+
-+cmd = sys.argv[argi]
-+args = sys.argv[argi+1:]
-+
-+if http:
-+  transport = THttpClient.THttpClient(host, port, uri)
-+else:
-+  socket = TSocket.TSocket(host, port)
-+  if framed:
-+    transport = TTransport.TFramedTransport(socket)
-+  else:
-+    transport = TTransport.TBufferedTransport(socket)
-+protocol = TBinaryProtocol.TBinaryProtocol(transport)
-+client = InstanceService.Client(protocol)
-+transport.open()
-+
-+if cmd == 'AddPort':
-+  if len(args) != 1:
-+    print 'AddPort requires 1 args'
-+    sys.exit(1)
-+  pp.pprint(client.AddPort(eval(args[0]),))
-+
-+elif cmd == 'KeepAliveCheck':
-+  if len(args) != 0:
-+    print 'KeepAliveCheck requires 0 args'
-+    sys.exit(1)
-+  pp.pprint(client.KeepAliveCheck())
-+
-+elif cmd == 'Connect':
-+  if len(args) != 0:
-+    print 'Connect requires 0 args'
-+    sys.exit(1)
-+  pp.pprint(client.Connect())
-+
-+elif cmd == 'DeletePort':
-+  if len(args) != 1:
-+    print 'DeletePort requires 1 args'
-+    sys.exit(1)
-+  pp.pprint(client.DeletePort(eval(args[0]),))
-+
-+elif cmd == 'TunnelNHEntryAdd':
-+  if len(args) != 3:
-+    print 'TunnelNHEntryAdd requires 3 args'
-+    sys.exit(1)
-+  pp.pprint(client.TunnelNHEntryAdd(args[0],args[1],args[2],))
-+
-+elif cmd == 'TunnelNHEntryDelete':
-+  if len(args) != 3:
-+    print 'TunnelNHEntryDelete requires 3 args'
-+    sys.exit(1)
-+  pp.pprint(client.TunnelNHEntryDelete(args[0],args[1],args[2],))
-+
-+elif cmd == 'RouteEntryAdd':
-+  if len(args) != 4:
-+    print 'RouteEntryAdd requires 4 args'
-+    sys.exit(1)
-+  pp.pprint(client.RouteEntryAdd(args[0],args[1],args[2],args[3],))
-+
-+elif cmd == 'RouteEntryDelete':
-+  if len(args) != 2:
-+    print 'RouteEntryDelete requires 2 args'
-+    sys.exit(1)
-+  pp.pprint(client.RouteEntryDelete(args[0],args[1],))
-+
-+elif cmd == 'AddHostRoute':
-+  if len(args) != 2:
-+    print 'AddHostRoute requires 2 args'
-+    sys.exit(1)
-+  pp.pprint(client.AddHostRoute(args[0],args[1],))
-+
-+elif cmd == 'AddLocalVmRoute':
-+  if len(args) != 4:
-+    print 'AddLocalVmRoute requires 4 args'
-+    sys.exit(1)
-+  pp.pprint(client.AddLocalVmRoute(args[0],args[1],args[2],args[3],))
-+
-+elif cmd == 'AddRemoteVmRoute':
-+  if len(args) != 4:
-+    print 'AddRemoteVmRoute requires 4 args'
-+    sys.exit(1)
-+  pp.pprint(client.AddRemoteVmRoute(args[0],args[1],args[2],args[3],))
-+
-+elif cmd == 'CreateVrf':
-+  if len(args) != 1:
-+    print 'CreateVrf requires 1 args'
-+    sys.exit(1)
-+  pp.pprint(client.CreateVrf(args[0],))
-+
-+else:
-+  print 'Unrecognized method %s' % cmd
-+  sys.exit(1)
-+
-+transport.close()
-diff --git plugins/contrail/instance_service/InstanceService.py plugins/contrail/instance_service/InstanceService.py
-new file mode 100644
-index 0000000..2219045
---- /dev/null
-+++ plugins/contrail/instance_service/InstanceService.py
-@@ -0,0 +1,2275 @@
-+#
-+# Autogenerated by Thrift Compiler (0.8.0)
-+#
-+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-+#
-+# options string: py
-+#
-+
-+from thrift.Thrift import TType, TMessageType, TException
-+from ttypes import *
-+from thrift.Thrift import TProcessor
-+from thrift.transport import TTransport
-+from thrift.protocol import TBinaryProtocol, TProtocol
-+try:
-+  from thrift.protocol import fastbinary
-+except:
-+  fastbinary = None
-+
-+
-+class Iface:
-+  def AddPort(self, port_list):
-+    """
-+    Parameters:
-+     - port_list
-+    """
-+    pass
-+
-+  def KeepAliveCheck(self, ):
-+    pass
-+
-+  def Connect(self, ):
-+    pass
-+
-+  def DeletePort(self, port_id):
-+    """
-+    Parameters:
-+     - port_id
-+    """
-+    pass
-+
-+  def TunnelNHEntryAdd(self, src_ip, dst_ip, vrf_name):
-+    """
-+    Parameters:
-+     - src_ip
-+     - dst_ip
-+     - vrf_name
-+    """
-+    pass
-+
-+  def TunnelNHEntryDelete(self, src_ip, dst_ip, vrf_name):
-+    """
-+    Parameters:
-+     - src_ip
-+     - dst_ip
-+     - vrf_name
-+    """
-+    pass
-+
-+  def RouteEntryAdd(self, ip_address, gw_ip, vrf_name, label):
-+    """
-+    Parameters:
-+     - ip_address
-+     - gw_ip
-+     - vrf_name
-+     - label
-+    """
-+    pass
-+
-+  def RouteEntryDelete(self, ip_address, vrf_name):
-+    """
-+    Parameters:
-+     - ip_address
-+     - vrf_name
-+    """
-+    pass
-+
-+  def AddHostRoute(self, ip_address, vrf_name):
-+    """
-+    Parameters:
-+     - ip_address
-+     - vrf_name
-+    """
-+    pass
-+
-+  def AddLocalVmRoute(self, ip_address, intf_uuid, vrf_name, label):
-+    """
-+    Parameters:
-+     - ip_address
-+     - intf_uuid
-+     - vrf_name
-+     - label
-+    """
-+    pass
-+
-+  def AddRemoteVmRoute(self, ip_address, gw_ip, vrf_name, label):
-+    """
-+    Parameters:
-+     - ip_address
-+     - gw_ip
-+     - vrf_name
-+     - label
-+    """
-+    pass
-+
-+  def CreateVrf(self, vrf_name):
-+    """
-+    Parameters:
-+     - vrf_name
-+    """
-+    pass
-+
-+
-+class Client(Iface):
-+  def __init__(self, iprot, oprot=None):
-+    self._iprot = self._oprot = iprot
-+    if oprot is not None:
-+      self._oprot = oprot
-+    self._seqid = 0
-+
-+  def AddPort(self, port_list):
-+    """
-+    Parameters:
-+     - port_list
-+    """
-+    self.send_AddPort(port_list)
-+    return self.recv_AddPort()
-+
-+  def send_AddPort(self, port_list):
-+    self._oprot.writeMessageBegin('AddPort', TMessageType.CALL, self._seqid)
-+    args = AddPort_args()
-+    args.port_list = port_list
-+    args.write(self._oprot)
-+    self._oprot.writeMessageEnd()
-+    self._oprot.trans.flush()
-+
-+  def recv_AddPort(self, ):
-+    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
-+    if mtype == TMessageType.EXCEPTION:
-+      x = TApplicationException()
-+      x.read(self._iprot)
-+      self._iprot.readMessageEnd()
-+      raise x
-+    result = AddPort_result()
-+    result.read(self._iprot)
-+    self._iprot.readMessageEnd()
-+    if result.success is not None:
-+      return result.success
-+    raise TApplicationException(TApplicationException.MISSING_RESULT, "AddPort failed: unknown result");
-+
-+  def KeepAliveCheck(self, ):
-+    self.send_KeepAliveCheck()
-+    return self.recv_KeepAliveCheck()
-+
-+  def send_KeepAliveCheck(self, ):
-+    self._oprot.writeMessageBegin('KeepAliveCheck', TMessageType.CALL, self._seqid)
-+    args = KeepAliveCheck_args()
-+    args.write(self._oprot)
-+
self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_KeepAliveCheck(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = KeepAliveCheck_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "KeepAliveCheck failed: unknown result"); -+ -+ def Connect(self, ): -+ self.send_Connect() -+ return self.recv_Connect() -+ -+ def send_Connect(self, ): -+ self._oprot.writeMessageBegin('Connect', TMessageType.CALL, self._seqid) -+ args = Connect_args() -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_Connect(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = Connect_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "Connect failed: unknown result"); -+ -+ def DeletePort(self, port_id): -+ """ -+ Parameters: -+ - port_id -+ """ -+ self.send_DeletePort(port_id) -+ return self.recv_DeletePort() -+ -+ def send_DeletePort(self, port_id): -+ self._oprot.writeMessageBegin('DeletePort', TMessageType.CALL, self._seqid) -+ args = DeletePort_args() -+ args.port_id = port_id -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_DeletePort(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = DeletePort_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "DeletePort failed: unknown result"); -+ -+ def TunnelNHEntryAdd(self, src_ip, dst_ip, vrf_name): -+ """ -+ Parameters: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ self.send_TunnelNHEntryAdd(src_ip, dst_ip, vrf_name) -+ return self.recv_TunnelNHEntryAdd() -+ -+ def send_TunnelNHEntryAdd(self, src_ip, dst_ip, vrf_name): -+ self._oprot.writeMessageBegin('TunnelNHEntryAdd', TMessageType.CALL, self._seqid) -+ args = TunnelNHEntryAdd_args() -+ args.src_ip = src_ip -+ args.dst_ip = dst_ip -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_TunnelNHEntryAdd(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = TunnelNHEntryAdd_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "TunnelNHEntryAdd failed: unknown result"); -+ -+ def TunnelNHEntryDelete(self, src_ip, dst_ip, vrf_name): -+ """ -+ Parameters: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ self.send_TunnelNHEntryDelete(src_ip, dst_ip, vrf_name) -+ return self.recv_TunnelNHEntryDelete() -+ -+ def 
send_TunnelNHEntryDelete(self, src_ip, dst_ip, vrf_name): -+ self._oprot.writeMessageBegin('TunnelNHEntryDelete', TMessageType.CALL, self._seqid) -+ args = TunnelNHEntryDelete_args() -+ args.src_ip = src_ip -+ args.dst_ip = dst_ip -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_TunnelNHEntryDelete(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = TunnelNHEntryDelete_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "TunnelNHEntryDelete failed: unknown result"); -+ -+ def RouteEntryAdd(self, ip_address, gw_ip, vrf_name, label): -+ """ -+ Parameters: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ self.send_RouteEntryAdd(ip_address, gw_ip, vrf_name, label) -+ return self.recv_RouteEntryAdd() -+ -+ def send_RouteEntryAdd(self, ip_address, gw_ip, vrf_name, label): -+ self._oprot.writeMessageBegin('RouteEntryAdd', TMessageType.CALL, self._seqid) -+ args = RouteEntryAdd_args() -+ args.ip_address = ip_address -+ args.gw_ip = gw_ip -+ args.vrf_name = vrf_name -+ args.label = label -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_RouteEntryAdd(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = RouteEntryAdd_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "RouteEntryAdd failed: unknown result"); -+ -+ def RouteEntryDelete(self, ip_address, vrf_name): -+ """ -+ Parameters: -+ - ip_address -+ - vrf_name -+ """ -+ self.send_RouteEntryDelete(ip_address, vrf_name) -+ return self.recv_RouteEntryDelete() -+ -+ def send_RouteEntryDelete(self, ip_address, vrf_name): -+ self._oprot.writeMessageBegin('RouteEntryDelete', TMessageType.CALL, self._seqid) -+ args = RouteEntryDelete_args() -+ args.ip_address = ip_address -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_RouteEntryDelete(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = RouteEntryDelete_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "RouteEntryDelete failed: unknown result"); -+ -+ def AddHostRoute(self, ip_address, vrf_name): -+ """ -+ Parameters: -+ - ip_address -+ - vrf_name -+ """ -+ self.send_AddHostRoute(ip_address, vrf_name) -+ return self.recv_AddHostRoute() -+ -+ def send_AddHostRoute(self, ip_address, vrf_name): -+ self._oprot.writeMessageBegin('AddHostRoute', TMessageType.CALL, self._seqid) -+ args = AddHostRoute_args() -+ args.ip_address = ip_address -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def 
recv_AddHostRoute(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = AddHostRoute_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "AddHostRoute failed: unknown result"); -+ -+ def AddLocalVmRoute(self, ip_address, intf_uuid, vrf_name, label): -+ """ -+ Parameters: -+ - ip_address -+ - intf_uuid -+ - vrf_name -+ - label -+ """ -+ self.send_AddLocalVmRoute(ip_address, intf_uuid, vrf_name, label) -+ return self.recv_AddLocalVmRoute() -+ -+ def send_AddLocalVmRoute(self, ip_address, intf_uuid, vrf_name, label): -+ self._oprot.writeMessageBegin('AddLocalVmRoute', TMessageType.CALL, self._seqid) -+ args = AddLocalVmRoute_args() -+ args.ip_address = ip_address -+ args.intf_uuid = intf_uuid -+ args.vrf_name = vrf_name -+ args.label = label -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_AddLocalVmRoute(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = AddLocalVmRoute_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "AddLocalVmRoute failed: unknown result"); -+ -+ def AddRemoteVmRoute(self, ip_address, gw_ip, vrf_name, label): -+ """ -+ Parameters: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ self.send_AddRemoteVmRoute(ip_address, gw_ip, vrf_name, label) -+ return self.recv_AddRemoteVmRoute() -+ -+ def send_AddRemoteVmRoute(self, ip_address, gw_ip, vrf_name, label): -+ self._oprot.writeMessageBegin('AddRemoteVmRoute', TMessageType.CALL, self._seqid) -+ args = AddRemoteVmRoute_args() -+ args.ip_address = ip_address -+ args.gw_ip = gw_ip -+ args.vrf_name = vrf_name -+ args.label = label -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_AddRemoteVmRoute(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = AddRemoteVmRoute_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "AddRemoteVmRoute failed: unknown result"); -+ -+ def CreateVrf(self, vrf_name): -+ """ -+ Parameters: -+ - vrf_name -+ """ -+ self.send_CreateVrf(vrf_name) -+ return self.recv_CreateVrf() -+ -+ def send_CreateVrf(self, vrf_name): -+ self._oprot.writeMessageBegin('CreateVrf', TMessageType.CALL, self._seqid) -+ args = CreateVrf_args() -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_CreateVrf(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = CreateVrf_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not 
None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "CreateVrf failed: unknown result"); -+ -+ -+class Processor(Iface, TProcessor): -+ def __init__(self, handler): -+ self._handler = handler -+ self._processMap = {} -+ self._processMap["AddPort"] = Processor.process_AddPort -+ self._processMap["KeepAliveCheck"] = Processor.process_KeepAliveCheck -+ self._processMap["Connect"] = Processor.process_Connect -+ self._processMap["DeletePort"] = Processor.process_DeletePort -+ self._processMap["TunnelNHEntryAdd"] = Processor.process_TunnelNHEntryAdd -+ self._processMap["TunnelNHEntryDelete"] = Processor.process_TunnelNHEntryDelete -+ self._processMap["RouteEntryAdd"] = Processor.process_RouteEntryAdd -+ self._processMap["RouteEntryDelete"] = Processor.process_RouteEntryDelete -+ self._processMap["AddHostRoute"] = Processor.process_AddHostRoute -+ self._processMap["AddLocalVmRoute"] = Processor.process_AddLocalVmRoute -+ self._processMap["AddRemoteVmRoute"] = Processor.process_AddRemoteVmRoute -+ self._processMap["CreateVrf"] = Processor.process_CreateVrf -+ -+ def process(self, iprot, oprot): -+ (name, type, seqid) = iprot.readMessageBegin() -+ if name not in self._processMap: -+ iprot.skip(TType.STRUCT) -+ iprot.readMessageEnd() -+ x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name)) -+ oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid) -+ x.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ return -+ else: -+ self._processMap[name](self, seqid, iprot, oprot) -+ return True -+ -+ def process_AddPort(self, seqid, iprot, oprot): -+ args = AddPort_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = AddPort_result() -+ result.success = self._handler.AddPort(args.port_list) -+ oprot.writeMessageBegin("AddPort", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_KeepAliveCheck(self, seqid, iprot, oprot): -+ args = KeepAliveCheck_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = KeepAliveCheck_result() -+ result.success = self._handler.KeepAliveCheck() -+ oprot.writeMessageBegin("KeepAliveCheck", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_Connect(self, seqid, iprot, oprot): -+ args = Connect_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = Connect_result() -+ result.success = self._handler.Connect() -+ oprot.writeMessageBegin("Connect", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_DeletePort(self, seqid, iprot, oprot): -+ args = DeletePort_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = DeletePort_result() -+ result.success = self._handler.DeletePort(args.port_id) -+ oprot.writeMessageBegin("DeletePort", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_TunnelNHEntryAdd(self, seqid, iprot, oprot): -+ args = TunnelNHEntryAdd_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = TunnelNHEntryAdd_result() -+ result.success = self._handler.TunnelNHEntryAdd(args.src_ip, args.dst_ip, args.vrf_name) -+ oprot.writeMessageBegin("TunnelNHEntryAdd", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_TunnelNHEntryDelete(self, seqid, iprot, oprot): -+ args = TunnelNHEntryDelete_args() 
-+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = TunnelNHEntryDelete_result() -+ result.success = self._handler.TunnelNHEntryDelete(args.src_ip, args.dst_ip, args.vrf_name) -+ oprot.writeMessageBegin("TunnelNHEntryDelete", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_RouteEntryAdd(self, seqid, iprot, oprot): -+ args = RouteEntryAdd_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = RouteEntryAdd_result() -+ result.success = self._handler.RouteEntryAdd(args.ip_address, args.gw_ip, args.vrf_name, args.label) -+ oprot.writeMessageBegin("RouteEntryAdd", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_RouteEntryDelete(self, seqid, iprot, oprot): -+ args = RouteEntryDelete_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = RouteEntryDelete_result() -+ result.success = self._handler.RouteEntryDelete(args.ip_address, args.vrf_name) -+ oprot.writeMessageBegin("RouteEntryDelete", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_AddHostRoute(self, seqid, iprot, oprot): -+ args = AddHostRoute_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = AddHostRoute_result() -+ result.success = self._handler.AddHostRoute(args.ip_address, args.vrf_name) -+ oprot.writeMessageBegin("AddHostRoute", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_AddLocalVmRoute(self, seqid, iprot, oprot): -+ args = AddLocalVmRoute_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = AddLocalVmRoute_result() -+ result.success = self._handler.AddLocalVmRoute(args.ip_address, args.intf_uuid, args.vrf_name, args.label) -+ oprot.writeMessageBegin("AddLocalVmRoute", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_AddRemoteVmRoute(self, seqid, iprot, oprot): -+ args = AddRemoteVmRoute_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = AddRemoteVmRoute_result() -+ result.success = self._handler.AddRemoteVmRoute(args.ip_address, args.gw_ip, args.vrf_name, args.label) -+ oprot.writeMessageBegin("AddRemoteVmRoute", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_CreateVrf(self, seqid, iprot, oprot): -+ args = CreateVrf_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = CreateVrf_result() -+ result.success = self._handler.CreateVrf(args.vrf_name) -+ oprot.writeMessageBegin("CreateVrf", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ -+# HELPER FUNCTIONS AND STRUCTURES -+ -+class AddPort_args: -+ """ -+ Attributes: -+ - port_list -+ """ -+ -+ thrift_spec = None -+ def __init__(self, port_list=None,): -+ self.port_list = port_list -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == -1: -+ if ftype == TType.LIST: -+ self.port_list = [] -+ (_etype24, _size21) = iprot.readListBegin() -+ for _i25 in 
xrange(_size21): -+ _elem26 = Port() -+ _elem26.read(iprot) -+ self.port_list.append(_elem26) -+ iprot.readListEnd() -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddPort_args') -+ if self.port_list is not None: -+ oprot.writeFieldBegin('port_list', TType.LIST, -1) -+ oprot.writeListBegin(TType.STRUCT, len(self.port_list)) -+ for iter27 in self.port_list: -+ iter27.write(oprot) -+ oprot.writeListEnd() -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddPort_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddPort_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class KeepAliveCheck_args: -+ -+ thrift_spec = ( -+ ) -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('KeepAliveCheck_args') -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class KeepAliveCheck_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('KeepAliveCheck_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class Connect_args: -+ -+ thrift_spec = ( -+ ) -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('Connect_args') -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, 
other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class Connect_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('Connect_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class DeletePort_args: -+ """ -+ Attributes: -+ - port_id -+ """ -+ -+ thrift_spec = None -+ def __init__(self, port_id=None,): -+ self.port_id = port_id -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == -1: -+ if ftype == TType.LIST: -+ self.port_id = [] -+ (_etype31, _size28) = iprot.readListBegin() -+ for _i32 in xrange(_size28): -+ _elem33 = iprot.readI16(); -+ self.port_id.append(_elem33) -+ iprot.readListEnd() -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('DeletePort_args') -+ if self.port_id is not None: -+ oprot.writeFieldBegin('port_id', TType.LIST, -1) -+ oprot.writeListBegin(TType.I16, len(self.port_id)) -+ for iter34 in self.port_id: -+ oprot.writeI16(iter34) -+ oprot.writeListEnd() -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class DeletePort_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('DeletePort_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class TunnelNHEntryAdd_args: -+ """ -+ Attributes: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'src_ip', None, None, ), # 1 -+ (2, TType.STRING, 'dst_ip', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ ) -+ -+ def __init__(self, src_ip=None, dst_ip=None, vrf_name=None,): -+ self.src_ip = src_ip -+ self.dst_ip = dst_ip -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.src_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.dst_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('TunnelNHEntryAdd_args') -+ if self.src_ip is not 
None: -+ oprot.writeFieldBegin('src_ip', TType.STRING, 1) -+ oprot.writeString(self.src_ip) -+ oprot.writeFieldEnd() -+ if self.dst_ip is not None: -+ oprot.writeFieldBegin('dst_ip', TType.STRING, 2) -+ oprot.writeString(self.dst_ip) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.src_ip is None: -+ raise TProtocol.TProtocolException(message='Required field src_ip is unset!') -+ if self.dst_ip is None: -+ raise TProtocol.TProtocolException(message='Required field dst_ip is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class TunnelNHEntryAdd_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('TunnelNHEntryAdd_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class TunnelNHEntryDelete_args: -+ """ -+ Attributes: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'src_ip', None, None, ), # 1 -+ (2, TType.STRING, 'dst_ip', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ ) -+ -+ def __init__(self, src_ip=None, dst_ip=None, vrf_name=None,): -+ self.src_ip = src_ip -+ self.dst_ip = dst_ip -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() 
-+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.src_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.dst_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('TunnelNHEntryDelete_args') -+ if self.src_ip is not None: -+ oprot.writeFieldBegin('src_ip', TType.STRING, 1) -+ oprot.writeString(self.src_ip) -+ oprot.writeFieldEnd() -+ if self.dst_ip is not None: -+ oprot.writeFieldBegin('dst_ip', TType.STRING, 2) -+ oprot.writeString(self.dst_ip) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.src_ip is None: -+ raise TProtocol.TProtocolException(message='Required field src_ip is unset!') -+ if self.dst_ip is None: -+ raise TProtocol.TProtocolException(message='Required field dst_ip is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class TunnelNHEntryDelete_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('TunnelNHEntryDelete_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and 
self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class RouteEntryAdd_args: -+ """ -+ Attributes: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'gw_ip', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ (4, TType.STRING, 'label', None, None, ), # 4 -+ ) -+ -+ def __init__(self, ip_address=None, gw_ip=None, vrf_name=None, label=None,): -+ self.ip_address = ip_address -+ self.gw_ip = gw_ip -+ self.vrf_name = vrf_name -+ self.label = label -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.gw_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 4: -+ if ftype == TType.STRING: -+ self.label = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('RouteEntryAdd_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.gw_ip is not None: -+ oprot.writeFieldBegin('gw_ip', TType.STRING, 2) -+ oprot.writeString(self.gw_ip) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ if self.label is not None: -+ oprot.writeFieldBegin('label', TType.STRING, 4) -+ oprot.writeString(self.label) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ if self.gw_ip is None: -+ raise TProtocol.TProtocolException(message='Required field gw_ip is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class RouteEntryAdd_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None 
and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('RouteEntryAdd_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class RouteEntryDelete_args: -+ """ -+ Attributes: -+ - ip_address -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'vrf_name', None, None, ), # 2 -+ ) -+ -+ def __init__(self, ip_address=None, vrf_name=None,): -+ self.ip_address = ip_address -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('RouteEntryDelete_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 2) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ if self.vrf_name is None: -+ raise TProtocol.TProtocolException(message='Required field vrf_name is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class RouteEntryDelete_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('RouteEntryDelete_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddHostRoute_args: -+ """ -+ Attributes: -+ - ip_address -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'vrf_name', None, None, ), # 2 -+ ) -+ -+ def __init__(self, ip_address=None, vrf_name=None,): -+ self.ip_address = ip_address -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddHostRoute_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 2) -+ oprot.writeString(self.vrf_name) -+ 
oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddHostRoute_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddHostRoute_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddLocalVmRoute_args: -+ """ -+ Attributes: -+ - ip_address -+ - intf_uuid -+ - vrf_name -+ - label -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'intf_uuid', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ (4, TType.STRING, 'label', None, None, ), # 4 -+ ) -+ -+ def __init__(self, ip_address=None, intf_uuid=None, vrf_name=None, label=None,): -+ self.ip_address = ip_address -+ self.intf_uuid = intf_uuid -+ self.vrf_name = vrf_name -+ self.label = label -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.intf_uuid = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == 
TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 4: -+ if ftype == TType.STRING: -+ self.label = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddLocalVmRoute_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.intf_uuid is not None: -+ oprot.writeFieldBegin('intf_uuid', TType.STRING, 2) -+ oprot.writeString(self.intf_uuid) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ if self.label is not None: -+ oprot.writeFieldBegin('label', TType.STRING, 4) -+ oprot.writeString(self.label) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ if self.intf_uuid is None: -+ raise TProtocol.TProtocolException(message='Required field intf_uuid is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddLocalVmRoute_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddLocalVmRoute_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == 
other) -+ -+class AddRemoteVmRoute_args: -+ """ -+ Attributes: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'gw_ip', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ (4, TType.STRING, 'label', None, None, ), # 4 -+ ) -+ -+ def __init__(self, ip_address=None, gw_ip=None, vrf_name=None, label=None,): -+ self.ip_address = ip_address -+ self.gw_ip = gw_ip -+ self.vrf_name = vrf_name -+ self.label = label -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.gw_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 4: -+ if ftype == TType.STRING: -+ self.label = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddRemoteVmRoute_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.gw_ip is not None: -+ oprot.writeFieldBegin('gw_ip', TType.STRING, 2) -+ oprot.writeString(self.gw_ip) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ if self.label is not None: -+ oprot.writeFieldBegin('label', TType.STRING, 4) -+ oprot.writeString(self.label) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ if self.gw_ip is None: -+ raise TProtocol.TProtocolException(message='Required field gw_ip is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddRemoteVmRoute_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, 
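Note that the `required` qualifiers from the IDL are enforced only inside validate(); the generated write() shown here never calls it, so an unset field fails only when checked. A sketch of exercising that check directly, under the same import assumption as above:

    # validate() raises for unset required fields; write() does not check.
    from thrift.protocol import TProtocol
    from instance_service.InstanceService import AddRemoteVmRoute_args

    args = AddRemoteVmRoute_args(ip_address='10.0.0.7')   # gw_ip left unset
    try:
        args.validate()
    except TProtocol.TProtocolException as exc:
        print exc.message   # Required field gw_ip is unset!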
(self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddRemoteVmRoute_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class CreateVrf_args: -+ """ -+ Attributes: -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'vrf_name', None, None, ), # 1 -+ ) -+ -+ def __init__(self, vrf_name=None,): -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('CreateVrf_args') -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 1) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.vrf_name is None: -+ raise TProtocol.TProtocolException(message='Required field vrf_name is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class CreateVrf_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ 
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('CreateVrf_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -diff --git plugins/contrail/instance_service/__init__.py plugins/contrail/instance_service/__init__.py -new file mode 100644 -index 0000000..f34ead4 ---- /dev/null -+++ plugins/contrail/instance_service/__init__.py -@@ -0,0 +1 @@ -+__all__ = ['ttypes', 'constants', 'InstanceService'] -diff --git plugins/contrail/instance_service/constants.py plugins/contrail/instance_service/constants.py -new file mode 100644 -index 0000000..73f07fe ---- /dev/null -+++ plugins/contrail/instance_service/constants.py -@@ -0,0 +1,11 @@ -+# -+# Autogenerated by Thrift Compiler (0.8.0) -+# -+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -+# -+# options string: py -+# -+ -+from thrift.Thrift import TType, TMessageType, TException -+from ttypes import * -+ -diff --git plugins/contrail/instance_service/ttypes.py plugins/contrail/instance_service/ttypes.py -new file mode 100644 -index 0000000..564f82e ---- /dev/null -+++ plugins/contrail/instance_service/ttypes.py -@@ -0,0 +1,210 @@ -+# -+# Autogenerated by Thrift Compiler (0.8.0) -+# -+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -+# -+# options string: py -+# -+ -+from thrift.Thrift import TType, TMessageType, TException -+ -+from thrift.transport import TTransport -+from thrift.protocol import TBinaryProtocol, TProtocol -+try: -+ from thrift.protocol import fastbinary -+except: -+ fastbinary = None -+ -+ -+ -+class Port: -+ """ -+ Attributes: -+ - port_id -+ - instance_id -+ - tap_name -+ - ip_address -+ - vn_id -+ - mac_address -+ - display_name -+ - hostname -+ - host -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.LIST, 'port_id', (TType.I16,None), None, ), # 1 -+ (2, TType.LIST, 'instance_id', (TType.I16,None), None, ), # 2 -+ (3, TType.STRING, 'tap_name', None, None, ), # 3 -+ (4, TType.STRING, 'ip_address', None, None, ), # 4 -+ (5, TType.LIST, 'vn_id', (TType.I16,None), None, ), # 5 -+ (6, TType.STRING, 'mac_address', None, None, ), # 6 -+ (7, TType.STRING, 'display_name', None, None, ), # 7 -+ (8, TType.STRING, 'hostname', None, None, ), # 8 -+ (9, TType.STRING, 'host', None, None, ), # 9 -+ ) -+ -+ def __init__(self, port_id=None, instance_id=None, tap_name=None, ip_address=None, vn_id=None, mac_address=None, display_name=None, hostname=None, 
host=None,): -+ self.port_id = port_id -+ self.instance_id = instance_id -+ self.tap_name = tap_name -+ self.ip_address = ip_address -+ self.vn_id = vn_id -+ self.mac_address = mac_address -+ self.display_name = display_name -+ self.hostname = hostname -+ self.host = host -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.LIST: -+ self.port_id = [] -+ (_etype3, _size0) = iprot.readListBegin() -+ for _i4 in xrange(_size0): -+ _elem5 = iprot.readI16(); -+ self.port_id.append(_elem5) -+ iprot.readListEnd() -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.LIST: -+ self.instance_id = [] -+ (_etype9, _size6) = iprot.readListBegin() -+ for _i10 in xrange(_size6): -+ _elem11 = iprot.readI16(); -+ self.instance_id.append(_elem11) -+ iprot.readListEnd() -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == TType.STRING: -+ self.tap_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 4: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 5: -+ if ftype == TType.LIST: -+ self.vn_id = [] -+ (_etype15, _size12) = iprot.readListBegin() -+ for _i16 in xrange(_size12): -+ _elem17 = iprot.readI16(); -+ self.vn_id.append(_elem17) -+ iprot.readListEnd() -+ else: -+ iprot.skip(ftype) -+ elif fid == 6: -+ if ftype == TType.STRING: -+ self.mac_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 7: -+ if ftype == TType.STRING: -+ self.display_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 8: -+ if ftype == TType.STRING: -+ self.hostname = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 9: -+ if ftype == TType.STRING: -+ self.host = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('Port') -+ if self.port_id is not None: -+ oprot.writeFieldBegin('port_id', TType.LIST, 1) -+ oprot.writeListBegin(TType.I16, len(self.port_id)) -+ for iter18 in self.port_id: -+ oprot.writeI16(iter18) -+ oprot.writeListEnd() -+ oprot.writeFieldEnd() -+ if self.instance_id is not None: -+ oprot.writeFieldBegin('instance_id', TType.LIST, 2) -+ oprot.writeListBegin(TType.I16, len(self.instance_id)) -+ for iter19 in self.instance_id: -+ oprot.writeI16(iter19) -+ oprot.writeListEnd() -+ oprot.writeFieldEnd() -+ if self.tap_name is not None: -+ oprot.writeFieldBegin('tap_name', TType.STRING, 3) -+ oprot.writeString(self.tap_name) -+ oprot.writeFieldEnd() -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 4) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.vn_id is not None: -+ oprot.writeFieldBegin('vn_id', TType.LIST, 5) -+ oprot.writeListBegin(TType.I16, len(self.vn_id)) -+ for iter20 in self.vn_id: -+ oprot.writeI16(iter20) -+ 
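The tuuid fields of this Port struct (port_id, instance_id, vn_id) travel as lists of byte values rather than UUID strings; the _convert_to_bl() helper in contrail_vif.py later in this patch does the conversion. A sketch of building a Port that validate() accepts, with illustrative values:

    # uuid string -> list of byte ints, matching _convert_to_bl() below.
    import uuid
    from instance_service import ttypes

    def uuid_to_byte_list(id_str):
        hexstr = uuid.UUID(id_str).hex
        return [int(hexstr[i:i + 2], 16) for i in range(0, 32, 2)]

    port_uuid = 'de305d54-75b4-431b-adb2-eb6b9e546014'
    port = ttypes.Port(port_id=uuid_to_byte_list(port_uuid),
                       instance_id=uuid_to_byte_list(str(uuid.uuid4())),
                       tap_name='tap' + port_uuid[0:11],   # get_dev_name() scheme
                       ip_address='10.0.0.3',
                       vn_id=uuid_to_byte_list(str(uuid.uuid4())),
                       mac_address='02:de:30:5d:54:75')
    port.validate()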
oprot.writeListEnd() -+ oprot.writeFieldEnd() -+ if self.mac_address is not None: -+ oprot.writeFieldBegin('mac_address', TType.STRING, 6) -+ oprot.writeString(self.mac_address) -+ oprot.writeFieldEnd() -+ if self.display_name is not None: -+ oprot.writeFieldBegin('display_name', TType.STRING, 7) -+ oprot.writeString(self.display_name) -+ oprot.writeFieldEnd() -+ if self.hostname is not None: -+ oprot.writeFieldBegin('hostname', TType.STRING, 8) -+ oprot.writeString(self.hostname) -+ oprot.writeFieldEnd() -+ if self.host is not None: -+ oprot.writeFieldBegin('host', TType.STRING, 9) -+ oprot.writeString(self.host) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.port_id is None: -+ raise TProtocol.TProtocolException(message='Required field port_id is unset!') -+ if self.instance_id is None: -+ raise TProtocol.TProtocolException(message='Required field instance_id is unset!') -+ if self.tap_name is None: -+ raise TProtocol.TProtocolException(message='Required field tap_name is unset!') -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ if self.vn_id is None: -+ raise TProtocol.TProtocolException(message='Required field vn_id is unset!') -+ if self.mac_address is None: -+ raise TProtocol.TProtocolException(message='Required field mac_address is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) diff --git a/contrail/nova_v4.patch b/contrail/nova_v4.patch deleted file mode 100644 index 93716cb3a7..0000000000 --- a/contrail/nova_v4.patch +++ /dev/null @@ -1,3252 +0,0 @@ -diff --git nova/network/model.py nova/network/model.py -index 9a543a3..cf5f56d 100644 ---- nova/network/model.py -+++ nova/network/model.py -@@ -39,6 +39,7 @@ VIF_TYPE_802_QBG = '802.1qbg' - VIF_TYPE_802_QBH = '802.1qbh' - VIF_TYPE_MLNX_DIRECT = 'mlnx_direct' - VIF_TYPE_MIDONET = 'midonet' -+VIF_TYPE_VROUTER = 'vrouter' - VIF_TYPE_OTHER = 'other' - - # Constant for max length of network interface names -diff --git nova/virt/libvirt/vif.py nova/virt/libvirt/vif.py -index cfff8dd..0a51783 100644 ---- nova/virt/libvirt/vif.py -+++ nova/virt/libvirt/vif.py -@@ -309,6 +309,17 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - - return conf - -+ def get_config_contrail(self, instance, vif, image_meta, -+ inst_type): -+ conf = super(LibvirtGenericVIFDriver, -+ self).get_config(instance, vif, -+ image_meta, inst_type) -+ -+ dev = self.get_vif_devname(vif) -+ designer.set_vif_host_backend_ethernet_config(conf, dev) -+ -+ return conf -+ - def get_config_mlnx_direct(self, instance, vif, image_meta, - inst_type): - conf = super(LibvirtGenericVIFDriver, -@@ -372,6 +383,11 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - vif, - image_meta, - inst_type) -+ elif vif_type == network_model.VIF_TYPE_VROUTER: -+ return self.get_config_contrail(instance, -+ vif, -+ image_meta, -+ inst_type) - else: - raise exception.NovaException( - _("Unexpected vif_type=%s") % vif_type) -@@ -564,6 +580,31 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - except processutils.ProcessExecutionError: - LOG.exception(_("Failed while plugging vif"), instance=instance) - -+ def plug_contrail(self, instance, 
vif): -+ """Plug using Contrail Driver -+ """ -+ super(LibvirtGenericVIFDriver, -+ self).plug(instance, vif) -+ dev = self.get_vif_devname(vif) -+ iface_id = vif['id'] -+ from nova.virt import netutils -+ net, prefix_len=netutils.get_net_and_prefixlen(vif['network']['subnets'][0]['cidr']) -+ try: -+ linux_net.create_tap_dev(dev) -+ utils.execute('config_parser', 'create', -+ '--port_id', vif['id'], -+ '--tap_name', dev, -+ '--ip_address', vif['network']['subnets'][0]['ips'][0]['address'], -+ '--instance_id', instance['uuid'], -+ '--vn_id', vif['network']['id'], -+ '--mac_address', vif['address'], -+ '--display_name', instance['display_name'], -+ '--hostname', instance['hostname'], -+ '--host', instance['host'], -+ '--prefix_len', prefix_len) -+ except processutils.ProcessExecutionError: -+ LOG.exception(_("Failed while plugging vif"), instance=instance) -+ - def plug(self, instance, vif): - vif_type = vif['type'] - -@@ -571,7 +612,6 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - 'vif=%(vif)s'), - {'vif_type': vif_type, 'instance': instance, - 'vif': vif}) -- - if vif_type is None: - raise exception.NovaException( - _("vif_type parameter must be present " -@@ -592,6 +632,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - self.plug_mlnx_direct(instance, vif) - elif vif_type == network_model.VIF_TYPE_MIDONET: - self.plug_midonet(instance, vif) -+ elif vif_type == network_model.VIF_TYPE_VROUTER: -+ self.plug_contrail(instance, vif) - else: - raise exception.NovaException( - _("Unexpected vif_type=%s") % vif_type) -@@ -746,6 +788,19 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - except processutils.ProcessExecutionError: - LOG.exception(_("Failed while unplugging vif"), instance=instance) - -+ def unplug_contrail(self, instance, vif): -+ """Unplug using Contrail Driver -+ """ -+ super(LibvirtGenericVIFDriver, -+ self).unplug(instance, vif) -+ dev = self.get_vif_devname(vif) -+ try: -+ utils.execute('config_parser', 'delete', -+ '--port_id', vif['id']) -+ linux_net.delete_net_dev(dev) -+ except processutils.ProcessExecutionError: -+ LOG.exception(_("Failed while unplugging vif"), instance=instance) -+ - def unplug(self, instance, vif): - vif_type = vif['type'] - -@@ -770,6 +825,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - self.unplug_ivs(instance, vif) - elif vif_type == network_model.VIF_TYPE_IOVISOR: - self.unplug_iovisor(instance, vif) -+ elif vif_type == network_model.VIF_TYPE_VROUTER: -+ self.unplug_contrail(instance, vif) - elif vif_type == network_model.VIF_TYPE_MLNX_DIRECT: - self.unplug_mlnx_direct(instance, vif) - elif vif_type == network_model.VIF_TYPE_MIDONET: -diff --git plugins/contrail/config_parser.py plugins/contrail/config_parser.py -new file mode 100755 -index 0000000..acac9fb ---- /dev/null -+++ plugins/contrail/config_parser.py -@@ -0,0 +1,126 @@ -+#! 
/usr/bin/env python
-+import os
-+import sys
-+import cgitb
-+import argparse
-+
-+VIF_DIR = '/opt/stack/nova/plugins/contrail/vif/'
-+
-+class ContrailVifDelete(object):
-+    def __init__(self, port_id):
-+        if os.path.exists(VIF_DIR+port_id):
-+            os.remove(VIF_DIR+port_id)
-+    #end __init__
-+# end ContrailVifDelete
-+
-+class ContrailVifUpdate(object):
-+    def __init__(self, port_id, tap_name, ip_address, instance_id, vn_id, mac_address, display_name,
-+                 hostname, host, prefix_len):
-+        try:
-+            os.makedirs(VIF_DIR)
-+        except OSError:
-+            if os.path.exists(VIF_DIR):
-+                pass
-+            else:
-+                raise
-+        self.__update_vif(port_id, tap_name,
-+                          ip_address, instance_id,
-+                          vn_id, mac_address, display_name,
-+                          hostname, host, prefix_len)
-+
-+    # end __init__
-+
-+    def __update_vif(self, port_id, tap_name,
-+                     ip_address, instance_id,
-+                     vn_id, mac_address,
-+                     display_name,
-+                     hostname,
-+                     host, prefix_len):
-+        if (port_id and tap_name and
-+            ip_address and instance_id and
-+            vn_id and mac_address and
-+            host and prefix_len):
-+            import ConfigParser
-+            config = ConfigParser.RawConfigParser()
-+            config.add_section('Vif')
-+            config.set('Vif', 'port_id', port_id)
-+            config.set('Vif', 'tap_name', tap_name)
-+            config.set('Vif', 'ip_address', ip_address)
-+            config.set('Vif', 'instance_id', instance_id)
-+            config.set('Vif', 'vn_id', vn_id)
-+            config.set('Vif', 'mac_address', mac_address)
-+            config.set('Vif', 'display_name', display_name)
-+            config.set('Vif', 'hostname', hostname)
-+            config.set('Vif', 'host', host)
-+            config.set('Vif', 'prefix_len', prefix_len)
-+            with open(VIF_DIR + port_id, 'wb') as configfile:
-+                config.write(configfile)
-+
-+    # end __update_vif
-+# end ContrailVifUpdate
-+
-+def update_vif_file(args):
-+    if args.which == 'create':
-+        if (args.port_id and args.tap_name and
-+            args.ip_address and args.instance_id and
-+            args.vn_id and args.mac_address and
-+            args.host and args.prefix_len):
-+            ContrailVifUpdate(args.port_id, args.tap_name,
-+                              args.ip_address, args.instance_id,
-+                              args.vn_id, args.mac_address, args.display_name,
-+                              args.hostname, args.host, args.prefix_len)
-+    elif args.which == 'delete':
-+        if args.port_id is not None:
-+            ContrailVifDelete(args.port_id)
-+
-+# end update_vif_file
-+
-+def parse_args(args_str):
-+    '''
-+    '''
-+    # Source any specified config/ini file
-+    # Turn off help, so we show all options in response to -h
-+    conf_parser = argparse.ArgumentParser(add_help=False)
-+    args, remaining_argv = conf_parser.parse_known_args(args_str.split())
-+    parser = argparse.ArgumentParser(
-+        # Inherit options from config_parser
-+        parents=[conf_parser],
-+        # print script description with -h/--help
-+        description=__doc__,
-+        # Don't mess with format of description
-+        formatter_class=argparse.RawDescriptionHelpFormatter,
-+        )
-+    #defaults.update(secopts)
-+    #defaults.update(ksopts)
-+    #parser.set_defaults(**defaults)
-+
-+    subparsers = parser.add_subparsers(help='commands')
-+    create_parser = subparsers.add_parser('create', help='Create/Modify vif')
-+    create_parser.set_defaults(which='create')
-+    create_parser.add_argument("--port_id", help = "port id")
-+    create_parser.add_argument("--tap_name", help = "tap_name")
-+    create_parser.add_argument("--ip_address", help = "ip_address")
-+    create_parser.add_argument("--instance_id", help = "instance_id")
-+    create_parser.add_argument("--vn_id", help = "vn_id")
-+    create_parser.add_argument("--mac_address", help = "mac_address")
-+    create_parser.add_argument("--display_name", help = "display_name")
-+    create_parser.add_argument("--hostname", help
= "hostname") -+ create_parser.add_argument("--host", help = "host") -+ create_parser.add_argument("--prefix_len", help = "prefix_len") -+ delete_parser = subparsers.add_parser('delete', help='Delete vif') -+ delete_parser.set_defaults(which='delete') -+ delete_parser.add_argument("--port_id", help = "port id") -+ print parser.parse_args() -+ args = parser.parse_args(remaining_argv) -+ update_vif_file(args) -+# end parse_args -+ -+def main(args_str=None): -+ if not args_str: -+ args_str = ' '.join(sys.argv[1:]) -+ args = parse_args(args_str) -+# end main -+ -+if __name__ == '__main__': -+ cgitb.enable(format='text') -+ main() -diff --git plugins/contrail/contrail_vif.py plugins/contrail/contrail_vif.py -new file mode 100644 -index 0000000..7d004a0 ---- /dev/null -+++ plugins/contrail/contrail_vif.py -@@ -0,0 +1,298 @@ -+# -+# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. -+# -+ -+""" -+contril vif plug, communicate with contrail agent -+""" -+import os -+import pyinotify -+import sys -+import socket -+import cgitb -+from nova import utils -+from nova import exception -+from nova.openstack.common import log as logging -+from pyinotify import WatchManager, Notifier, ThreadedNotifier, EventsCodes, ProcessEvent -+import gevent -+from gevent import monkey; monkey.patch_time() -+ -+LOG = logging.getLogger(__name__) -+ -+''' -+struct Port { -+ 1:required tuuid port_id, -+ 2:required tuuid instance_id, -+ 3:required string tap_name, -+ 4:required string ip_address, -+ 5:required tuuid vn_id, -+ 6:required string mac_address, -+ 7:optional string display_name, -+ 8:optional string hostname, -+ 9:optional string host; -+ 10: optional byte prefix_len; -+} -+''' -+ -+class ContrailVifPlug(object): -+ "" -+ "" -+ def __init__(self): -+ self._agent_alive = False -+ self._agent_connected = False -+ self._port_dict = {} -+ self._protocol = None -+ self._notifier = None -+ #end __init__ -+ -+ def _agent_connect(self, protocol): -+ # Agent connect for first time -+ if protocol != None: -+ from instance_service import InstanceService -+ service = InstanceService.Client(protocol) -+ return service.Connect() -+ else: -+ return False -+ #end __agent_connect -+ -+ def _keep_alive(self): -+ try: -+ if self._agent_alive == False: -+ self._protocol = self._agent_conn_open() -+ if self._protocol == None: -+ return -+ from instance_service import InstanceService -+ service = InstanceService.Client(self._protocol) -+ aa_latest = service.KeepAliveCheck() -+ if self._agent_alive == False and aa_latest == True: -+ port_l = [v for k, v in self._port_dict.iteritems()] -+ service.AddPort(port_l) -+ self._agent_alive = True -+ return -+ if self._agent_alive == True and aa_latest == False: -+ self._agent_alive = False -+ return -+ except: -+ self._agent_alive = False -+ #end _keep_alive -+ -+ def _agent_conn_open(self): -+ import socket -+ import sys -+ import uuid -+ from thrift.transport import TTransport, TSocket -+ from thrift.transport.TTransport import TTransportException -+ from thrift.protocol import TBinaryProtocol, TProtocol -+ from instance_service import InstanceService -+ from instance_service import ttypes -+ try: -+ socket = TSocket.TSocket("127.0.0.1", 9090) -+ transport = TTransport.TFramedTransport(socket) -+ transport.open() -+ protocol = TBinaryProtocol.TBinaryProtocol(transport) -+ self._agent_connected = self._agent_connect(protocol) -+ return protocol -+ except TTransportException: -+ return None -+ #end _agent_conn_open -+ -+ def get_dev_name(self, iface_id): -+ return "tap" + 
iface_id[0:11] -+ #end get_dev_name -+ -+ def _convert_to_bl(self, id): -+ import uuid -+ hexstr = uuid.UUID(id).hex -+ return [int(hexstr[i:i+2], 16) for i in range(32) if i%2 == 0] -+ #end _convert_to_bl -+ -+ def _agent_inform_port_add(self, port, port_id): -+ # First add to the port list -+ self._port_dict[port_id] = port -+ if not self._agent_alive: -+ return -+ from instance_service import InstanceService -+ import socket -+ try: -+ service = InstanceService.Client(self._protocol) -+ service.AddPort([port]) -+ except: -+ self._agent_alive = False -+ #end _agent_inform_port_add -+ -+ def _agent_inform_port_delete(self, port_id): -+ # First add to the port list -+ if port_id in self._port_dict: -+ del_port_id = self._port_dict[port_id].port_id -+ del self._port_dict[port_id] -+ if not self._agent_alive: -+ return -+ from instance_service import InstanceService -+ import socket -+ try: -+ service = InstanceService.Client(self._protocol) -+ service.DeletePort(del_port_id) -+ except: -+ self._agent_alive = False -+ #end _agent_inform_port_delete -+ -+ -+#end class ContrailVifPlug -+ -+def launch_interval_looping(contrail_vif_plug): -+ while True: -+ contrail_vif_plug._keep_alive() -+ gevent.sleep(2) -+#end launch_internal_looping -+ -+ -+class ContrailVifDir(ProcessEvent): -+ def __init__(self, contrail_vif, vif_dir): -+ self._create_port_list(contrail_vif, vif_dir) -+ self._contrail_vif_plug = contrail_vif -+ #end __init__ -+ -+ def _convert_to_bl(self, id): -+ import uuid -+ hexstr = uuid.UUID(id).hex -+ return [int(hexstr[i:i+2], 16) for i in range(32) if i%2 == 0] -+ #end _convert_to_bl -+ -+ def _create_port_list(self, contrail_vif, vif_dir): -+ import os -+ files = [f for f in os.listdir(vif_dir) if os.path.isfile(os.path.join(vif_dir,f))] -+ for f in files: -+ print f -+ file_name = os.path.join(vif_dir, f) -+ port, port_id = self._read_port_info_from_file(file_name) -+ if port is not None: -+ contrail_vif._agent_inform_port_add(port, port_id) -+ #end create_port_list -+ -+ def _get_port_info(self, config): -+ import ConfigParser -+ #import pdb; pdb.set_trace() -+ from instance_service import ttypes -+ try: -+ vif = 'Vif' -+ port = ttypes.Port(self._convert_to_bl(config.get(vif, 'port_id')), -+ self._convert_to_bl(config.get(vif, 'instance_id')), -+ config.get(vif, 'tap_name'), -+ config.get(vif, 'ip_address'), -+ self._convert_to_bl(config.get(vif, 'vn_id')), -+ config.get(vif, 'mac_address'), -+ config.get(vif, 'display_name'), -+ config.get(vif, 'hostname'), -+ config.get(vif, 'host')) -+ #print config.get(vif, 'prefix_len') -+ return port -+ except: -+ return None -+ -+ def _print_port_info(self, config): -+ import ConfigParser -+ try: -+ vif = 'Vif' -+ print config.get(vif, 'port_id') -+ print config.get(vif, 'instance_id') -+ print config.get(vif, 'tap_name') -+ print config.get(vif, 'ip_address') -+ print config.get(vif, 'vn_id') -+ print config.get(vif, 'mac_address') -+ print config.get(vif, 'display_name') -+ print config.get(vif, 'hostname') -+ print config.get(vif, 'host') -+ print config.get(vif, 'prefix_len') -+ except: -+ return -+ #end __print_port_into -+ -+ def _read_port_info_from_file(self, file_name): -+ import ConfigParser -+ config = ConfigParser.ConfigParser() -+ config.read(file_name) -+ self._print_port_info(config) -+ port = self._get_port_info(config) -+ if port is not None: -+ return port, config.get('Vif', 'port_id') -+ else: -+ return None, None -+ #end _read_port_info_from_file -+ -+ def _is_allowed(self, file_name): -+ ret = True -+ if (file_name[0] 
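For reference, the per-port file parsed by _read_port_info_from_file() is the plain ini that config_parser.py wrote above. A representative file and read-back in the style of _get_port_info(), with made-up values (sketch):

    # Shape of a /opt/stack/nova/plugins/contrail/vif/<port_id> file.
    import ConfigParser
    from StringIO import StringIO

    sample = '\n'.join([
        '[Vif]',
        'port_id = de305d54-75b4-431b-adb2-eb6b9e546014',
        'tap_name = tapde305d54-75',
        'ip_address = 10.0.0.3',
        'instance_id = 7d1f1b0e-0aa4-4d4c-b6a6-1d0f3f5aa001',
        'vn_id = a0b1c2d3-e4f5-0617-2839-4a5b6c7d8e9f',
        'mac_address = 02:de:30:5d:54:75',
        'display_name = demo-vm',
        'hostname = demo-vm',
        'host = compute-01',
        'prefix_len = 24',
    ])
    config = ConfigParser.ConfigParser()
    config.readfp(StringIO(sample))
    print config.get('Vif', 'tap_name'), config.get('Vif', 'prefix_len')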
== '.' or file_name[-1] == '~'): -+ ret = False -+ return ret -+ #end _is_allowed -+ -+ def process_IN_CREATE(self, event): -+ file_name = os.path.join(event.path, event.name) -+ if not self._is_allowed(event.name): -+ return -+ print "Create: %s" % file_name -+ port, port_id = self._read_port_info_from_file(file_name) -+ if port is not None: -+ print "In create: %s" % port.tap_name -+ self._contrail_vif_plug._agent_inform_port_add(port, port_id) -+ -+ #end process_IN_CREATE -+ -+ def process_IN_DELETE(self, event): -+ if not self._is_allowed(event.name): -+ return -+ print "Remove: %s" % os.path.join(event.path, event.name) -+ file_name = os.path.join(event.path, event.name) -+ #import pdb; pdb.set_trace() -+ if self._is_allowed(event.name): -+ self._contrail_vif_plug._agent_inform_port_delete(event.name) -+ return -+ -+ #end process_IN_DELETE -+ -+ def process_IN_MODIFY(self, event): -+ if not self._is_allowed(event.name): -+ return -+ file_name = os.path.join(event.path, event.name) -+ print "Modify: %s" % file_name -+ port, port_id = self._read_port_info_from_file(file_name) -+ if port is not None: -+ print "In modify %s" % port.tap_name -+ self._contrail_vif_plug._agent_inform_port_add(port, port_id) -+ #end process_IN_MODIFY -+#end ContrilVifDir -+ -+ -+VIF_DIR = '/opt/stack/nova/plugins/contrail/vif' -+def contrail_vif_dir_monitor(contrail_vif_plug): -+ #import pdb; pdb.set_trace() -+ wm = WatchManager() -+ notifier = pyinotify.ThreadedNotifier(wm, ContrailVifDir(contrail_vif_plug, VIF_DIR)) -+ contrail_vif_plug._notifier = notifier -+ # watched events -+ mask = pyinotify.IN_DELETE | \ -+ pyinotify.IN_CREATE | \ -+ pyinotify.IN_MODIFY | \ -+ pyinotify.IN_ISDIR -+ wm.add_watch(VIF_DIR, mask, quiet=False) -+ notifier.start() -+#end contrail_vif_dir_monitor -+ -+def main(args_str = None): -+ try: -+ os.makedirs(VIF_DIR) -+ except OSError: -+ if os.path.exists(VIF_DIR): -+ pass -+ else: -+ raise -+ contrail_vif_plug = ContrailVifPlug() -+ contrail_vif_dir_monitor_task = gevent.spawn(contrail_vif_dir_monitor, contrail_vif_plug) -+ #import pdb; pdb.set_trace() -+ contrail_timer_task = gevent.spawn(launch_interval_looping, contrail_vif_plug) -+ gevent.joinall([contrail_timer_task, contrail_vif_dir_monitor_task]) -+#end main -+ -+if __name__ == '__main__': -+ cgitb.enable(format='text') -+ main() -diff --git plugins/contrail/instance_service/InstanceService-remote plugins/contrail/instance_service/InstanceService-remote -new file mode 100644 -index 0000000..76626d4 ---- /dev/null -+++ plugins/contrail/instance_service/InstanceService-remote -@@ -0,0 +1,165 @@ -+#!/usr/bin/env python -+# -+# Autogenerated by Thrift Compiler (0.8.0) -+# -+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -+# -+# options string: py -+# -+ -+import sys -+import pprint -+from urlparse import urlparse -+from thrift.transport import TTransport -+from thrift.transport import TSocket -+from thrift.transport import THttpClient -+from thrift.protocol import TBinaryProtocol -+ -+import InstanceService -+from ttypes import * -+ -+if len(sys.argv) <= 1 or sys.argv[1] == '--help': -+ print '' -+ print 'Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] function [arg1 [arg2...]]' -+ print '' -+ print 'Functions:' -+ print ' bool AddPort(PortList port_list)' -+ print ' bool KeepAliveCheck()' -+ print ' bool Connect()' -+ print ' bool DeletePort(tuuid port_id)' -+ print ' bool TunnelNHEntryAdd(string src_ip, string dst_ip, string vrf_name)' -+ print ' bool TunnelNHEntryDelete(string src_ip, 
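The watcher wiring in contrail_vif_dir_monitor() is stock pyinotify; stripped to its skeleton it looks like the sketch below (handlers fire per bit set in the mask):

    # Skeleton of the inotify loop above: ProcessEvent callbacks per mask bit.
    import pyinotify

    class VifHandler(pyinotify.ProcessEvent):
        def process_IN_CREATE(self, event):
            print 'created:', event.pathname
        def process_IN_DELETE(self, event):
            print 'removed:', event.pathname
        def process_IN_MODIFY(self, event):
            print 'modified:', event.pathname

    wm = pyinotify.WatchManager()
    notifier = pyinotify.ThreadedNotifier(wm, VifHandler())
    mask = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
    wm.add_watch('/opt/stack/nova/plugins/contrail/vif', mask, quiet=False)
    notifier.start()   # runs in its own thread; notifier.stop() to end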
string dst_ip, string vrf_name)' -+ print ' bool RouteEntryAdd(string ip_address, string gw_ip, string vrf_name, string label)' -+ print ' bool RouteEntryDelete(string ip_address, string vrf_name)' -+ print ' bool AddHostRoute(string ip_address, string vrf_name)' -+ print ' bool AddLocalVmRoute(string ip_address, string intf_uuid, string vrf_name, string label)' -+ print ' bool AddRemoteVmRoute(string ip_address, string gw_ip, string vrf_name, string label)' -+ print ' bool CreateVrf(string vrf_name)' -+ print '' -+ sys.exit(0) -+ -+pp = pprint.PrettyPrinter(indent = 2) -+host = 'localhost' -+port = 9090 -+uri = '' -+framed = False -+http = False -+argi = 1 -+ -+if sys.argv[argi] == '-h': -+ parts = sys.argv[argi+1].split(':') -+ host = parts[0] -+ if len(parts) > 1: -+ port = int(parts[1]) -+ argi += 2 -+ -+if sys.argv[argi] == '-u': -+ url = urlparse(sys.argv[argi+1]) -+ parts = url[1].split(':') -+ host = parts[0] -+ if len(parts) > 1: -+ port = int(parts[1]) -+ else: -+ port = 80 -+ uri = url[2] -+ if url[4]: -+ uri += '?%s' % url[4] -+ http = True -+ argi += 2 -+ -+if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed': -+ framed = True -+ argi += 1 -+ -+cmd = sys.argv[argi] -+args = sys.argv[argi+1:] -+ -+if http: -+ transport = THttpClient.THttpClient(host, port, uri) -+else: -+ socket = TSocket.TSocket(host, port) -+ if framed: -+ transport = TTransport.TFramedTransport(socket) -+ else: -+ transport = TTransport.TBufferedTransport(socket) -+protocol = TBinaryProtocol.TBinaryProtocol(transport) -+client = InstanceService.Client(protocol) -+transport.open() -+ -+if cmd == 'AddPort': -+ if len(args) != 1: -+ print 'AddPort requires 1 args' -+ sys.exit(1) -+ pp.pprint(client.AddPort(eval(args[0]),)) -+ -+elif cmd == 'KeepAliveCheck': -+ if len(args) != 0: -+ print 'KeepAliveCheck requires 0 args' -+ sys.exit(1) -+ pp.pprint(client.KeepAliveCheck()) -+ -+elif cmd == 'Connect': -+ if len(args) != 0: -+ print 'Connect requires 0 args' -+ sys.exit(1) -+ pp.pprint(client.Connect()) -+ -+elif cmd == 'DeletePort': -+ if len(args) != 1: -+ print 'DeletePort requires 1 args' -+ sys.exit(1) -+ pp.pprint(client.DeletePort(eval(args[0]),)) -+ -+elif cmd == 'TunnelNHEntryAdd': -+ if len(args) != 3: -+ print 'TunnelNHEntryAdd requires 3 args' -+ sys.exit(1) -+ pp.pprint(client.TunnelNHEntryAdd(args[0],args[1],args[2],)) -+ -+elif cmd == 'TunnelNHEntryDelete': -+ if len(args) != 3: -+ print 'TunnelNHEntryDelete requires 3 args' -+ sys.exit(1) -+ pp.pprint(client.TunnelNHEntryDelete(args[0],args[1],args[2],)) -+ -+elif cmd == 'RouteEntryAdd': -+ if len(args) != 4: -+ print 'RouteEntryAdd requires 4 args' -+ sys.exit(1) -+ pp.pprint(client.RouteEntryAdd(args[0],args[1],args[2],args[3],)) -+ -+elif cmd == 'RouteEntryDelete': -+ if len(args) != 2: -+ print 'RouteEntryDelete requires 2 args' -+ sys.exit(1) -+ pp.pprint(client.RouteEntryDelete(args[0],args[1],)) -+ -+elif cmd == 'AddHostRoute': -+ if len(args) != 2: -+ print 'AddHostRoute requires 2 args' -+ sys.exit(1) -+ pp.pprint(client.AddHostRoute(args[0],args[1],)) -+ -+elif cmd == 'AddLocalVmRoute': -+ if len(args) != 4: -+ print 'AddLocalVmRoute requires 4 args' -+ sys.exit(1) -+ pp.pprint(client.AddLocalVmRoute(args[0],args[1],args[2],args[3],)) -+ -+elif cmd == 'AddRemoteVmRoute': -+ if len(args) != 4: -+ print 'AddRemoteVmRoute requires 4 args' -+ sys.exit(1) -+ pp.pprint(client.AddRemoteVmRoute(args[0],args[1],args[2],args[3],)) -+ -+elif cmd == 'CreateVrf': -+ if len(args) != 1: -+ print 'CreateVrf requires 1 args' -+ sys.exit(1) -+ 
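This generated -remote helper doubles as a smoke-test tool against a live agent; -f matters because the agent side uses framed transport. Illustrative invocations (sketch; tool path and vrf name are examples):

    # Driving the generated remote tool; -f selects framed transport.
    #   ./InstanceService-remote -h 127.0.0.1:9090 -f KeepAliveCheck
    #   ./InstanceService-remote -h 127.0.0.1:9090 -f CreateVrf my-vrf
    import subprocess
    subprocess.check_call(['./InstanceService-remote',
                           '-h', '127.0.0.1:9090', '-f', 'KeepAliveCheck'])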
pp.pprint(client.CreateVrf(args[0],)) -+ -+else: -+ print 'Unrecognized method %s' % cmd -+ sys.exit(1) -+ -+transport.close() -diff --git plugins/contrail/instance_service/InstanceService.py plugins/contrail/instance_service/InstanceService.py -new file mode 100644 -index 0000000..2219045 ---- /dev/null -+++ plugins/contrail/instance_service/InstanceService.py -@@ -0,0 +1,2275 @@ -+# -+# Autogenerated by Thrift Compiler (0.8.0) -+# -+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -+# -+# options string: py -+# -+ -+from thrift.Thrift import TType, TMessageType, TException -+from ttypes import * -+from thrift.Thrift import TProcessor -+from thrift.transport import TTransport -+from thrift.protocol import TBinaryProtocol, TProtocol -+try: -+ from thrift.protocol import fastbinary -+except: -+ fastbinary = None -+ -+ -+class Iface: -+ def AddPort(self, port_list): -+ """ -+ Parameters: -+ - port_list -+ """ -+ pass -+ -+ def KeepAliveCheck(self, ): -+ pass -+ -+ def Connect(self, ): -+ pass -+ -+ def DeletePort(self, port_id): -+ """ -+ Parameters: -+ - port_id -+ """ -+ pass -+ -+ def TunnelNHEntryAdd(self, src_ip, dst_ip, vrf_name): -+ """ -+ Parameters: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ pass -+ -+ def TunnelNHEntryDelete(self, src_ip, dst_ip, vrf_name): -+ """ -+ Parameters: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ pass -+ -+ def RouteEntryAdd(self, ip_address, gw_ip, vrf_name, label): -+ """ -+ Parameters: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ pass -+ -+ def RouteEntryDelete(self, ip_address, vrf_name): -+ """ -+ Parameters: -+ - ip_address -+ - vrf_name -+ """ -+ pass -+ -+ def AddHostRoute(self, ip_address, vrf_name): -+ """ -+ Parameters: -+ - ip_address -+ - vrf_name -+ """ -+ pass -+ -+ def AddLocalVmRoute(self, ip_address, intf_uuid, vrf_name, label): -+ """ -+ Parameters: -+ - ip_address -+ - intf_uuid -+ - vrf_name -+ - label -+ """ -+ pass -+ -+ def AddRemoteVmRoute(self, ip_address, gw_ip, vrf_name, label): -+ """ -+ Parameters: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ pass -+ -+ def CreateVrf(self, vrf_name): -+ """ -+ Parameters: -+ - vrf_name -+ """ -+ pass -+ -+ -+class Client(Iface): -+ def __init__(self, iprot, oprot=None): -+ self._iprot = self._oprot = iprot -+ if oprot is not None: -+ self._oprot = oprot -+ self._seqid = 0 -+ -+ def AddPort(self, port_list): -+ """ -+ Parameters: -+ - port_list -+ """ -+ self.send_AddPort(port_list) -+ return self.recv_AddPort() -+ -+ def send_AddPort(self, port_list): -+ self._oprot.writeMessageBegin('AddPort', TMessageType.CALL, self._seqid) -+ args = AddPort_args() -+ args.port_list = port_list -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_AddPort(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = AddPort_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "AddPort failed: unknown result"); -+ -+ def KeepAliveCheck(self, ): -+ self.send_KeepAliveCheck() -+ return self.recv_KeepAliveCheck() -+ -+ def send_KeepAliveCheck(self, ): -+ self._oprot.writeMessageBegin('KeepAliveCheck', TMessageType.CALL, self._seqid) -+ args = KeepAliveCheck_args() -+ args.write(self._oprot) -+ 
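Every method on this Client repeats one synchronous skeleton: write a CALL message, flush, then read back either an EXCEPTION or a result whose success field must be set. The shared shape, factored out as a sketch (not part of the generated code):

    # Generic form of the recv_* methods in this Client.
    from thrift.Thrift import TApplicationException, TMessageType

    def recv_generic(iprot, result_cls, method):
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = result_cls()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT,
                                    '%s failed: unknown result' % method)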
self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_KeepAliveCheck(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = KeepAliveCheck_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "KeepAliveCheck failed: unknown result"); -+ -+ def Connect(self, ): -+ self.send_Connect() -+ return self.recv_Connect() -+ -+ def send_Connect(self, ): -+ self._oprot.writeMessageBegin('Connect', TMessageType.CALL, self._seqid) -+ args = Connect_args() -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_Connect(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = Connect_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "Connect failed: unknown result"); -+ -+ def DeletePort(self, port_id): -+ """ -+ Parameters: -+ - port_id -+ """ -+ self.send_DeletePort(port_id) -+ return self.recv_DeletePort() -+ -+ def send_DeletePort(self, port_id): -+ self._oprot.writeMessageBegin('DeletePort', TMessageType.CALL, self._seqid) -+ args = DeletePort_args() -+ args.port_id = port_id -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_DeletePort(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = DeletePort_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "DeletePort failed: unknown result"); -+ -+ def TunnelNHEntryAdd(self, src_ip, dst_ip, vrf_name): -+ """ -+ Parameters: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ self.send_TunnelNHEntryAdd(src_ip, dst_ip, vrf_name) -+ return self.recv_TunnelNHEntryAdd() -+ -+ def send_TunnelNHEntryAdd(self, src_ip, dst_ip, vrf_name): -+ self._oprot.writeMessageBegin('TunnelNHEntryAdd', TMessageType.CALL, self._seqid) -+ args = TunnelNHEntryAdd_args() -+ args.src_ip = src_ip -+ args.dst_ip = dst_ip -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_TunnelNHEntryAdd(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = TunnelNHEntryAdd_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "TunnelNHEntryAdd failed: unknown result"); -+ -+ def TunnelNHEntryDelete(self, src_ip, dst_ip, vrf_name): -+ """ -+ Parameters: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ self.send_TunnelNHEntryDelete(src_ip, dst_ip, vrf_name) -+ return self.recv_TunnelNHEntryDelete() -+ -+ def 
send_TunnelNHEntryDelete(self, src_ip, dst_ip, vrf_name): -+ self._oprot.writeMessageBegin('TunnelNHEntryDelete', TMessageType.CALL, self._seqid) -+ args = TunnelNHEntryDelete_args() -+ args.src_ip = src_ip -+ args.dst_ip = dst_ip -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_TunnelNHEntryDelete(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = TunnelNHEntryDelete_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "TunnelNHEntryDelete failed: unknown result"); -+ -+ def RouteEntryAdd(self, ip_address, gw_ip, vrf_name, label): -+ """ -+ Parameters: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ self.send_RouteEntryAdd(ip_address, gw_ip, vrf_name, label) -+ return self.recv_RouteEntryAdd() -+ -+ def send_RouteEntryAdd(self, ip_address, gw_ip, vrf_name, label): -+ self._oprot.writeMessageBegin('RouteEntryAdd', TMessageType.CALL, self._seqid) -+ args = RouteEntryAdd_args() -+ args.ip_address = ip_address -+ args.gw_ip = gw_ip -+ args.vrf_name = vrf_name -+ args.label = label -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_RouteEntryAdd(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = RouteEntryAdd_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "RouteEntryAdd failed: unknown result"); -+ -+ def RouteEntryDelete(self, ip_address, vrf_name): -+ """ -+ Parameters: -+ - ip_address -+ - vrf_name -+ """ -+ self.send_RouteEntryDelete(ip_address, vrf_name) -+ return self.recv_RouteEntryDelete() -+ -+ def send_RouteEntryDelete(self, ip_address, vrf_name): -+ self._oprot.writeMessageBegin('RouteEntryDelete', TMessageType.CALL, self._seqid) -+ args = RouteEntryDelete_args() -+ args.ip_address = ip_address -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_RouteEntryDelete(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = RouteEntryDelete_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "RouteEntryDelete failed: unknown result"); -+ -+ def AddHostRoute(self, ip_address, vrf_name): -+ """ -+ Parameters: -+ - ip_address -+ - vrf_name -+ """ -+ self.send_AddHostRoute(ip_address, vrf_name) -+ return self.recv_AddHostRoute() -+ -+ def send_AddHostRoute(self, ip_address, vrf_name): -+ self._oprot.writeMessageBegin('AddHostRoute', TMessageType.CALL, self._seqid) -+ args = AddHostRoute_args() -+ args.ip_address = ip_address -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def 
recv_AddHostRoute(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = AddHostRoute_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "AddHostRoute failed: unknown result"); -+ -+ def AddLocalVmRoute(self, ip_address, intf_uuid, vrf_name, label): -+ """ -+ Parameters: -+ - ip_address -+ - intf_uuid -+ - vrf_name -+ - label -+ """ -+ self.send_AddLocalVmRoute(ip_address, intf_uuid, vrf_name, label) -+ return self.recv_AddLocalVmRoute() -+ -+ def send_AddLocalVmRoute(self, ip_address, intf_uuid, vrf_name, label): -+ self._oprot.writeMessageBegin('AddLocalVmRoute', TMessageType.CALL, self._seqid) -+ args = AddLocalVmRoute_args() -+ args.ip_address = ip_address -+ args.intf_uuid = intf_uuid -+ args.vrf_name = vrf_name -+ args.label = label -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_AddLocalVmRoute(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = AddLocalVmRoute_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "AddLocalVmRoute failed: unknown result"); -+ -+ def AddRemoteVmRoute(self, ip_address, gw_ip, vrf_name, label): -+ """ -+ Parameters: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ self.send_AddRemoteVmRoute(ip_address, gw_ip, vrf_name, label) -+ return self.recv_AddRemoteVmRoute() -+ -+ def send_AddRemoteVmRoute(self, ip_address, gw_ip, vrf_name, label): -+ self._oprot.writeMessageBegin('AddRemoteVmRoute', TMessageType.CALL, self._seqid) -+ args = AddRemoteVmRoute_args() -+ args.ip_address = ip_address -+ args.gw_ip = gw_ip -+ args.vrf_name = vrf_name -+ args.label = label -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_AddRemoteVmRoute(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = AddRemoteVmRoute_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "AddRemoteVmRoute failed: unknown result"); -+ -+ def CreateVrf(self, vrf_name): -+ """ -+ Parameters: -+ - vrf_name -+ """ -+ self.send_CreateVrf(vrf_name) -+ return self.recv_CreateVrf() -+ -+ def send_CreateVrf(self, vrf_name): -+ self._oprot.writeMessageBegin('CreateVrf', TMessageType.CALL, self._seqid) -+ args = CreateVrf_args() -+ args.vrf_name = vrf_name -+ args.write(self._oprot) -+ self._oprot.writeMessageEnd() -+ self._oprot.trans.flush() -+ -+ def recv_CreateVrf(self, ): -+ (fname, mtype, rseqid) = self._iprot.readMessageBegin() -+ if mtype == TMessageType.EXCEPTION: -+ x = TApplicationException() -+ x.read(self._iprot) -+ self._iprot.readMessageEnd() -+ raise x -+ result = CreateVrf_result() -+ result.read(self._iprot) -+ self._iprot.readMessageEnd() -+ if result.success is not 
None: -+ return result.success -+ raise TApplicationException(TApplicationException.MISSING_RESULT, "CreateVrf failed: unknown result"); -+ -+ -+class Processor(Iface, TProcessor): -+ def __init__(self, handler): -+ self._handler = handler -+ self._processMap = {} -+ self._processMap["AddPort"] = Processor.process_AddPort -+ self._processMap["KeepAliveCheck"] = Processor.process_KeepAliveCheck -+ self._processMap["Connect"] = Processor.process_Connect -+ self._processMap["DeletePort"] = Processor.process_DeletePort -+ self._processMap["TunnelNHEntryAdd"] = Processor.process_TunnelNHEntryAdd -+ self._processMap["TunnelNHEntryDelete"] = Processor.process_TunnelNHEntryDelete -+ self._processMap["RouteEntryAdd"] = Processor.process_RouteEntryAdd -+ self._processMap["RouteEntryDelete"] = Processor.process_RouteEntryDelete -+ self._processMap["AddHostRoute"] = Processor.process_AddHostRoute -+ self._processMap["AddLocalVmRoute"] = Processor.process_AddLocalVmRoute -+ self._processMap["AddRemoteVmRoute"] = Processor.process_AddRemoteVmRoute -+ self._processMap["CreateVrf"] = Processor.process_CreateVrf -+ -+ def process(self, iprot, oprot): -+ (name, type, seqid) = iprot.readMessageBegin() -+ if name not in self._processMap: -+ iprot.skip(TType.STRUCT) -+ iprot.readMessageEnd() -+ x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name)) -+ oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid) -+ x.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ return -+ else: -+ self._processMap[name](self, seqid, iprot, oprot) -+ return True -+ -+ def process_AddPort(self, seqid, iprot, oprot): -+ args = AddPort_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = AddPort_result() -+ result.success = self._handler.AddPort(args.port_list) -+ oprot.writeMessageBegin("AddPort", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_KeepAliveCheck(self, seqid, iprot, oprot): -+ args = KeepAliveCheck_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = KeepAliveCheck_result() -+ result.success = self._handler.KeepAliveCheck() -+ oprot.writeMessageBegin("KeepAliveCheck", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_Connect(self, seqid, iprot, oprot): -+ args = Connect_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = Connect_result() -+ result.success = self._handler.Connect() -+ oprot.writeMessageBegin("Connect", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_DeletePort(self, seqid, iprot, oprot): -+ args = DeletePort_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = DeletePort_result() -+ result.success = self._handler.DeletePort(args.port_id) -+ oprot.writeMessageBegin("DeletePort", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_TunnelNHEntryAdd(self, seqid, iprot, oprot): -+ args = TunnelNHEntryAdd_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = TunnelNHEntryAdd_result() -+ result.success = self._handler.TunnelNHEntryAdd(args.src_ip, args.dst_ip, args.vrf_name) -+ oprot.writeMessageBegin("TunnelNHEntryAdd", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_TunnelNHEntryDelete(self, seqid, iprot, oprot): -+ args = TunnelNHEntryDelete_args() 
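The Processor is the server half of the contract: process() dispatches on the message name through _processMap and answers unknown names with UNKNOWN_METHOD. In this series the consumer appears to be the vrouter agent itself, but a Python handler could be wired up as in this hypothetical sketch:

    # Hypothetical Python server for InstanceService; illustration only.
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from thrift.server import TServer
    from instance_service import InstanceService

    class Handler(object):
        # implement the Iface methods a client will actually call
        def Connect(self):
            return True
        def KeepAliveCheck(self):
            return True

    server = TServer.TSimpleServer(
        InstanceService.Processor(Handler()),
        TSocket.TServerSocket(port=9090),
        TTransport.TFramedTransportFactory(),   # framed, to match the clients
        TBinaryProtocol.TBinaryProtocolFactory())
    server.serve()   # blocks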
-+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = TunnelNHEntryDelete_result() -+ result.success = self._handler.TunnelNHEntryDelete(args.src_ip, args.dst_ip, args.vrf_name) -+ oprot.writeMessageBegin("TunnelNHEntryDelete", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_RouteEntryAdd(self, seqid, iprot, oprot): -+ args = RouteEntryAdd_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = RouteEntryAdd_result() -+ result.success = self._handler.RouteEntryAdd(args.ip_address, args.gw_ip, args.vrf_name, args.label) -+ oprot.writeMessageBegin("RouteEntryAdd", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_RouteEntryDelete(self, seqid, iprot, oprot): -+ args = RouteEntryDelete_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = RouteEntryDelete_result() -+ result.success = self._handler.RouteEntryDelete(args.ip_address, args.vrf_name) -+ oprot.writeMessageBegin("RouteEntryDelete", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_AddHostRoute(self, seqid, iprot, oprot): -+ args = AddHostRoute_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = AddHostRoute_result() -+ result.success = self._handler.AddHostRoute(args.ip_address, args.vrf_name) -+ oprot.writeMessageBegin("AddHostRoute", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_AddLocalVmRoute(self, seqid, iprot, oprot): -+ args = AddLocalVmRoute_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = AddLocalVmRoute_result() -+ result.success = self._handler.AddLocalVmRoute(args.ip_address, args.intf_uuid, args.vrf_name, args.label) -+ oprot.writeMessageBegin("AddLocalVmRoute", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_AddRemoteVmRoute(self, seqid, iprot, oprot): -+ args = AddRemoteVmRoute_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = AddRemoteVmRoute_result() -+ result.success = self._handler.AddRemoteVmRoute(args.ip_address, args.gw_ip, args.vrf_name, args.label) -+ oprot.writeMessageBegin("AddRemoteVmRoute", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ def process_CreateVrf(self, seqid, iprot, oprot): -+ args = CreateVrf_args() -+ args.read(iprot) -+ iprot.readMessageEnd() -+ result = CreateVrf_result() -+ result.success = self._handler.CreateVrf(args.vrf_name) -+ oprot.writeMessageBegin("CreateVrf", TMessageType.REPLY, seqid) -+ result.write(oprot) -+ oprot.writeMessageEnd() -+ oprot.trans.flush() -+ -+ -+# HELPER FUNCTIONS AND STRUCTURES -+ -+class AddPort_args: -+ """ -+ Attributes: -+ - port_list -+ """ -+ -+ thrift_spec = None -+ def __init__(self, port_list=None,): -+ self.port_list = port_list -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == -1: -+ if ftype == TType.LIST: -+ self.port_list = [] -+ (_etype24, _size21) = iprot.readListBegin() -+ for _i25 in 
xrange(_size21): -+ _elem26 = Port() -+ _elem26.read(iprot) -+ self.port_list.append(_elem26) -+ iprot.readListEnd() -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddPort_args') -+ if self.port_list is not None: -+ oprot.writeFieldBegin('port_list', TType.LIST, -1) -+ oprot.writeListBegin(TType.STRUCT, len(self.port_list)) -+ for iter27 in self.port_list: -+ iter27.write(oprot) -+ oprot.writeListEnd() -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddPort_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddPort_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class KeepAliveCheck_args: -+ -+ thrift_spec = ( -+ ) -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('KeepAliveCheck_args') -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class KeepAliveCheck_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('KeepAliveCheck_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class Connect_args: -+ -+ thrift_spec = ( -+ ) -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('Connect_args') -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, 
other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class Connect_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('Connect_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class DeletePort_args: -+ """ -+ Attributes: -+ - port_id -+ """ -+ -+ thrift_spec = None -+ def __init__(self, port_id=None,): -+ self.port_id = port_id -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == -1: -+ if ftype == TType.LIST: -+ self.port_id = [] -+ (_etype31, _size28) = iprot.readListBegin() -+ for _i32 in xrange(_size28): -+ _elem33 = iprot.readI16(); -+ self.port_id.append(_elem33) -+ iprot.readListEnd() -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('DeletePort_args') -+ if self.port_id is not None: -+ oprot.writeFieldBegin('port_id', TType.LIST, -1) -+ oprot.writeListBegin(TType.I16, len(self.port_id)) -+ for iter34 in self.port_id: -+ oprot.writeI16(iter34) -+ oprot.writeListEnd() -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class DeletePort_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('DeletePort_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class TunnelNHEntryAdd_args: -+ """ -+ Attributes: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'src_ip', None, None, ), # 1 -+ (2, TType.STRING, 'dst_ip', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ ) -+ -+ def __init__(self, src_ip=None, dst_ip=None, vrf_name=None,): -+ self.src_ip = src_ip -+ self.dst_ip = dst_ip -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.src_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.dst_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('TunnelNHEntryAdd_args') -+ if self.src_ip is not 
None: -+ oprot.writeFieldBegin('src_ip', TType.STRING, 1) -+ oprot.writeString(self.src_ip) -+ oprot.writeFieldEnd() -+ if self.dst_ip is not None: -+ oprot.writeFieldBegin('dst_ip', TType.STRING, 2) -+ oprot.writeString(self.dst_ip) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.src_ip is None: -+ raise TProtocol.TProtocolException(message='Required field src_ip is unset!') -+ if self.dst_ip is None: -+ raise TProtocol.TProtocolException(message='Required field dst_ip is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class TunnelNHEntryAdd_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('TunnelNHEntryAdd_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class TunnelNHEntryDelete_args: -+ """ -+ Attributes: -+ - src_ip -+ - dst_ip -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'src_ip', None, None, ), # 1 -+ (2, TType.STRING, 'dst_ip', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ ) -+ -+ def __init__(self, src_ip=None, dst_ip=None, vrf_name=None,): -+ self.src_ip = src_ip -+ self.dst_ip = dst_ip -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() 
-+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.src_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.dst_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('TunnelNHEntryDelete_args') -+ if self.src_ip is not None: -+ oprot.writeFieldBegin('src_ip', TType.STRING, 1) -+ oprot.writeString(self.src_ip) -+ oprot.writeFieldEnd() -+ if self.dst_ip is not None: -+ oprot.writeFieldBegin('dst_ip', TType.STRING, 2) -+ oprot.writeString(self.dst_ip) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.src_ip is None: -+ raise TProtocol.TProtocolException(message='Required field src_ip is unset!') -+ if self.dst_ip is None: -+ raise TProtocol.TProtocolException(message='Required field dst_ip is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class TunnelNHEntryDelete_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('TunnelNHEntryDelete_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and 
self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class RouteEntryAdd_args: -+ """ -+ Attributes: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'gw_ip', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ (4, TType.STRING, 'label', None, None, ), # 4 -+ ) -+ -+ def __init__(self, ip_address=None, gw_ip=None, vrf_name=None, label=None,): -+ self.ip_address = ip_address -+ self.gw_ip = gw_ip -+ self.vrf_name = vrf_name -+ self.label = label -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.gw_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 4: -+ if ftype == TType.STRING: -+ self.label = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('RouteEntryAdd_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.gw_ip is not None: -+ oprot.writeFieldBegin('gw_ip', TType.STRING, 2) -+ oprot.writeString(self.gw_ip) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ if self.label is not None: -+ oprot.writeFieldBegin('label', TType.STRING, 4) -+ oprot.writeString(self.label) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ if self.gw_ip is None: -+ raise TProtocol.TProtocolException(message='Required field gw_ip is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class RouteEntryAdd_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None 
and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('RouteEntryAdd_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class RouteEntryDelete_args: -+ """ -+ Attributes: -+ - ip_address -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'vrf_name', None, None, ), # 2 -+ ) -+ -+ def __init__(self, ip_address=None, vrf_name=None,): -+ self.ip_address = ip_address -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('RouteEntryDelete_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 2) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ if self.vrf_name is None: -+ raise TProtocol.TProtocolException(message='Required field vrf_name is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class RouteEntryDelete_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('RouteEntryDelete_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddHostRoute_args: -+ """ -+ Attributes: -+ - ip_address -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'vrf_name', None, None, ), # 2 -+ ) -+ -+ def __init__(self, ip_address=None, vrf_name=None,): -+ self.ip_address = ip_address -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddHostRoute_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 2) -+ oprot.writeString(self.vrf_name) -+ 
oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddHostRoute_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddHostRoute_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddLocalVmRoute_args: -+ """ -+ Attributes: -+ - ip_address -+ - intf_uuid -+ - vrf_name -+ - label -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'intf_uuid', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ (4, TType.STRING, 'label', None, None, ), # 4 -+ ) -+ -+ def __init__(self, ip_address=None, intf_uuid=None, vrf_name=None, label=None,): -+ self.ip_address = ip_address -+ self.intf_uuid = intf_uuid -+ self.vrf_name = vrf_name -+ self.label = label -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.intf_uuid = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == 
TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 4: -+ if ftype == TType.STRING: -+ self.label = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddLocalVmRoute_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.intf_uuid is not None: -+ oprot.writeFieldBegin('intf_uuid', TType.STRING, 2) -+ oprot.writeString(self.intf_uuid) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ if self.label is not None: -+ oprot.writeFieldBegin('label', TType.STRING, 4) -+ oprot.writeString(self.label) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ if self.intf_uuid is None: -+ raise TProtocol.TProtocolException(message='Required field intf_uuid is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddLocalVmRoute_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddLocalVmRoute_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == 
other) -+ -+class AddRemoteVmRoute_args: -+ """ -+ Attributes: -+ - ip_address -+ - gw_ip -+ - vrf_name -+ - label -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'ip_address', None, None, ), # 1 -+ (2, TType.STRING, 'gw_ip', None, None, ), # 2 -+ (3, TType.STRING, 'vrf_name', None, None, ), # 3 -+ (4, TType.STRING, 'label', None, None, ), # 4 -+ ) -+ -+ def __init__(self, ip_address=None, gw_ip=None, vrf_name=None, label=None,): -+ self.ip_address = ip_address -+ self.gw_ip = gw_ip -+ self.vrf_name = vrf_name -+ self.label = label -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.ip_address = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 2: -+ if ftype == TType.STRING: -+ self.gw_ip = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 3: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ elif fid == 4: -+ if ftype == TType.STRING: -+ self.label = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddRemoteVmRoute_args') -+ if self.ip_address is not None: -+ oprot.writeFieldBegin('ip_address', TType.STRING, 1) -+ oprot.writeString(self.ip_address) -+ oprot.writeFieldEnd() -+ if self.gw_ip is not None: -+ oprot.writeFieldBegin('gw_ip', TType.STRING, 2) -+ oprot.writeString(self.gw_ip) -+ oprot.writeFieldEnd() -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 3) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ if self.label is not None: -+ oprot.writeFieldBegin('label', TType.STRING, 4) -+ oprot.writeString(self.label) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.ip_address is None: -+ raise TProtocol.TProtocolException(message='Required field ip_address is unset!') -+ if self.gw_ip is None: -+ raise TProtocol.TProtocolException(message='Required field gw_ip is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class AddRemoteVmRoute_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, 
(self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 0: -+ if ftype == TType.BOOL: -+ self.success = iprot.readBool(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('AddRemoteVmRoute_result') -+ if self.success is not None: -+ oprot.writeFieldBegin('success', TType.BOOL, 0) -+ oprot.writeBool(self.success) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class CreateVrf_args: -+ """ -+ Attributes: -+ - vrf_name -+ """ -+ -+ thrift_spec = ( -+ None, # 0 -+ (1, TType.STRING, 'vrf_name', None, None, ), # 1 -+ ) -+ -+ def __init__(self, vrf_name=None,): -+ self.vrf_name = vrf_name -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) -+ return -+ iprot.readStructBegin() -+ while True: -+ (fname, ftype, fid) = iprot.readFieldBegin() -+ if ftype == TType.STOP: -+ break -+ if fid == 1: -+ if ftype == TType.STRING: -+ self.vrf_name = iprot.readString(); -+ else: -+ iprot.skip(ftype) -+ else: -+ iprot.skip(ftype) -+ iprot.readFieldEnd() -+ iprot.readStructEnd() -+ -+ def write(self, oprot): -+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: -+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) -+ return -+ oprot.writeStructBegin('CreateVrf_args') -+ if self.vrf_name is not None: -+ oprot.writeFieldBegin('vrf_name', TType.STRING, 1) -+ oprot.writeString(self.vrf_name) -+ oprot.writeFieldEnd() -+ oprot.writeFieldStop() -+ oprot.writeStructEnd() -+ -+ def validate(self): -+ if self.vrf_name is None: -+ raise TProtocol.TProtocolException(message='Required field vrf_name is unset!') -+ return -+ -+ -+ def __repr__(self): -+ L = ['%s=%r' % (key, value) -+ for key, value in self.__dict__.iteritems()] -+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) -+ -+ def __eq__(self, other): -+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ -+ -+ def __ne__(self, other): -+ return not (self == other) -+ -+class CreateVrf_result: -+ """ -+ Attributes: -+ - success -+ """ -+ -+ thrift_spec = ( -+ (0, TType.BOOL, 'success', None, None, ), # 0 -+ ) -+ -+ def __init__(self, success=None,): -+ self.success = success -+ -+ def read(self, iprot): -+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: -+ 
-+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-+      return
-+    iprot.readStructBegin()
-+    while True:
-+      (fname, ftype, fid) = iprot.readFieldBegin()
-+      if ftype == TType.STOP:
-+        break
-+      if fid == 0:
-+        if ftype == TType.BOOL:
-+          self.success = iprot.readBool();
-+        else:
-+          iprot.skip(ftype)
-+      else:
-+        iprot.skip(ftype)
-+      iprot.readFieldEnd()
-+    iprot.readStructEnd()
-+
-+  def write(self, oprot):
-+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-+      return
-+    oprot.writeStructBegin('CreateVrf_result')
-+    if self.success is not None:
-+      oprot.writeFieldBegin('success', TType.BOOL, 0)
-+      oprot.writeBool(self.success)
-+      oprot.writeFieldEnd()
-+    oprot.writeFieldStop()
-+    oprot.writeStructEnd()
-+
-+  def validate(self):
-+    return
-+
-+
-+  def __repr__(self):
-+    L = ['%s=%r' % (key, value)
-+      for key, value in self.__dict__.iteritems()]
-+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-+
-+  def __eq__(self, other):
-+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-+
-+  def __ne__(self, other):
-+    return not (self == other)
-diff --git plugins/contrail/instance_service/__init__.py plugins/contrail/instance_service/__init__.py
-new file mode 100644
-index 0000000..f34ead4
---- /dev/null
-+++ plugins/contrail/instance_service/__init__.py
-@@ -0,0 +1 @@
-+__all__ = ['ttypes', 'constants', 'InstanceService']
-diff --git plugins/contrail/instance_service/constants.py plugins/contrail/instance_service/constants.py
-new file mode 100644
-index 0000000..73f07fe
---- /dev/null
-+++ plugins/contrail/instance_service/constants.py
-@@ -0,0 +1,11 @@
-+#
-+# Autogenerated by Thrift Compiler (0.8.0)
-+#
-+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-+#
-+# options string: py
-+#
-+
-+from thrift.Thrift import TType, TMessageType, TException
-+from ttypes import *
-+
-diff --git plugins/contrail/instance_service/ttypes.py plugins/contrail/instance_service/ttypes.py
-new file mode 100644
-index 0000000..564f82e
---- /dev/null
-+++ plugins/contrail/instance_service/ttypes.py
-@@ -0,0 +1,210 @@
-+#
-+# Autogenerated by Thrift Compiler (0.8.0)
-+#
-+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-+#
-+# options string: py
-+#
-+
-+from thrift.Thrift import TType, TMessageType, TException
-+
-+from thrift.transport import TTransport
-+from thrift.protocol import TBinaryProtocol, TProtocol
-+try:
-+  from thrift.protocol import fastbinary
-+except:
-+  fastbinary = None
-+
-+
-+
-+class Port:
-+  """
-+  Attributes:
-+   - port_id
-+   - instance_id
-+   - tap_name
-+   - ip_address
-+   - vn_id
-+   - mac_address
-+   - display_name
-+   - hostname
-+   - host
-+  """
-+
-+  thrift_spec = (
-+    None, # 0
-+    (1, TType.LIST, 'port_id', (TType.I16,None), None, ), # 1
-+    (2, TType.LIST, 'instance_id', (TType.I16,None), None, ), # 2
-+    (3, TType.STRING, 'tap_name', None, None, ), # 3
-+    (4, TType.STRING, 'ip_address', None, None, ), # 4
-+    (5, TType.LIST, 'vn_id', (TType.I16,None), None, ), # 5
-+    (6, TType.STRING, 'mac_address', None, None, ), # 6
-+    (7, TType.STRING, 'display_name', None, None, ), # 7
-+    (8, TType.STRING, 'hostname', None, None, ), # 8
-+    (9, TType.STRING, 'host', None, None, ), # 9
-+  )
-+
-+  def __init__(self, port_id=None, instance_id=None, tap_name=None, ip_address=None, vn_id=None, mac_address=None, display_name=None, hostname=None, host=None,):
-+    self.port_id = port_id
-+    self.instance_id = instance_id
-+    self.tap_name = tap_name
-+    self.ip_address = ip_address
-+    self.vn_id = vn_id
-+    self.mac_address = mac_address
-+    self.display_name = display_name
-+    self.hostname = hostname
-+    self.host = host
-+
-+  def read(self, iprot):
-+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-+      return
-+    iprot.readStructBegin()
-+    while True:
-+      (fname, ftype, fid) = iprot.readFieldBegin()
-+      if ftype == TType.STOP:
-+        break
-+      if fid == 1:
-+        if ftype == TType.LIST:
-+          self.port_id = []
-+          (_etype3, _size0) = iprot.readListBegin()
-+          for _i4 in xrange(_size0):
-+            _elem5 = iprot.readI16();
-+            self.port_id.append(_elem5)
-+          iprot.readListEnd()
-+        else:
-+          iprot.skip(ftype)
-+      elif fid == 2:
-+        if ftype == TType.LIST:
-+          self.instance_id = []
-+          (_etype9, _size6) = iprot.readListBegin()
-+          for _i10 in xrange(_size6):
-+            _elem11 = iprot.readI16();
-+            self.instance_id.append(_elem11)
-+          iprot.readListEnd()
-+        else:
-+          iprot.skip(ftype)
-+      elif fid == 3:
-+        if ftype == TType.STRING:
-+          self.tap_name = iprot.readString();
-+        else:
-+          iprot.skip(ftype)
-+      elif fid == 4:
-+        if ftype == TType.STRING:
-+          self.ip_address = iprot.readString();
-+        else:
-+          iprot.skip(ftype)
-+      elif fid == 5:
-+        if ftype == TType.LIST:
-+          self.vn_id = []
-+          (_etype15, _size12) = iprot.readListBegin()
-+          for _i16 in xrange(_size12):
-+            _elem17 = iprot.readI16();
-+            self.vn_id.append(_elem17)
-+          iprot.readListEnd()
-+        else:
-+          iprot.skip(ftype)
-+      elif fid == 6:
-+        if ftype == TType.STRING:
-+          self.mac_address = iprot.readString();
-+        else:
-+          iprot.skip(ftype)
-+      elif fid == 7:
-+        if ftype == TType.STRING:
-+          self.display_name = iprot.readString();
-+        else:
-+          iprot.skip(ftype)
-+      elif fid == 8:
-+        if ftype == TType.STRING:
-+          self.hostname = iprot.readString();
-+        else:
-+          iprot.skip(ftype)
-+      elif fid == 9:
-+        if ftype == TType.STRING:
-+          self.host = iprot.readString();
-+        else:
-+          iprot.skip(ftype)
-+      else:
-+        iprot.skip(ftype)
-+      iprot.readFieldEnd()
-+    iprot.readStructEnd()
-+
-+  def write(self, oprot):
-+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-+      return
-+    oprot.writeStructBegin('Port')
-+    if self.port_id is not None:
-+      oprot.writeFieldBegin('port_id', TType.LIST, 1)
-+      oprot.writeListBegin(TType.I16, len(self.port_id))
-+      for iter18 in self.port_id:
-+        oprot.writeI16(iter18)
-+      oprot.writeListEnd()
-+      oprot.writeFieldEnd()
-+    if self.instance_id is not None:
-+      oprot.writeFieldBegin('instance_id', TType.LIST, 2)
-+      oprot.writeListBegin(TType.I16, len(self.instance_id))
-+      for iter19 in self.instance_id:
-+        oprot.writeI16(iter19)
-+      oprot.writeListEnd()
-+      oprot.writeFieldEnd()
-+    if self.tap_name is not None:
-+      oprot.writeFieldBegin('tap_name', TType.STRING, 3)
-+      oprot.writeString(self.tap_name)
-+      oprot.writeFieldEnd()
-+    if self.ip_address is not None:
-+      oprot.writeFieldBegin('ip_address', TType.STRING, 4)
-+      oprot.writeString(self.ip_address)
-+      oprot.writeFieldEnd()
-+    if self.vn_id is not None:
-+      oprot.writeFieldBegin('vn_id', TType.LIST, 5)
-+      oprot.writeListBegin(TType.I16, len(self.vn_id))
-+      for iter20 in self.vn_id:
-+        oprot.writeI16(iter20)
-+      oprot.writeListEnd()
-+      oprot.writeFieldEnd()
-+    if self.mac_address is not None:
-+      oprot.writeFieldBegin('mac_address', TType.STRING, 6)
-+      oprot.writeString(self.mac_address)
-+      oprot.writeFieldEnd()
-+    if self.display_name is not None:
-+      oprot.writeFieldBegin('display_name', TType.STRING, 7)
-+      oprot.writeString(self.display_name)
-+      oprot.writeFieldEnd()
-+    if self.hostname is not None:
-+      oprot.writeFieldBegin('hostname', TType.STRING, 8)
-+      oprot.writeString(self.hostname)
-+      oprot.writeFieldEnd()
-+    if self.host is not None:
-+      oprot.writeFieldBegin('host', TType.STRING, 9)
-+      oprot.writeString(self.host)
-+      oprot.writeFieldEnd()
-+    oprot.writeFieldStop()
-+    oprot.writeStructEnd()
-+
-+  def validate(self):
-+    if self.port_id is None:
-+      raise TProtocol.TProtocolException(message='Required field port_id is unset!')
-+    if self.instance_id is None:
-+      raise TProtocol.TProtocolException(message='Required field instance_id is unset!')
-+    if self.tap_name is None:
-+      raise TProtocol.TProtocolException(message='Required field tap_name is unset!')
-+    if self.ip_address is None:
-+      raise TProtocol.TProtocolException(message='Required field ip_address is unset!')
-+    if self.vn_id is None:
-+      raise TProtocol.TProtocolException(message='Required field vn_id is unset!')
-+    if self.mac_address is None:
-+      raise TProtocol.TProtocolException(message='Required field mac_address is unset!')
-+    return
-+
-+
-+  def __repr__(self):
-+    L = ['%s=%r' % (key, value)
-+      for key, value in self.__dict__.iteritems()]
-+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-+
-+  def __eq__(self, other):
-+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-+
-+  def __ne__(self, other):
-+    return not (self == other)
diff --git a/lib/neutron_plugins/contrail b/lib/neutron_plugins/contrail
index 8c97520e4b..ac51d06599 100644
--- a/lib/neutron_plugins/contrail
+++ b/lib/neutron_plugins/contrail
@@ -18,10 +18,11 @@ function neutron_plugin_configure_common() {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/juniper/contrail
     Q_PLUGIN_CONF_FILENAME=ContrailPlugin.ini
     Q_DB_NAME="contrail_neutron"
-    Q_PLUGIN_CLASS="neutron.plugins.juniper.contrail.contrailplugin.ContrailPlugin"
+    Q_PLUGIN_CLASS="neutron_plugin_contrail.plugins.opencontrail.contrailplugin.ContrailPlugin"
 }
 
 function neutron_plugin_configure_service() {
+    iniset $NEUTRON_CONF DEFAULT api_extensions_path $DEST/contrail/openstack/neutron_plugin/neutron_plugin_contrail/extensions/
     iniset $NEUTRON_CONF quotas quota_driver neutron.quota.ConfDriver
 }
 
diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail
index fbe4e87474..ddcbec3c94 100644
--- a/lib/neutron_thirdparty/contrail
+++ b/lib/neutron_thirdparty/contrail
@@ -102,9 +102,8 @@ function install_contrail() {
 
     # install VIF driver
     sudo pip install $CONTRAIL_SRC/build/noarch/nova_contrail_vif/dist/nova_contrail_vif*.tar.gz
-
-    # install neutron patch after VNC api is built and installed
-    # test_install_neutron_patch
+    # install Neutron OpenContrail plugin
+    sudo pip install -e $CONTRAIL_SRC/openstack/neutron_plugin/
 
     # get cassandra
     if ! which cassandra > /dev/null 2>&1 ; then
@@ -235,18 +234,6 @@ function apply_patch() {
     fi
 }
 
-function test_install_neutron_patch() {
-    apply_patch $TOP_DIR/contrail/neutron_v4.patch $DEST/neutron
-}
-
-function test_install_nova_patch() {
-    apply_patch $TOP_DIR/contrail/nova_v4.patch $DEST/nova
-    if [ -e $DEST/nova/plugins/contrail/config_parser.py ]; then
-        sudo cp $DEST/nova/plugins/contrail/config_parser.py /usr/bin/config_parser
-        sudo chmod +x /usr/bin/config_parser
-    fi
-}
-
 # take over physical interface
 function insert_vrouter() {
     source /etc/contrail/agent_param

From c16de55de5cfaa38020e5cfbbb789fd482e89ec9 Mon Sep 17 00:00:00 2001
From: Deepinder Setia
Date: Thu, 5 Jun 2014 21:15:45 +0000
Subject: [PATCH 05/25] Update neutron config based on modular plugin

---
 contrail/contrail_config_templates.py | 1 +
 lib/neutron_plugins/contrail          | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/contrail/contrail_config_templates.py b/contrail/contrail_config_templates.py
index 845833a262..031091d4f5 100644
--- a/contrail/contrail_config_templates.py
+++ b/contrail/contrail_config_templates.py
@@ -40,6 +40,7 @@
 api_server_ip = $__contrail_api_server_ip__
 api_server_port = $__contrail_api_server_port__
 multi_tenancy = $__contrail_multi_tenancy__
+contrail_extensions = ipam:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_ipam.NeutronPluginContrailIpam,policy:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_policy.NeutronPluginContrailPolicy,route-table:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_vpc.NeutronPluginContrailVpc
 
 [KEYSTONE]
 ;auth_url = http://$__contrail_keystone_ip__:35357/v2.0
diff --git a/lib/neutron_plugins/contrail b/lib/neutron_plugins/contrail
index ac51d06599..774e39e355 100644
--- a/lib/neutron_plugins/contrail
+++ b/lib/neutron_plugins/contrail
@@ -18,7 +18,7 @@ function neutron_plugin_configure_common() {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/juniper/contrail
     Q_PLUGIN_CONF_FILENAME=ContrailPlugin.ini
     Q_DB_NAME="contrail_neutron"
-    Q_PLUGIN_CLASS="neutron_plugin_contrail.plugins.opencontrail.contrailplugin.ContrailPlugin"
+    Q_PLUGIN_CLASS="neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_core.NeutronPluginContrailCoreV2"
 }
 
 function neutron_plugin_configure_service() {

From 32313078eea0070e71cd4e535200fd0575cffcfd Mon Sep 17 00:00:00 2001
From: Deepinder Setia
Date: Thu, 5 Jun 2014 22:03:50 +0000
Subject: [PATCH 06/25] VIF create now needs xconnect option

---
 lib/neutron_thirdparty/contrail | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail
index fe54c325b0..9a72a421c7 100644
--- a/lib/neutron_thirdparty/contrail
+++ b/lib/neutron_thirdparty/contrail
@@ -255,7 +255,7 @@ function insert_vrouter() {
         || echo "Error creating interface: $DEVICE"
 
     echo "Adding $DEVICE to vrouter"
-    sudo $VIF --add $DEVICE --mac $DEV_MAC --vrf 0 --mode x --type vhost \
+    sudo $VIF --add $DEVICE --mac $DEV_MAC --vrf 0 --xconnect $dev --mode x --type vhost \
         || echo "Error adding $DEVICE to vrouter"
 
     echo "Adding $dev to vrouter"

From 05144ea07bfa1d47eedceb886e07b454f48e506b Mon Sep 17 00:00:00 2001
From: Deepinder Setia
Date: Sat, 7 Jun 2014 18:32:08 +0000
Subject: [PATCH 07/25] Add knob to skip sync/build

---
 contrail/localrc-single         | 3 +++
 lib/neutron_thirdparty/contrail | 9 ++++-----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/contrail/localrc-single b/contrail/localrc-single
index 8384f9c7e3..d427886a6d 100644
--- a/contrail/localrc-single
+++ b/contrail/localrc-single
@@ -57,3 +57,6 @@ NOVA_VIF_DRIVER=nova_contrail_vif.contrailvif.VRouterVIFDriver
 
 CASS_MAX_HEAP_SIZE=1G
 CASS_HEAP_NEWSIZE=200M
+
+# set CONTRAIL_REPO_SKIP_SYNC to avoid sync and build
+# CONTRAIL_REPO_SKIP_SYNC=yes
diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail
index 9a72a421c7..90cb29dbe2 100644
--- a/lib/neutron_thirdparty/contrail
+++ b/lib/neutron_thirdparty/contrail
@@ -86,14 +86,13 @@ function install_contrail() {
         fi
     fi
 
-    # If CONTRAIL_REPO_SETUP_SKIP is not set, sync the repo.
-    if [ -z $CONTRAIL_REPO_SYNC_SKIP ]; then
+    # If CONTRAIL_REPO_SKIP_SYNC is not set, sync the repo.
+    if [ -z $CONTRAIL_REPO_SKIP_SYNC ]; then
         repo sync
         python third_party/fetch_packages.py
+        (cd third_party/thrift-*; touch configure.ac README ChangeLog; autoreconf --force --install)
+        scons
     fi
-
-    (cd third_party/thrift-*; touch configure.ac README ChangeLog; autoreconf --force --install)
-    scons
     cd ${contrail_cwd}
 
     # install contrail modules

From c17d96fc05a75796069886e31d8659d9f460fa9d Mon Sep 17 00:00:00 2001
From: Deepinder Setia
Date: Mon, 9 Jun 2014 16:57:19 +0000
Subject: [PATCH 08/25] Support using frozen contrail bits with devstack

---
 contrail/localrc-single         | 3 +++
 lib/neutron_thirdparty/contrail | 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/contrail/localrc-single b/contrail/localrc-single
index d427886a6d..16ccfd1cf9 100644
--- a/contrail/localrc-single
+++ b/contrail/localrc-single
@@ -60,3 +60,6 @@ CASS_HEAP_NEWSIZE=200M
 
 # set CONTRAIL_REPO_SKIP_SYNC to avoid sync and build
 # CONTRAIL_REPO_SKIP_SYNC=yes
+
+# uncomment to use specific contrail bits instead of t-o-t
+# CONTRAIL_REPO=devstack.xml
diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail
index c70086daee..bb30609473 100644
--- a/lib/neutron_thirdparty/contrail
+++ b/lib/neutron_thirdparty/contrail
@@ -70,6 +70,7 @@ function install_contrail() {
     sudo pip install kazoo pyinotify
     sudo pip install bottle
 
+    CONTRAIL_REPO=${CONTRAIL_REPO:-default.xml}
    CONTRAIL_REPO_PROTO=${CONTRAIL_REPO_PROTO:-ssh}
     CONTRAIL_SRC=${CONTRAIL_SRC:-/opt/stack/contrail}
     mkdir -p $CONTRAIL_SRC/third_party
@@ -79,9 +80,9 @@
         git config --global --get user.name || git config --global user.name "Anonymous"
         git config --global --get user.email || git config --global user.email "anonymous@nowhere.com"
         if [ "$CONTRAIL_REPO_PROTO" == "ssh" ]; then
-            repo init -u git@github.com:Juniper/contrail-vnc
+            repo init -u git@github.com:Juniper/contrail-vnc -m $CONTRAIL_REPO
         else
-            repo init -u https://github.com/Juniper/contrail-vnc
+            repo init -u https://github.com/Juniper/contrail-vnc -m $CONTRAIL_REPO
             sed -i 's/fetch=".."/fetch=\"https:\/\/github.com\/Juniper\/\"/' .repo/manifest.xml
         fi
     fi

From 2cc1185b2974d0ac11574d08d81ad54967260295 Mon Sep 17 00:00:00 2001
From: Deepinder Setia
Date: Mon, 9 Jun 2014 22:28:04 +0000
Subject: [PATCH 09/25] fix error in creating ifmap user + syntax error

---
 contrail/setup_contrail.py      | 9 ++-------
 lib/neutron_thirdparty/contrail | 2 +-
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/contrail/setup_contrail.py b/contrail/setup_contrail.py
index 888b9fa982..efd2a70f02 100644
--- a/contrail/setup_contrail.py
+++ b/contrail/setup_contrail.py
@@ -706,15 +706,10 @@ def fixup_config_files(self):
         self.run_shell("echo 'api-server:api-server' >> %s/basicauthusers.properties" % dir)
         self.run_shell("echo 'schema-transformer:schema-transformer' >> %s/basicauthusers.properties" %
dir) self.run_shell("echo 'svc-monitor:svc-monitor' >> %s/basicauthusers.properties" % dir) - self.run_shell("sudo sed -e '/%s:/d' -e '/%s.dns:/d' %s/%s | sudo tee %s/%s.new > /dev/null" \ - %(control_ip, control_ip, dir, 'basicauthusers.properties', - dir, 'basicauthusers.properties')) - self.run_shell("echo '%s:%s' >> %s/%s.new" \ + self.run_shell("echo '%s:%s' >> %s/%s" \ %(control_ip, control_ip, dir, 'basicauthusers.properties')) - self.run_shell("echo '%s.dns:%s.dns' >> %s/%s.new" \ + self.run_shell("echo '%s.dns:%s.dns' >> %s/%s" \ %(control_ip, control_ip, dir, 'basicauthusers.properties')) - self.run_shell("sudo mv %s/%s.new %s/%s" \ - % (dir, 'basicauthusers.properties', dir, 'basicauthusers.properties')) self.run_shell("echo '%s=%s--0000000001-1' >> %s/%s" \ %(control_ip, control_ip, dir, 'publisher.properties')) if self._args.puppet_server: diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index bb30609473..ae0456f91e 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -194,7 +194,7 @@ function install_contrail() { cd ${contrail_cwd} fi if ! which node > /dev/null 2>&1 ; then - if [ ! -f node-v0.8.15.tar.gz]; then + if [ ! -f node-v0.8.15.tar.gz ]; then wget http://nodejs.org/dist/v0.8.15/node-v0.8.15.tar.gz -O node-v0.8.15.tar.gz fi tar -xf node-v0.8.15.tar.gz From 0bbbb4425afcd3b71cd50a3a6f2fd9ce89ae5479 Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Tue, 10 Jun 2014 02:09:09 +0000 Subject: [PATCH 10/25] Avoid downloading node-v0.8.15 again --- lib/neutron_thirdparty/contrail | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index ae0456f91e..f99c901275 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -194,16 +194,18 @@ function install_contrail() { cd ${contrail_cwd} fi if ! which node > /dev/null 2>&1 ; then - if [ ! -f node-v0.8.15.tar.gz ]; then + contrail_cwd=$(pwd) + cd $CONTRAIL_SRC/third_party + if [ ! -d $CONTRAIL_SRC/third_party/node-v0.8.15 ]; then wget http://nodejs.org/dist/v0.8.15/node-v0.8.15.tar.gz -O node-v0.8.15.tar.gz + tar -xf node-v0.8.15.tar.gz fi - tar -xf node-v0.8.15.tar.gz - contrail_cwd=$(pwd) cd node-v0.8.15 ./configure; make; sudo make install cd ${contrail_cwd} rm -rf node-v0.8.15.tar.gz rm -rf node-v0.8.15 + cd ${contrail_cwd} fi if [ ! -d $CONTRAIL_SRC/contrail-web-core/node_modules ]; then contrail_cwd=$(pwd) cd $CONTRAIL_SRC/contrail-web-core From c66bb72f54576abd70547261839d984b6fd8c1f7 Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Tue, 10 Jun 2014 20:14:32 +0000 Subject: [PATCH 11/25] Add dependency for npm -- some packages via fetch_packages.py need it --- lib/neutron_thirdparty/contrail | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index f99c901275..e6a9c60c63 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -35,6 +35,7 @@ function install_contrail() { apt_get install libevent-dev libxml2-dev libxslt-dev apt_get install uml-utilities apt_get install libvirt-bin + apt_get install npm if ! which redis-server > /dev/null 2>&1 ; then sudo apt-get install libjemalloc1 @@ -52,6 +53,7 @@ function install_contrail() { sudo yum -y install tunctl sudo yum -y install java-1.7.0-openjdk sudo yum -y install libvirt-bin + sudo yum -y install npm if !
which redis-server > /dev/null 2>&1 ; then wget http://mir01.syntis.net/atomic/fedora/17/x86_64/RPMS/redis-2.6.13-3.fc17.art.x86_64.rpm From 41a7ff539ab9abae0f5891ff42d42fd87659b280 Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Wed, 11 Jun 2014 16:32:00 +0000 Subject: [PATCH 12/25] Fix npm build --- lib/neutron_thirdparty/contrail | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index e6a9c60c63..b2a2db6500 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -35,7 +35,6 @@ function install_contrail() { apt_get install libevent-dev libxml2-dev libxslt-dev apt_get install uml-utilities apt_get install libvirt-bin - apt_get install npm if ! which redis-server > /dev/null 2>&1 ; then sudo apt-get install libjemalloc1 @@ -53,7 +52,6 @@ function install_contrail() { sudo yum -y install tunctl sudo yum -y install java-1.7.0-openjdk sudo yum -y install libvirt-bin - sudo yum -y install npm if ! which redis-server > /dev/null 2>&1 ; then wget http://mir01.syntis.net/atomic/fedora/17/x86_64/RPMS/redis-2.6.13-3.fc17.art.x86_64.rpm @@ -62,6 +60,18 @@ function install_contrail() { fi fi + # install node which brings npm that's used in fetch_packages.py + if ! which node > /dev/null 2>&1 || ! which npm > /dev/null 2>&1 ; then + wget http://nodejs.org/dist/v0.8.15/node-v0.8.15.tar.gz -O node-v0.8.15.tar.gz + tar -xf node-v0.8.15.tar.gz + contrail_cwd=$(pwd) + cd node-v0.8.15 + ./configure; make; sudo make install + cd ${contrail_cwd} + rm -rf node-v0.8.15.tar.gz + rm -rf node-v0.8.15 + fi + # api server requirements # sudo pip install gevent==0.13.8 geventhttpclient==1.0a thrift==0.8.0 # sudo easy_install -U distribute @@ -195,20 +205,6 @@ function install_contrail() { sudo python setup.py install cd ${contrail_cwd} fi - if ! which node > /dev/null 2>&1 ; then - contrail_cwd=$(pwd) - cd $CONTRAIL_SRC/third_party - if [ ! -d $CONTRAIL_SRC/third_party/node-v0.8.15 ]; then - wget http://nodejs.org/dist/v0.8.15/node-v0.8.15.tar.gz -O node-v0.8.15.tar.gz - tar -xf node-v0.8.15.tar.gz - fi - cd node-v0.8.15 - ./configure; make; sudo make install - cd ${contrail_cwd} - rm -rf node-v0.8.15.tar.gz - rm -rf node-v0.8.15 - cd ${contrail_cwd} - fi if [ ! 
-d $CONTRAIL_SRC/contrail-web-core/node_modules ]; then contrail_cwd=$(pwd) cd $CONTRAIL_SRC/contrail-web-core From 0a37c2ac67046fc22a29bf09455c19c39d7c5df1 Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Thu, 12 Jun 2014 17:09:49 +0000 Subject: [PATCH 13/25] Add cassandra list for cassandra based discovery --- contrail/contrail_config_templates.py | 1 + contrail/setup_contrail.py | 1 + 2 files changed, 2 insertions(+) diff --git a/contrail/contrail_config_templates.py b/contrail/contrail_config_templates.py index 031091d4f5..9566782467 100644 --- a/contrail/contrail_config_templates.py +++ b/contrail/contrail_config_templates.py @@ -146,6 +146,7 @@ listen_port=$__contrail_listen_port__ log_local=$__contrail_log_local__ log_file=$__contrail_log_file__ +cassandra_server_list=$__contrail_cassandra_server_list__ # minimim time to allow client to cache service information (seconds) ttl_min=300 diff --git a/contrail/setup_contrail.py b/contrail/setup_contrail.py index efd2a70f02..753acc1f8c 100644 --- a/contrail/setup_contrail.py +++ b/contrail/setup_contrail.py @@ -628,6 +628,7 @@ def fixup_config_files(self): '__contrail_listen_port__': '5998', '__contrail_log_local__': 'True', '__contrail_log_file__': '/var/log/contrail/discovery.log', + '__contrail_cassandra_server_list__' : ' '.join('%s:%s' % cassandra_server for cassandra_server in cassandra_server_list), } self._template_substitute_write(discovery_conf_template, template_vals, temp_dir_name + '/discovery.conf') From 66e8c2531c36412e63f8b188a779255bcf537d8c Mon Sep 17 00:00:00 2001 From: Anonymous Date: Thu, 12 Jun 2014 21:10:33 +0000 Subject: [PATCH 14/25] change the path to core path and feature path --- lib/neutron_thirdparty/contrail | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index b2a2db6500..47c22c1224 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -207,7 +207,11 @@ function install_contrail() { fi if [ ! 
-d $CONTRAIL_SRC/contrail-web-core/node_modules ]; then contrail_cwd=$(pwd) - cd $CONTRAIL_SRC/contrail-web-core + cd $CONTRAIL_SRC + sed -ie "s/config\.discoveryService\.enable.*$/config\.discoveryService\.enable = false;/" contrail-web-core/config/config.global.js + sed -ie "s/config\.featurePkg\.webController\.path.*$/config\.featurePkg\.webController\.path = '\/opt\/stack\/contrail\/contrail-web-controller';/" contrail-web-core/config/config.global.js + sed -ie "s/config\.core_path.*$/config\.core_path = '\/opt\/stack\/contrail\/contrail-web-core';/" contrail-web-controller/webroot/common/js/controller.config.global.js + cd contrail-web-core make fetch-pkgs-prod make dev-env REPO=webController fi From ba75acc0735cb3a73f287de8690c8b0d0b907e3b Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Tue, 17 Jun 2014 14:11:36 -0700 Subject: [PATCH 15/25] Enable security groups for neutron --- lib/neutron_plugins/contrail | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/contrail b/lib/neutron_plugins/contrail index 774e39e355..f84749b6d5 100644 --- a/lib/neutron_plugins/contrail +++ b/lib/neutron_plugins/contrail @@ -36,8 +36,8 @@ function is_neutron_ovs_base_plugin() { } function has_neutron_plugin_security_group() { - # False - return 1 + # True + return 0 } function neutron_plugin_check_adv_test_requirements() { From 57093cf6793c5287a35f1d81e878b565a37c2f6d Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Tue, 17 Jun 2014 16:02:18 -0700 Subject: [PATCH 16/25] Add support for neutron plugin in CI environment --- contrail/localrc-ci | 70 ++++++++++++++++++++++++++++++++++++ lib/neutron_plugins/contrail | 6 ++-- 2 files changed, 73 insertions(+), 3 deletions(-) create mode 100644 contrail/localrc-ci diff --git a/contrail/localrc-ci b/contrail/localrc-ci new file mode 100644 index 0000000000..8c2353ed09 --- /dev/null +++ b/contrail/localrc-ci @@ -0,0 +1,70 @@ +STACK_DIR=$(cd $(dirname $0) && pwd) + +SCREEN_LOGDIR=$STACK_DIR/log/screens +LOG=True +DEBUG=True +LOGFILE=$STACK_DIR/log/stack.log +LOGDAYS=1 + +# contrail services +enable_service cass +enable_service ifmap +enable_service apiSrv +enable_service schema +enable_service svc-mon +enable_service zk +enable_service control +enable_service agent +enable_service disco +enable_service redis +enable_service contrail +enable_service redis-u +enable_service redis-q +enable_service vizd +enable_service opserver +enable_service qed +enable_service redis-w +enable_service ui-jobs +enable_service ui-webs + +disable_service n-net +enable_service q-svc +enable_service q-meta +enable_service neutron + +# not used by contrail +disable_service q-agt +disable_service q-dhcp +disable_service q-l3 + +DATABASE_PASSWORD=contrail123 +RABBIT_PASSWORD=contrail123 +SERVICE_TOKEN=contrail123 +SERVICE_PASSWORD=contrail123 +ADMIN_PASSWORD=contrail123 + +Q_PLUGIN=contrail +PHYSICAL_INTERFACE=eth0 + +# repo proto is https or (default) ssh. Leave commented for ssh +# CONTRAIL_REPO_PROTO=https + +# proto for openstack bits. 
Use HTTPS if git is firewalled +GIT_BASE=https://git.openstack.org + +# use contrail VIF driver with NOVA +NOVA_VIF_DRIVER=nova_contrail_vif.contrailvif.VRouterVIFDriver + +CASS_MAX_HEAP_SIZE=1G +CASS_HEAP_NEWSIZE=200M + +# set CONTRAIL_REPO_SKIP_SYNC to avoid sync and build +# CONTRAIL_REPO_SKIP_SYNC=yes + +# uncomment to use specific contrail bits instead of t-o-t +CONTRAIL_REPO=devstack.xml + +# Set these to use neutron repo in openstack +Q_PLUGIN_CLASS=neutron.plugins.opencontrail.contrail_plugin_core.NeutronPluginContrailCoreV2 +Q_PLUGIN_CONF_PATH=etc/neutron/plugins/opencontrail +Q_PLUGIN_CONF_FILENAME=contrailplugin.ini diff --git a/lib/neutron_plugins/contrail b/lib/neutron_plugins/contrail index f84749b6d5..1810f4b99e 100644 --- a/lib/neutron_plugins/contrail +++ b/lib/neutron_plugins/contrail @@ -15,10 +15,10 @@ function neutron_plugin_setup_interface_driver() { } function neutron_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/juniper/contrail - Q_PLUGIN_CONF_FILENAME=ContrailPlugin.ini + Q_PLUGIN_CONF_PATH=${Q_PLUGIN_CONF_PATH:-etc/neutron/plugins/juniper/contrail} + Q_PLUGIN_CONF_FILENAME=${Q_PLUGIN_CONF_FILENAME:-ContrailPlugin.ini} Q_DB_NAME="contrail_neutron" - Q_PLUGIN_CLASS="neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_core.NeutronPluginContrailCoreV2" + Q_PLUGIN_CLASS=${Q_PLUGIN_CLASS:-"neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_core.NeutronPluginContrailCoreV2"} } function neutron_plugin_configure_service() { From 226ca88d607678b8173adddfdeec61cb613bb3fe Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Tue, 17 Jun 2014 20:10:08 -0700 Subject: [PATCH 17/25] put HOST_IP= --- contrail/localrc-ci | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrail/localrc-ci b/contrail/localrc-ci index 8c2353ed09..ee697f9efd 100644 --- a/contrail/localrc-ci +++ b/contrail/localrc-ci @@ -1,3 +1,5 @@ +HOST_IP= + STACK_DIR=$(cd $(dirname $0) && pwd) SCREEN_LOGDIR=$STACK_DIR/log/screens From 4e29a7834d8e0e742c7a2a33d956454389412370 Mon Sep 17 00:00:00 2001 From: Anonymous Date: Fri, 11 Jul 2014 07:48:35 -0700 Subject: [PATCH 18/25] devstack changes for webui and analytics redis changes --- contrail/setup_contrail.py | 25 ++----------------------- lib/neutron_thirdparty/contrail | 30 +++++++++++++++--------------- 2 files changed, 17 insertions(+), 38 deletions(-) diff --git a/contrail/setup_contrail.py b/contrail/setup_contrail.py index 753acc1f8c..fac84cd35b 100644 --- a/contrail/setup_contrail.py +++ b/contrail/setup_contrail.py @@ -473,27 +473,6 @@ def fixup_config_files(self): # collector in Phase 2 if 'collector' in self._args.role: - REDIS_UVE="/etc/contrail/redis-uve.conf" - REDIS_QUERY="/etc/contrail/redis-query.conf" - if os.path.isfile('/etc/redis/redis.conf'): - REDIS_CONF="/etc/redis/redis.conf" - else: - REDIS_CONF="/etc/redis.conf" - self.run_shell("cp %s %s" %(REDIS_CONF, REDIS_UVE)) - self.run_shell("cp %s %s" %(REDIS_CONF, REDIS_QUERY)) - - self.replace_in_file(REDIS_UVE, 'pidfile /var/run/redis/redis.pid', 'pidfile /var/run/redis/redis-uve.pid') - self.replace_in_file(REDIS_UVE, 'port 6379', 'port 6381') - self.replace_in_file(REDIS_UVE, 'bind 127.0.0.1', '#bind 127.0.0.1') - self.replace_in_file(REDIS_UVE, 'logfile /var/log/redis/redis-server.log', 'logfile /var/log/redis/redis-uve.log') - self.replace_in_file(REDIS_UVE, 'dbfilename dump.rdb', 'dbfilename dump-uve.rdb') - - self.replace_in_file(REDIS_QUERY, 'pidfile /var/run/redis/redis.pid', 'pidfile /var/run/redis/redis-query.pid') - 
self.replace_in_file(REDIS_QUERY, 'port 6379', 'port 6380') - self.replace_in_file(REDIS_QUERY, 'bind 127.0.0.1', '#bind 127.0.0.1') - self.replace_in_file(REDIS_QUERY, 'logfile /var/log/redis/redis-server.log', 'logfile /var/log/redis/redis-query.log') - self.replace_in_file(REDIS_QUERY, 'dbfilename dump.rdb', 'dbfilename dump-query.rdb') - template_vals = {'__contrail_discovery_ip__': self._args.discovery_ip, '__contrail_host_ip__': self._args.collector_ip, '__contrail_cassandra_server_list__' : ' '.join('%s:%s' % cassandra_server for cassandra_server in cassandra_server_list), @@ -510,8 +489,8 @@ def fixup_config_files(self): '__contrail_log_local__': '--log-local', '__contrail_log_file__': '--log-file=/var/log/contrail/qe.log', '__contrail_collectors__' : ' '.join('%s:%s' % collector_server for collector_server in collector_server_list), - '__contrail_redis_server__' : collector_ip, - '__contrail_redis_server_port__' : 6380, + '__contrail_redis_server__' : '127.0.0.1', + '__contrail_redis_server_port__' : 6379, } self._template_substitute_write(qe_param_template, template_vals, temp_dir_name + '/qed_param') diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index 47c22c1224..0eaa960cf1 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -62,14 +62,20 @@ function install_contrail() { # install node which brings npm that's used in fetch_packages.py if ! which node > /dev/null 2>&1 || ! which npm > /dev/null 2>&1 ; then - wget http://nodejs.org/dist/v0.8.15/node-v0.8.15.tar.gz -O node-v0.8.15.tar.gz - tar -xf node-v0.8.15.tar.gz - contrail_cwd=$(pwd) - cd node-v0.8.15 - ./configure; make; sudo make install - cd ${contrail_cwd} - rm -rf node-v0.8.15.tar.gz - rm -rf node-v0.8.15 + if is_ubuntu; then + wget https://launchpad.net/~opencontrail/+archive/ubuntu/ppa/+files/nodejs_0.8.15-1contrail1_amd64.deb + sudo dpkg -i nodejs_0.8.15-1contrail1_amd64.deb + rm -rf nodejs_0.8.15-1contrail1_amd64.deb + else + wget http://nodejs.org/dist/v0.8.15/node-v0.8.15.tar.gz -O node-v0.8.15.tar.gz + tar -xf node-v0.8.15.tar.gz + contrail_cwd=$(pwd) + cd node-v0.8.15 + ./configure; make; sudo make install + cd ${contrail_cwd} + rm -rf node-v0.8.15.tar.gz + rm -rf node-v0.8.15 + fi fi # api server requirements @@ -208,6 +214,7 @@ function install_contrail() { if [ ! 
-d $CONTRAIL_SRC/contrail-web-core/node_modules ]; then contrail_cwd=$(pwd) cd $CONTRAIL_SRC + python contrail-webui-third-party/fetch_packages.py sed -ie "s/config\.discoveryService\.enable.*$/config\.discoveryService\.enable = false;/" contrail-web-core/config/config.global.js sed -ie "s/config\.featurePkg\.webController\.path.*$/config\.featurePkg\.webController\.path = '\/opt\/stack\/contrail\/contrail-web-controller';/" contrail-web-core/config/config.global.js sed -ie "s/config\.core_path.*$/config\.core_path = '\/opt\/stack\/contrail\/contrail-web-core';/" contrail-web-controller/webroot/common/js/controller.config.global.js @@ -370,13 +377,6 @@ function start_contrail() { screen_it control "sudo PATH=$PATH:$TOP_DIR/bin LD_LIBRARY_PATH=/opt/stack/contrail/build/lib $CONTRAIL_SRC/build/debug/control-node/control-node --IFMAP.server_url https://${IFMAP_SERVER}:${IFMAP_PORT} --IFMAP.user ${IFMAP_USER} --IFMAP.password ${IFMAP_PASWD} --DEFAULT.hostname ${HOSTNAME} --DEFAULT.hostip ${HOSTIP} --DEFAULT.bgp_port ${BGP_PORT} ${CERT_OPTS} ${LOG_LOCAL} --DEFAULT.collectors ${COLLECTOR}:${COLLECTOR_PORT}" # collector services - # redis-uve - screen_it redis-u "sudo redis-server /etc/contrail/redis-uve.conf" - sleep 2 - # redis-query - screen_it redis-q "sudo redis-server /etc/contrail/redis-query.conf" - sleep 2 - # collector/vizd source /etc/contrail/vizd_param screen_it vizd "sudo PATH=$PATH:$TOP_DIR/bin LD_LIBRARY_PATH=/opt/stack/contrail/build/lib $CONTRAIL_SRC/build/debug/analytics/vizd --DEFAULT.cassandra_server_list ${CASSANDRA_SERVER_LIST} --DEFAULT.hostip ${HOST_IP} --DEFAULT.log_file /var/log/contrail/collector.log" From e58742c9a3e91df6f2e72acf649d41969b86fc5f Mon Sep 17 00:00:00 2001 From: Sylvain Afchain Date: Tue, 15 Jul 2014 15:11:50 +0000 Subject: [PATCH 19/25] Fix the name of the agent executable --- lib/neutron_thirdparty/contrail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index 47c22c1224..8cd1894302 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -432,7 +432,7 @@ EOF2 cat > $TOP_DIR/bin/vnsw.hlpr < Date: Mon, 21 Jul 2014 16:32:49 -0700 Subject: [PATCH 20/25] sometimes webui does not start up --- lib/neutron_thirdparty/contrail | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index 0eaa960cf1..6a945caf3e 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -211,17 +211,17 @@ function install_contrail() { sudo python setup.py install cd ${contrail_cwd} fi - if [ ! 
-d $CONTRAIL_SRC/contrail-web-core/node_modules ]; then - contrail_cwd=$(pwd) - cd $CONTRAIL_SRC - python contrail-webui-third-party/fetch_packages.py - sed -ie "s/config\.discoveryService\.enable.*$/config\.discoveryService\.enable = false;/" contrail-web-core/config/config.global.js - sed -ie "s/config\.featurePkg\.webController\.path.*$/config\.featurePkg\.webController\.path = '\/opt\/stack\/contrail\/contrail-web-controller';/" contrail-web-core/config/config.global.js - sed -ie "s/config\.core_path.*$/config\.core_path = '\/opt\/stack\/contrail\/contrail-web-core';/" contrail-web-controller/webroot/common/js/controller.config.global.js - cd contrail-web-core - make fetch-pkgs-prod - make dev-env REPO=webController - fi + + contrail_cwd=$(pwd) + cd $CONTRAIL_SRC + python contrail-webui-third-party/fetch_packages.py + sed -ie "s/config\.discoveryService\.enable.*$/config\.discoveryService\.enable = false;/" contrail-web-core/config/config.global.js + sed -ie "s/config\.featurePkg\.webController\.path.*$/config\.featurePkg\.webController\.path = '\/opt\/stack\/contrail\/contrail-web-controller';/" contrail-web-core/config/config.global.js + sed -ie "s/config\.core_path.*$/config\.core_path = '\/opt\/stack\/contrail\/contrail-web-core';/" contrail-web-controller/webroot/common/js/controller.config.global.js + cd contrail-web-core + make fetch-pkgs-prod + make dev-env REPO=webController + cd ${contrail_cwd} } function apply_patch() { From bd3a38e803edb799178ed3a8244b01d07e0ef19f Mon Sep 17 00:00:00 2001 From: anandhk-juniper Date: Tue, 22 Jul 2014 12:17:39 +0530 Subject: [PATCH 21/25] - Arguments to vif command changed (the 'mode' argument is not supported anymore) --- lib/neutron_thirdparty/contrail | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index 6a945caf3e..5214b72564 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -267,13 +267,14 @@ function insert_vrouter() { sudo $VIF --create $DEVICE --mac $DEV_MAC \ || echo "Error creating interface: $DEVICE" + echo "Adding $dev to vrouter" + sudo $VIF --add $dev --mac $DEV_MAC --vrf 0 --vhost-phys --type physical \ + || echo "Error adding $dev to vrouter" + echo "Adding $DEVICE to vrouter" - sudo $VIF --add $DEVICE --mac $DEV_MAC --vrf 0 --xconnect $dev --mode x --type vhost \ + sudo $VIF --add $DEVICE --mac $DEV_MAC --vrf 0 --xconnect $dev --type vhost \ || echo "Error adding $DEVICE to vrouter" - echo "Adding $dev to vrouter" - sudo $VIF --add $dev --mac $DEV_MAC --vrf 0 --mode x --type physical \ - || echo "Error adding $dev to vrouter" if is_ubuntu; then From 4cfa2e75b909d36d09b83e5a082e5719d2105853 Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Wed, 23 Jul 2014 18:29:42 +0000 Subject: [PATCH 22/25] Fix libvirt section; time to sync from openstack-dev/devstack --- lib/neutron | 2 +- lib/nova_plugins/hypervisor-libvirt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 81db2a74d1..7770fcfcca 100644 --- a/lib/neutron +++ b/lib/neutron @@ -301,7 +301,7 @@ function create_nova_conf_neutron() { # set NOVA_VIF_DRIVER and optionally set options in nova_conf neutron_plugin_create_nova_conf - iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" + iniset $NOVA_CONF libvirt vif_driver "$NOVA_VIF_DRIVER" iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" if is_service_enabled q-meta; then iniset $NOVA_CONF DEFAULT 
service_neutron_metadata_proxy "True" diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 6f90f4ac17..1f62eab721 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -105,8 +105,8 @@ EOF" # libvirt to detect those changes. restart_service $LIBVIRT_DAEMON - iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" - iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE" + iniset $NOVA_CONF libvirt virt_cpu_mode "none" iniset $NOVA_CONF DEFAULT use_usb_tablet "False" iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} From 83ede5e99d1ff1e344e649d2d73d7439280dd2d0 Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Thu, 24 Jul 2014 01:57:38 +0000 Subject: [PATCH 23/25] update localrc for ci to use its own manifest --- contrail/localrc-ci | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrail/localrc-ci b/contrail/localrc-ci index ee697f9efd..c0b0298d75 100644 --- a/contrail/localrc-ci +++ b/contrail/localrc-ci @@ -64,7 +64,7 @@ CASS_HEAP_NEWSIZE=200M # CONTRAIL_REPO_SKIP_SYNC=yes # uncomment to use specific contrail bits instead of t-o-t -CONTRAIL_REPO=devstack.xml +CONTRAIL_REPO=devstack-ci.xml # Set these to use neutron repo in openstack Q_PLUGIN_CLASS=neutron.plugins.opencontrail.contrail_plugin_core.NeutronPluginContrailCoreV2 From dc1860c3d96f16c93e26f651a9d42306072a1bb8 Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Fri, 25 Jul 2014 17:37:40 +0000 Subject: [PATCH 24/25] Install kernel headers (needed for vrouter) --- lib/neutron_thirdparty/contrail | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/neutron_thirdparty/contrail b/lib/neutron_thirdparty/contrail index 75cd8e44e1..96c5c905c1 100644 --- a/lib/neutron_thirdparty/contrail +++ b/lib/neutron_thirdparty/contrail @@ -35,6 +35,7 @@ function install_contrail() { apt_get install libevent-dev libxml2-dev libxslt-dev apt_get install uml-utilities apt_get install libvirt-bin + apt_get install linux-headers-$(uname -r) if ! which redis-server > /dev/null 2>&1 ; then sudo apt-get install libjemalloc1 @@ -52,6 +53,7 @@ function install_contrail() { sudo yum -y install tunctl sudo yum -y install java-1.7.0-openjdk sudo yum -y install libvirt-bin + sudo yum -y install kernel-headers if ! which redis-server > /dev/null 2>&1 ; then wget http://mir01.syntis.net/atomic/fedora/17/x86_64/RPMS/redis-2.6.13-3.fc17.art.x86_64.rpm From b46bdfcc4e80d3469b4e264f408c4134ab4429c4 Mon Sep 17 00:00:00 2001 From: Deepinder Setia Date: Wed, 3 Sep 2014 15:59:14 -0700 Subject: [PATCH 25/25] deprecated in favor of juniper/contrail-installer --- README.md | 355 +------------ stack.sh | 1441 ---------------------------------------------------- unstack.sh | 185 ------- 3 files changed, 3 insertions(+), 1978 deletions(-) delete mode 100755 stack.sh delete mode 100755 unstack.sh diff --git a/README.md b/README.md index 9914b1ed69..1c6f8122fa 100644 --- a/README.md +++ b/README.md @@ -1,354 +1,5 @@ -DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. 
+THIS REPOSITORY IS DEPRECATED +============================= -# Goals +This repository is deprecated in favor of http://github.com/juniper/contrail-installer -* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora environment -* To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) -* To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once -* To make it easy to prototype cross-project features -* To provide an environment for the OpenStack CI testing on every commit to the projects - -Read more at http://devstack.org. - -IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you -execute before you run them, as they install software and will alter your -networking configuration. We strongly recommend that you run `stack.sh` -in a clean and disposable vm when you are first getting started. - -# Versions - -The DevStack master branch generally points to trunk versions of OpenStack -components. For older, stable versions, look for branches named -stable/[release] in the DevStack repo. For example, you can do the -following to create a grizzly OpenStack cloud: - - git checkout stable/grizzly - ./stack.sh - -You can also pick specific OpenStack project releases by setting the appropriate -`*_BRANCH` variables in the ``localrc`` section of `local.conf` (look in -`stackrc` for the default set). Usually just before a release there will be -milestone-proposed branches that need to be tested:: - - GLANCE_REPO=git://git.openstack.org/openstack/glance.git - GLANCE_BRANCH=milestone-proposed - -# Start A Dev Cloud - -Installing in a dedicated disposable VM is safer than installing on your -dev machine! Plus you can pick one of the supported Linux distros for -your VM. To start a dev cloud run the following NOT AS ROOT (see -**DevStack Execution Environment** below for more on user accounts): - - ./stack.sh - -When the script finishes executing, you should be able to access OpenStack endpoints, like so: - -* Horizon: http://myhost/ -* Keystone: http://myhost:5000/v2.0/ - -We also provide an environment file that you can use to interact with your cloud via CLI: - - # source openrc file to load your environment with OpenStack CLI creds - . openrc - # list instances - nova list - -If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: - - # source eucarc to generate EC2 credentials and set up the environment - . eucarc - # list instances using ec2 api - euca-describe-instances - -# DevStack Execution Environment - -DevStack runs rampant over the system it runs on, installing things and uninstalling other things. Running this on a system you care about is a recipe for disappointment, or worse. Alas, we're all in the virtualization business here, so run it in a VM. And take advantage of the snapshot capabilities of your hypervisor of choice to reduce testing cycle times. You might even save enough time to write one more feature before the next feature freeze... - -``stack.sh`` needs to have root access for a lot of tasks, but uses ``sudo`` -for all of those tasks. However, it needs to be not-root for most of its -work and for all of the OpenStack services. ``stack.sh`` specifically -does not run if started as root. - -This is a recent change (Oct 2013) from the previous behaviour of -automatically creating a ``stack`` user. 
Automatically creating -user accounts is not the right response to running as root, so -that bit is now an explicit step using ``tools/create-stack-user.sh``. -Run that (as root!) or just check it out to see what DevStack's -expectations are for the account it runs under. Many people simply -use their usual login (the default 'ubuntu' login on a UEC image -for example). - -# Customizing - -You can override environment variables used in `stack.sh` by creating file -name `local.conf` with a ``localrc`` section as shown below. It is likely -that you will need to do this to tweak your networking configuration should -you need to access your cloud from a different host. - - [[local|localrc]] - VARIABLE=value - -See the **Local Configuration** section below for more details. - -# Database Backend - -Multiple database backends are available. The available databases are defined in the lib/databases directory. -`mysql` is the default database, choose a different one by putting the -following in the `localrc` section: - - disable_service mysql - enable_service postgresql - -`mysql` is the default database. - -# RPC Backend - -Multiple RPC backends are available. Currently, this -includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of -choice may be selected via the `localrc` section. - -Note that selecting more than one RPC backend will result in a failure. - -Example (ZeroMQ): - - ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq" - -Example (Qpid): - - ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid" - -# Apache Frontend - -Apache web server is enabled for wsgi services by setting -`APACHE_ENABLED_SERVICES` in your ``localrc`` section. Remember to -enable these services at first as above. - - APACHE_ENABLED_SERVICES+=keystone,swift - -# Swift - -Swift is disabled by default. When enabled, it is configured with -only one replica to avoid being IO/memory intensive on a small -vm. When running with only one replica the account, container and -object services will run directly in screen. The others services like -replicator, updaters or auditor runs in background. - -If you would like to enable Swift you can add this to your `localrc` section: - - enable_service s-proxy s-object s-container s-account - -If you want a minimal Swift install with only Swift and Keystone you -can have this instead in your `localrc` section: - - disable_all_services - enable_service key mysql s-proxy s-object s-container s-account - -If you only want to do some testing of a real normal swift cluster -with multiple replicas you can do so by customizing the variable -`SWIFT_REPLICAS` in your `localrc` section (usually to 3). - -# Swift S3 - -If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will -install the swift3 middleware emulation. Swift will be configured to -act as a S3 endpoint for Keystone so effectively replacing the -`nova-objectstore`. - -Only Swift proxy server is launched in the screen session all other -services are started in background and managed by `swift-init` tool. - -# Neutron - -Basic Setup - -In order to enable Neutron a single node setup, you'll need the -following settings in your `localrc` section: - - disable_service n-net - enable_service q-svc - enable_service q-agt - enable_service q-dhcp - enable_service q-l3 - enable_service q-meta - enable_service q-metering - enable_service neutron - # Optional, to enable tempest configuration as part of DevStack - enable_service tempest - -Then run `stack.sh` as normal. 
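After the Neutron single-node setup the README describes just above, a quick smoke check confirms the services answer before any deeper debugging. A minimal sketch, assuming DevStack's `openrc` helper (shown earlier in this README) and the era-appropriate `python-neutronclient` CLI are on the path:

    # load admin credentials into the current shell
    . openrc admin admin
    # an empty listing still proves q-svc is up and reachable
    neutron net-list
    # show which API extensions the loaded plugin advertises
    neutron ext-list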
- -DevStack supports setting specific Neutron configuration flags to the -service, Open vSwitch plugin and LinuxBridge plugin configuration files. -To make use of this feature, the following variables are defined and can -be configured in your `localrc` section: - - Variable Name Config File Section Modified - ------------------------------------------------------------------------------------- - Q_SRV_EXTRA_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) - Q_AGENT_EXTRA_AGENT_OPTS Plugin AGENT - Q_AGENT_EXTRA_SRV_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) - Q_SRV_EXTRA_DEFAULT_OPTS Service DEFAULT - -An example of using the variables in your `localrc` section is below: - - Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472) - Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan) - -DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin -can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A -simple way to configure the ml2 plugin is shown below: - - # VLAN configuration - Q_PLUGIN=ml2 - ENABLE_TENANT_VLANS=True - - # GRE tunnel configuration - Q_PLUGIN=ml2 - ENABLE_TENANT_TUNNELS=True - - # VXLAN tunnel configuration - Q_PLUGIN=ml2 - Q_ML2_TENANT_NETWORK_TYPE=vxlan - -The above will default in DevStack to using the OVS on each compute host. -To change this, set the `Q_AGENT` variable to the agent you want to run -(e.g. linuxbridge). - - Variable Name Notes - ------------------------------------------------------------------------------------- - Q_AGENT This specifies which agent to run with the ML2 Plugin (either `openvswitch` or `linuxbridge`). - Q_ML2_PLUGIN_MECHANISM_DRIVERS The ML2 MechanismDrivers to load. The default is none. Note, ML2 will work with the OVS and LinuxBridge agents by default. - Q_ML2_PLUGIN_TYPE_DRIVERS The ML2 TypeDrivers to load. Defaults to all available TypeDrivers. - Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to none. - Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to none. - Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none. - Q_AGENT_EXTRA_AGENT_OPTS Extra configuration options to pass to the OVS or LinuxBridge Agent. - -# Heat - -Heat is disabled by default. To enable it you'll need the following settings -in your `localrc` section: - - enable_service heat h-api h-api-cfn h-api-cw h-eng - -Heat can also run in standalone mode, and be configured to orchestrate -on an external OpenStack cloud. To launch only Heat in standalone mode -you'll need the following settings in your `localrc` section: - - disable_all_services - enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng - HEAT_STANDALONE=True - KEYSTONE_SERVICE_HOST=... - KEYSTONE_AUTH_HOST=... - -# Tempest - -If tempest has been successfully configured, a basic set of smoke tests can be run as follows: - - $ cd /opt/stack/tempest - $ nosetests tempest/scenario/test_network_basic_ops.py - -# DevStack on Xenserver - -If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. - -# DevStack on Docker - -If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. - -# Additional Projects - -DevStack has a hook mechanism to call out to a dispatch script at specific -points in the execution of `stack.sh`, `unstack.sh` and `clean.sh`. 
This -allows upper-layer projects, especially those that the lower layer projects -have no dependency on, to be added to DevStack without modifying the core -scripts. Tempest is built this way as an example of how to structure the -dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` -for more information. - -# Multi-Node Setup - -A more interesting setup involves running multiple compute nodes, with Neutron networks connecting VMs on different compute nodes. -You should run at least one "controller node", which should have a `stackrc` that includes at least: - - disable_service n-net - enable_service q-svc - enable_service q-agt - enable_service q-dhcp - enable_service q-l3 - enable_service q-meta - enable_service neutron - -You likely want to change your `localrc` section to run a scheduler that -will balance VMs across hosts: - - SCHEDULER=nova.scheduler.simple.SimpleScheduler - -You can then run many compute nodes, each of which should have a `stackrc` which includes the following, with the IP address of the above controller node: - - ENABLED_SERVICES=n-cpu,rabbit,g-api,neutron,q-agt - SERVICE_HOST=[IP of controller node] - MYSQL_HOST=$SERVICE_HOST - RABBIT_HOST=$SERVICE_HOST - Q_HOST=$SERVICE_HOST - MATCHMAKER_REDIS_HOST=$SERVICE_HOST - -# Cells - -Cells is a new scaling option with a full spec at http://wiki.openstack.org/blueprint-nova-compute-cells. - -To setup a cells environment add the following to your `localrc` section: - - enable_service n-cell - -Be aware that there are some features currently missing in cells, one notable one being security groups. The exercises have been patched to disable functionality not supported by cells. - - -# Local Configuration - -Historically DevStack has used ``localrc`` to contain all local configuration and customizations. More and more of the configuration variables available for DevStack are passed-through to the individual project configuration files. The old mechanism for this required specific code for each file and did not scale well. This is handled now by a master local configuration file. - -# local.conf - -The new config file ``local.conf`` is an extended-INI format that introduces a new meta-section header that provides some additional information such as a phase name and destination config filename: - - [[ | ]] - -where ```` is one of a set of phase names defined by ``stack.sh`` -and ```` is the configuration filename. The filename is -eval'ed in the ``stack.sh`` context so all environment variables are -available and may be used. Using the project config file variables in -the header is strongly suggested (see the ``NOVA_CONF`` example below). -If the path of the config file does not exist it is skipped. - -The defined phases are: - -* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced -* **post-config** - runs after the layer 2 services are configured and before they are started -* **extra** - runs after services are started and before any files in ``extra.d`` are executed -* **post-extra** - runs after files in ``extra.d`` are executed - -The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used. - - [[post-config|$NOVA_CONF]] - [DEFAULT] - use_syslog = True - - [osapi_v3] - enabled = False - -A specific meta-section ``local|localrc`` is used to provide a default -``localrc`` file (actually ``.localrc.auto``). 
This allows all custom -settings for DevStack to be contained in a single file. If ``localrc`` -exists it will be used instead to preserve backward-compatibility. - - [[local|localrc]] - FIXED_RANGE=10.254.1.0/24 - ADMIN_PASSWORD=speciale - LOGFILE=$DEST/logs/stack.sh.log - -Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to *NOT* -start with a ``/`` (slash) character. A slash will need to be added: - - [[post-config|/$Q_PLUGIN_CONF_FILE]] diff --git a/stack.sh b/stack.sh deleted file mode 100755 index 45d47c819c..0000000000 --- a/stack.sh +++ /dev/null @@ -1,1441 +0,0 @@ -#!/usr/bin/env bash - -# ``stack.sh`` is an opinionated OpenStack developer installation. It -# installs and configures various combinations of **Ceilometer**, **Cinder**, -# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, -# and **Swift** - -# This script allows you to specify configuration options of what git -# repositories to use, enabled services, network configuration and various -# passwords. If you are crafty you can run the script on multiple nodes using -# shared settings for common resources (mysql, rabbitmq) and build a multi-node -# developer install. - -# To keep this script simple we assume you are running on a recent **Ubuntu** -# (12.04 Precise or newer) or **Fedora** (F18 or newer) machine. (It may work -# on other platforms but support for those platforms is left to those who added -# them to DevStack.) It should work in a VM or physical server. Additionally -# we maintain a list of ``apt`` and ``rpm`` dependencies and other configuration -# files in this repo. - -# Learn more and get the most recent version at http://devstack.org - -# Make sure custom grep options don't get in the way -unset GREP_OPTIONS - -# Sanitize language settings to avoid commands bailing out -# with "unsupported locale setting" errors. -unset LANG -unset LANGUAGE -LC_ALL=C -export LC_ALL - -# Keep track of the devstack directory -TOP_DIR=$(cd $(dirname "$0") && pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import config functions -source $TOP_DIR/lib/config - -# Determine what system we are running on. This provides ``os_VENDOR``, -# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` -# and ``DISTRO`` -GetDistro - - -# Global Settings -# =============== - -# Check for a ``localrc`` section embedded in ``local.conf`` and extract if -# ``localrc`` does not already exist - -# Phase: local -rm -f $TOP_DIR/.localrc.auto -if [[ -r $TOP_DIR/local.conf ]]; then - LRC=$(get_meta_section_files $TOP_DIR/local.conf local) - for lfile in $LRC; do - if [[ "$lfile" == "localrc" ]]; then - if [[ -r $TOP_DIR/localrc ]]; then - warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" - else - echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto - get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto - fi - fi - done -fi - -# ``stack.sh`` is customizable by setting environment variables. Override a -# default setting via export:: -# -# export DATABASE_PASSWORD=anothersecret -# ./stack.sh -# -# or by setting the variable on the command line:: -# -# DATABASE_PASSWORD=simple ./stack.sh -# -# Persistent variables can be placed in a ``localrc`` file:: -# -# DATABASE_PASSWORD=anothersecret -# DATABASE_USER=hellaroot -# -# We try to have sensible defaults, so you should be able to run ``./stack.sh`` -# in most cases. ``localrc`` is not distributed with DevStack and will never -# be overwritten by a DevStack update. 
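Tying this to the ``local.conf`` meta-sections the README above describes: a single file can carry both the persistent ``localrc`` variables and a ``post-config`` override, since meta-sections are processed strictly in sequence. A sketch built only from the examples already given (values are illustrative):

    [[local|localrc]]
    ADMIN_PASSWORD=speciale
    DATABASE_PASSWORD=anothersecret
    FIXED_RANGE=10.254.1.0/24

    [[post-config|$NOVA_CONF]]
    [DEFAULT]
    use_syslog = True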
-# -# DevStack distributes ``stackrc`` which contains locations for the OpenStack -# repositories, branches to configure, and other configuration defaults. -# ``stackrc`` sources ``localrc`` to allow you to safely override those settings. - -if [[ ! -r $TOP_DIR/stackrc ]]; then - log_error $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" -fi -source $TOP_DIR/stackrc - - -# Local Settings -# -------------- - -# Make sure the proxy config is visible to sub-processes -export_proxy_variables - -# Destination path for installation ``DEST`` -DEST=${DEST:-/opt/stack} - - -# Sanity Check -# ------------ - -# Clean up last environment var cache -if [[ -r $TOP_DIR/.stackenv ]]; then - rm $TOP_DIR/.stackenv -fi - -# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config -# templates and other useful files in the ``files`` subdirectory -FILES=$TOP_DIR/files -if [ ! -d $FILES ]; then - log_error $LINENO "missing devstack/files" -fi - -# ``stack.sh`` keeps function libraries here -# Make sure ``$TOP_DIR/lib`` directory is present -if [ ! -d $TOP_DIR/lib ]; then - log_error $LINENO "missing devstack/lib" -fi - -# Import common services (database, message queue) configuration -source $TOP_DIR/lib/database -source $TOP_DIR/lib/rpc_backend - -# Remove services which were negated in ENABLED_SERVICES -# using the "-" prefix (e.g., "-rabbit") instead of -# calling disable_service(). -disable_negated_services - -# Warn users who aren't on an explicitly supported distro, but allow them to -# override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then - echo "WARNING: this script has not been tested on $DISTRO" - if [[ "$FORCE" != "yes" ]]; then - die $LINENO "If you wish to run this script anyway run with FORCE=yes" - fi -fi - -# Make sure we only have one rpc backend enabled, -# and the specified rpc backend is available on your platform. -check_rpc_backend - -# Check to see if we are already running DevStack -# Note that this may fail if USE_SCREEN=False -if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then - echo "You are already running a stack.sh session." - echo "To rejoin this session type 'screen -x stack'." - echo "To destroy this session, type './unstack.sh'." - exit 1 -fi - -# Set up logging level -VERBOSE=$(trueorfalse True $VERBOSE) - - -# Additional repos -# ================ - -# Some distros need to add repos beyond the defaults provided by the vendor -# to pick up required packages. - -# The Debian Wheezy official repositories do not contain all required packages, -# add gplhost repository. -if [[ "$os_VENDOR" =~ (Debian) ]]; then - echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list - echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list - apt_get update - apt_get install --force-yes gplhost-archive-keyring -fi - -if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then - # Installing Open vSwitch on RHEL6 requires enabling the RDO repo. - RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"} - RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"} - if ! 
yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then - echo "RDO repo not detected; installing" - yum_install $RHEL6_RDO_REPO_RPM || \ - die $LINENO "Error installing RDO repo, cannot continue" - fi - - # RHEL6 requires EPEL for many Open Stack dependencies - RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"} - if ! yum repolist enabled epel | grep -q 'epel'; then - echo "EPEL not detected; installing" - yum_install ${RHEL6_EPEL_RPM} || \ - die $LINENO "Error installing EPEL repo, cannot continue" - fi -fi - - -# root Access -# ----------- - -# OpenStack is designed to be run as a non-root user; Horizon will fail to run -# as **root** since Apache will not serve content from **root** user). -# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of -# action to create a suitable user account. - -if [[ $EUID -eq 0 ]]; then - echo "You are running this script as root." - echo "Cut it out." - echo "Really." - echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" - echo "$TOP_DIR/tools/create-stack-user.sh" - exit 1 -fi - -# We're not **root**, make sure ``sudo`` is available -is_package_installed sudo || install_package sudo - -# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one -sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers - -# Set up devstack sudoers -TEMPFILE=`mktemp` -echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE -# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will -# see them by forcing PATH -echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE -chmod 0440 $TEMPFILE -sudo chown root:root $TEMPFILE -sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh - - -# Create the destination directory and ensure it is writable by the user -# and read/executable by everybody for daemons (e.g. apache run for horizon) -sudo mkdir -p $DEST -safe_chown -R $STACK_USER $DEST -safe_chmod 0755 $DEST - -# a basic test for $DEST path permissions (fatal on error unless skipped) -check_path_perm_sanity ${DEST} - -# Certain services such as rabbitmq require that the local hostname resolves -# correctly. Make sure it exists in /etc/hosts so that is always true. -LOCAL_HOSTNAME=`hostname -s` -if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then - sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts -fi - -# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without -# Internet access. ``stack.sh`` must have been previously run with Internet -# access to install prerequisites and fetch repositories. -OFFLINE=`trueorfalse False $OFFLINE` - -# Set ``ERROR_ON_CLONE`` to ``True`` to configure ``stack.sh`` to exit if -# the destination git repository does not exist during the ``git_clone`` -# operation. -ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` - -# Whether to enable the debug log level in OpenStack services -ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL` - -# Destination path for service data -DATA_DIR=${DATA_DIR:-${DEST}/data} -sudo mkdir -p $DATA_DIR -safe_chown -R $STACK_USER $DATA_DIR - - -# Common Configuration -# ==================== - -# Set fixed and floating range here so we can make sure not to use addresses -# from either range when attempting to guess the IP to use for the host. 
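When those defaults collide with the surrounding network, the ranges and the host address can simply be pinned in the ``localrc`` section instead of relying on defaults and auto-detection. A sketch with illustrative addresses only -- pick ranges that do not overlap the host's own networks:

    FLOATING_RANGE=192.168.100.0/24
    FIXED_RANGE=10.254.1.0/24
    HOST_IP=192.168.100.10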
-# Note that setting FIXED_RANGE may be necessary when running DevStack -# in an OpenStack cloud that uses either of these address ranges internally. -FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} -FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} - -HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP") -if [ "$HOST_IP" == "" ]; then - die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted" -fi - -# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. -SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} - -# Allow the use of an alternate protocol (such as https) for service endpoints -SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} - -# Configure services to use syslog instead of writing to individual log files -SYSLOG=`trueorfalse False $SYSLOG` -SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} -SYSLOG_PORT=${SYSLOG_PORT:-516} - -# Enable sysstat logging -SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} -SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} - -PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} -PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} - -# Use color for logging output (only available if syslog is not used) -LOG_COLOR=`trueorfalse True $LOG_COLOR` - -# Service startup timeout -SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} - -# Reset the bundle of CA certificates -SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem" -rm -f $SSL_BUNDLE_FILE - - -# Configure Projects -# ================== - -# Import apache functions -source $TOP_DIR/lib/apache - -# Import TLS functions -source $TOP_DIR/lib/tls - -# Source project function libraries -source $TOP_DIR/lib/infra -source $TOP_DIR/lib/oslo -source $TOP_DIR/lib/stackforge -source $TOP_DIR/lib/horizon -source $TOP_DIR/lib/keystone -source $TOP_DIR/lib/glance -source $TOP_DIR/lib/nova -source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/swift -source $TOP_DIR/lib/ceilometer -source $TOP_DIR/lib/heat -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/baremetal -source $TOP_DIR/lib/ldap -source $TOP_DIR/lib/ironic - -# Extras Source -# -------------- - -# Phase: source -if [[ -d $TOP_DIR/extras.d ]]; then - for i in $TOP_DIR/extras.d/*.sh; do - [[ -r $i ]] && source $i source - done -fi - -# Set the destination directories for other OpenStack projects -OPENSTACKCLIENT_DIR=$DEST/python-openstackclient - -# Interactive Configuration -# ------------------------- - -# Do all interactive config up front before the logging spew begins - -# Generic helper to configure passwords -function read_password { - XTRACE=$(set +o | grep xtrace) - set +o xtrace - var=$1; msg=$2 - pw=${!var} - - localrc=$TOP_DIR/localrc - - # If the password is not defined yet, proceed to prompt user for a password. - if [ ! $pw ]; then - # If there is no localrc file, create one - if [ ! -e $localrc ]; then - touch $localrc - fi - - # Presumably if we got this far it can only be that our localrc is missing - # the required password. Prompt user for a password and write to localrc. - echo '' - echo '################################################################################' - echo $msg - echo '################################################################################' - echo "This value will be written to your localrc file so you don't have to enter it " - echo "again. Use only alphanumeric characters." - echo "If you leave this blank, a random default value will be used." 
- pw=" " - while true; do - echo "Enter a password now:" - read -e $var - pw=${!var} - [[ "$pw" = "`echo $pw | tr -cd [:alnum:]`" ]] && break - echo "Invalid chars in password. Try again:" - done - if [ ! $pw ]; then - pw=`openssl rand -hex 10` - fi - eval "$var=$pw" - echo "$var=$pw" >> $localrc - fi - $XTRACE -} - - -# Database Configuration - -# To select between database backends, add the following to ``localrc``: -# -# disable_service mysql -# enable_service postgresql -# -# The available database backends are listed in ``DATABASE_BACKENDS`` after -# ``lib/database`` is sourced. ``mysql`` is the default. - -initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled" - - -# Queue Configuration - -# Rabbit connection info -if is_service_enabled rabbit; then - RABBIT_HOST=${RABBIT_HOST:-localhost} - read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." -fi - - -# Keystone - -if is_service_enabled key; then - # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is - # just a string and is not a 'real' Keystone token. - read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN." - # Services authenticate to Identity with servicename/``SERVICE_PASSWORD`` - read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION." - # Horizon currently truncates usernames and passwords at 20 characters - read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." - - # Keystone can now optionally install OpenLDAP by enabling the ``ldap`` - # service in ``localrc`` (e.g. ``enable_service ldap``). - # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP`` - # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``. To enable the - # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``) - # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g. - # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``. - - # only request ldap password if the service is enabled - if is_service_enabled ldap; then - read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" - fi -fi - - -# Swift - -if is_service_enabled s-proxy; then - # We only ask for Swift Hash if we have enabled swift service. - # ``SWIFT_HASH`` is a random unique string for a swift cluster that - # can never change. - read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." -fi - - -# Configure logging -# ----------------- - -# Draw a spinner so the user knows something is happening -function spinner() { - local delay=0.75 - local spinstr='/-\|' - printf "..." >&3 - while [ true ]; do - local temp=${spinstr#?} - printf "[%c]" "$spinstr" >&3 - local spinstr=$temp${spinstr%"$temp"} - sleep $delay - printf "\b\b\b" >&3 - done -} - -# Echo text to the log file, summary log file and stdout -# echo_summary "something to say" -function echo_summary() { - if [[ -t 3 && "$VERBOSE" != "True" ]]; then - kill >/dev/null 2>&1 $LAST_SPINNER_PID - if [ ! -z "$LAST_SPINNER_PID" ]; then - printf "\b\b\bdone\n" >&3 - fi - echo -n -e $@ >&6 - spinner & - LAST_SPINNER_PID=$! 
-    else
-        echo -e $@ >&6
-    fi
-}
-
-# Echo text only to stdout, no log files
-# echo_nolog "something not for the logs"
-function echo_nolog() {
-    echo $@ >&3
-}
-
-# Set up logging for ``stack.sh``
-# Set ``LOGFILE`` to turn on logging
-# Append '.xxxxxxxx' to the given name to maintain history
-# where 'xxxxxxxx' is a representation of the date the file was created
-TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
-if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then
-    LOGDAYS=${LOGDAYS:-7}
-    CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
-fi
-
-if [[ -n "$LOGFILE" ]]; then
-    # First clean up old log files. Use the user-specified ``LOGFILE``
-    # as the template to search for, appending '.*' to match the date
-    # we added on earlier runs.
-    LOGDIR=$(dirname "$LOGFILE")
-    LOGFILENAME=$(basename "$LOGFILE")
-    mkdir -p $LOGDIR
-    find $LOGDIR -maxdepth 1 -name $LOGFILENAME.\* -mtime +$LOGDAYS -exec rm {} \;
-    LOGFILE=$LOGFILE.${CURRENT_LOG_TIME}
-    # $LOGFILE already carries the timestamp; do not append it twice
-    SUMFILE=$LOGFILE.summary
-
-    # Redirect output according to config
-
-    # Copy stdout to fd 3
-    exec 3>&1
-    if [[ "$VERBOSE" == "True" ]]; then
-        # Redirect stdout/stderr to tee to write the log file
-        exec 1> >( awk '
-                {
-                    cmd ="date +\"%Y-%m-%d %H:%M:%S \""
-                    cmd | getline now
-                    close("date +\"%Y-%m-%d %H:%M:%S \"")
-                    sub(/^/, now)
-                    print
-                    fflush()
-                }' | tee "${LOGFILE}" ) 2>&1
-        # Set up a second fd for output
-        exec 6> >( tee "${SUMFILE}" )
-    else
-        # Set fd 1 and 2 to primary logfile
-        exec 1> "${LOGFILE}" 2>&1
-        # Set fd 6 to summary logfile and stdout
-        exec 6> >( tee "${SUMFILE}" >&3 )
-    fi
-
-    echo_summary "stack.sh log $LOGFILE"
-    # Specified logfile name always links to the most recent log
-    ln -sf $LOGFILE $LOGDIR/$LOGFILENAME
-    ln -sf $SUMFILE $LOGDIR/$LOGFILENAME.summary
-else
-    # Set up output redirection without log files
-    # Copy stdout to fd 3
-    exec 3>&1
-    if [[ "$VERBOSE" != "True" ]]; then
-        # Throw away stdout and stderr
-        exec 1>/dev/null 2>&1
-    fi
-    # Always send summary fd to original stdout
-    exec 6>&3
-fi
-
-# Set up logging of screen windows
-# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the
-# directory specified in ``SCREEN_LOGDIR``; we will log to the file
-# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link
-# ``screen-$SERVICE_NAME.log`` to the latest log file.
-# Logs are kept for as long as specified in ``LOGDAYS``.
-if [[ -n "$SCREEN_LOGDIR" ]]; then
-
-    # We make sure the directory is created.
-    if [[ -d "$SCREEN_LOGDIR" ]]; then
-        # We cleanup the old logs
-        find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \;
-    else
-        mkdir -p $SCREEN_LOGDIR
-    fi
-fi
-
-
-# Set Up Script Execution
-# -----------------------
-
-# Kill background processes on exit
-trap clean EXIT
-clean() {
-    local r=$?
-    kill >/dev/null 2>&1 $(jobs -p)
-    exit $r
-}
-
-
-# Exit on any errors so that errors don't compound
-trap failed ERR
-failed() {
-    local r=$?
-    kill >/dev/null 2>&1 $(jobs -p)
-    set +o xtrace
-    [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
-    exit $r
-}
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following along as the install occurs.
-set -o xtrace
-
-
-# Install Packages
-# ================
-
-# OpenStack uses a fair number of other projects.
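
# NOTE(editor): the prerequisite step below is sourced rather than executed
# precisely so that anything it exports stays visible to the rest of this
# shell. A minimal sketch of that distinction, using a hypothetical file:
#
#     cat > /tmp/prereqs_demo.sh <<'DEMO'
#     export PREREQS_DONE=1
#     DEMO
#     bash /tmp/prereqs_demo.sh       # child shell: PREREQS_DONE is lost
#     source /tmp/prereqs_demo.sh     # this shell: PREREQS_DONE survives
#     echo "PREREQS_DONE=${PREREQS_DONE}"
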
-
-# Install package requirements
-# Source it so the entire environment is available
-echo_summary "Installing package prerequisites"
-source $TOP_DIR/tools/install_prereqs.sh
-
-# Configure an appropriate python environment
-if [[ "$OFFLINE" != "True" ]]; then
-    $TOP_DIR/tools/install_pip.sh
-fi
-
-# Do the ugly hacks for broken packages and distros
-$TOP_DIR/tools/fixup_stuff.sh
-
-install_rpc_backend
-
-if is_service_enabled $DATABASE_BACKENDS; then
-    install_database
-fi
-
-if is_service_enabled neutron; then
-    install_neutron_agent_packages
-fi
-
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
-
-# Install python packages into a virtualenv so that we can track them
-if [[ $TRACK_DEPENDS = True ]]; then
-    echo_summary "Installing Python packages into a virtualenv $DEST/.venv"
-    pip_install -U virtualenv
-
-    rm -rf $DEST/.venv
-    virtualenv --system-site-packages $DEST/.venv
-    source $DEST/.venv/bin/activate
-    $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip
-fi
-
-# Check Out and Install Source
-# ----------------------------
-
-echo_summary "Installing OpenStack project source"
-
-# Install required infra support libraries
-install_infra
-
-# Install oslo libraries that have graduated
-install_oslo
-
-# Install stackforge libraries for testing
-if is_service_enabled stackforge_libs; then
-    install_stackforge
-fi
-
-# Install client libraries
-install_keystoneclient
-install_glanceclient
-install_cinderclient
-install_novaclient
-if is_service_enabled swift glance horizon; then
-    install_swiftclient
-fi
-if is_service_enabled neutron nova horizon; then
-    install_neutronclient
-fi
-if is_service_enabled heat horizon; then
-    install_heatclient
-fi
-
-git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
-setup_develop $OPENSTACKCLIENT_DIR
-
-if is_service_enabled key; then
-    install_keystone
-    configure_keystone
-fi
-
-if is_service_enabled s-proxy; then
-    install_swift
-    configure_swift
-
-    # swift3 middleware to provide S3 emulation to Swift
-    if is_service_enabled swift3; then
-        # replace the nova-objectstore port with the swift port
-        S3_SERVICE_PORT=8080
-        git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH
-        setup_develop $SWIFT3_DIR
-    fi
-fi
-
-if is_service_enabled g-api n-api; then
-    # image catalog service
-    install_glance
-    configure_glance
-fi
-
-if is_service_enabled cinder; then
-    install_cinder
-    configure_cinder
-fi
-
-if is_service_enabled neutron; then
-    install_neutron
-    install_neutron_third_party
-fi
-
-if is_service_enabled nova; then
-    # compute service
-    install_nova
-    cleanup_nova
-    configure_nova
-fi
-
-if is_service_enabled horizon; then
-    # dashboard
-    install_horizon
-    configure_horizon
-fi
-
-if is_service_enabled ceilometer; then
-    install_ceilometerclient
-    install_ceilometer
-    echo_summary "Configuring Ceilometer"
-    configure_ceilometer
-    configure_ceilometerclient
-fi
-
-if is_service_enabled heat; then
-    install_heat
-    cleanup_heat
-    configure_heat
-fi
-
-if is_service_enabled tls-proxy; then
-    configure_CA
-    init_CA
-    init_cert
-    # Add name to /etc/hosts
-    # don't be naive and add to existing line!
-fi
-
-if is_service_enabled ir-api ir-cond; then
-    install_ironic
-    install_ironicclient
-    configure_ironic
-fi
-
-# Extras Install
-# --------------
-
-# Phase: install
-if [[ -d $TOP_DIR/extras.d ]]; then
-    for i in $TOP_DIR/extras.d/*.sh; do
-        [[ -r $i ]] && source $i stack install
-    done
-fi
-
-if [[ $TRACK_DEPENDS = True ]]; then
-    $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
-    if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
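
# NOTE(editor): diff exits non-zero when its inputs differ, so `if !` takes
# this branch exactly when the pre- and post-install pip freezes disagree,
# with requires.diff capturing what the install pulled in. A self-contained
# sketch of the same pattern with throwaway files:
#
#     printf 'a\n' > /tmp/pre; printf 'b\n' > /tmp/post
#     if ! diff -Nru /tmp/pre /tmp/post > /tmp/out.diff; then cat /tmp/out.diff; fi
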
-        echo "Detected changes in pip-installed packages while in dependency tracking mode"
-        cat $DEST/requires.diff
-    fi
-    echo "Ran stack.sh in dependency tracking mode, bailing out now"
-    exit 0
-fi
-
-
-# Syslog
-# ------
-
-if [[ $SYSLOG != "False" ]]; then
-    if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then
-        # Configure the master host to receive
-        cat <<EOF >/tmp/90-stack-m.conf
-\$ModLoad imrelp
-\$InputRELPServerRun $SYSLOG_PORT
-EOF
-        sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d
-    else
-        # Set rsyslog to send to remote host
-        cat <<EOF >/tmp/90-stack-s.conf
-*.* :omrelp:$SYSLOG_HOST:$SYSLOG_PORT
-EOF
-        sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d
-    fi
-
-    RSYSLOGCONF="/etc/rsyslog.conf"
-    if [ -f $RSYSLOGCONF ]; then
-        sudo cp -b $RSYSLOGCONF $RSYSLOGCONF.bak
-        if [[ $(grep '$SystemLogRateLimitBurst' $RSYSLOGCONF) ]]; then
-            sudo sed -i 's/$SystemLogRateLimitBurst\ .*/$SystemLogRateLimitBurst\ 0/' $RSYSLOGCONF
-        else
-            sudo sed -i '$ i $SystemLogRateLimitBurst\ 0' $RSYSLOGCONF
-        fi
-        if [[ $(grep '$SystemLogRateLimitInterval' $RSYSLOGCONF) ]]; then
-            sudo sed -i 's/$SystemLogRateLimitInterval\ .*/$SystemLogRateLimitInterval\ 0/' $RSYSLOGCONF
-        else
-            sudo sed -i '$ i $SystemLogRateLimitInterval\ 0' $RSYSLOGCONF
-        fi
-    fi
-
-    echo_summary "Starting rsyslog"
-    restart_service rsyslog
-fi
-
-
-# Finalize queue installation
-# ---------------------------
-restart_rpc_backend
-
-
-# Export Certificate Authority Bundle
-# -----------------------------------
-
-# If certificates were used and written to the SSL bundle file then these
-# should be exported so clients can validate their connections.
-
-if [ -f $SSL_BUNDLE_FILE ]; then
-    export OS_CACERT=$SSL_BUNDLE_FILE
-fi
-
-
-# Configure database
-# ------------------
-
-if is_service_enabled $DATABASE_BACKENDS; then
-    configure_database
-fi
-
-
-# Configure screen
-# ----------------
-
-USE_SCREEN=$(trueorfalse True $USE_SCREEN)
-if [[ "$USE_SCREEN" == "True" ]]; then
-    # Create a new named screen to run processes in
-    screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
-    sleep 1
-
-    # Set a reasonable status bar
-    if [ -z "$SCREEN_HARDSTATUS" ]; then
-        SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
-    fi
-    screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
-    screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true
-fi
-
-# Clear screen rc file
-SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
-if [[ -e $SCREENRC ]]; then
-    rm -f $SCREENRC
-fi
-
-# Initialize the directory for service status check
-init_service_check
-
-
-# Sysstat
-# -------
-
-# If enabled, sysstat has to start early to track OpenStack service startup.
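
# NOTE(editor): the SYSSTAT_OPTS assembled below map directly onto sar(1)
# flags; a quick way to preview the same counters interactively (1-second
# interval, 3 samples) is:
#
#     sar -u -q -b -w 1 3
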
-if is_service_enabled sysstat; then
-    # what we want to measure
-    # -u : cpu statistics
-    # -q : load
-    # -b : io load rates
-    # -w : process creation and context switch rates
-    SYSSTAT_OPTS="-u -q -b -w"
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
-    else
-        screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL"
-    fi
-fi
-
-if is_service_enabled pidstat; then
-    # Per-process stats
-    PIDSTAT_OPTS="-l -p ALL -T ALL"
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE"
-    else
-        screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL"
-    fi
-fi
-
-
-# Start Services
-# ==============
-
-# Keystone
-# --------
-
-if is_service_enabled key; then
-    echo_summary "Starting Keystone"
-    init_keystone
-    start_keystone
-
-    # Set up a temporary admin URI for Keystone
-    SERVICE_ENDPOINT=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
-
-    if is_service_enabled tls-proxy; then
-        export OS_CACERT=$INT_CA_DIR/ca-chain.pem
-        # Until the client support is fixed, just use the internal endpoint
-        SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0
-    fi
-
-    # Do the keystone-specific bits from keystone_data.sh
-    export OS_SERVICE_TOKEN=$SERVICE_TOKEN
-    export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT
-    create_keystone_accounts
-    create_nova_accounts
-    create_cinder_accounts
-    create_neutron_accounts
-
-    if is_service_enabled ceilometer; then
-        create_ceilometer_accounts
-    fi
-
-    if is_service_enabled swift || is_service_enabled s-proxy; then
-        create_swift_accounts
-    fi
-
-    # ``keystone_data.sh`` creates services, admin and demo users, and roles.
-    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
-    SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
-    S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
-    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \
-    HEAT_API_PORT=$HEAT_API_PORT \
-        bash -x $FILES/keystone_data.sh
-
-    # Set up auth creds now that keystone is bootstrapped
-    export OS_AUTH_URL=$SERVICE_ENDPOINT
-    export OS_TENANT_NAME=admin
-    export OS_USERNAME=admin
-    export OS_PASSWORD=$ADMIN_PASSWORD
-    unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
-fi
-
-
-# Horizon
-# -------
-
-# Set up the django horizon application to serve via apache/wsgi
-
-if is_service_enabled horizon; then
-    echo_summary "Configuring and starting Horizon"
-    init_horizon
-    start_horizon
-fi
-
-
-# Glance
-# ------
-
-if is_service_enabled g-reg; then
-    echo_summary "Configuring Glance"
-    init_glance
-fi
-
-
-# Ironic
-# ------
-
-if is_service_enabled ir-api ir-cond; then
-    echo_summary "Configuring Ironic"
-    init_ironic
-fi
-
-
-# Neutron
-# -------
-
-if is_service_enabled neutron; then
-    echo_summary "Configuring Neutron"
-
-    configure_neutron
-    # Run init_neutron only on the node hosting the neutron API server
-    if is_service_enabled $DATABASE_BACKENDS && is_service_enabled q-svc; then
-        init_neutron
-    fi
-fi
-
-# Some Neutron plugins require network controllers which are not
-# a part of the OpenStack project. Configure and start them.
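
# NOTE(editor): each third-party controller supplies the hooks invoked below
# from its own file (in this tree they live under lib/neutron_thirdparty/).
# A minimal, hypothetical plugin only needs to define the expected entry
# points:
#
#     # lib/neutron_thirdparty/example (hypothetical)
#     function configure_example() { :; }    # write config files
#     function init_example()      { :; }    # one-time initialization
#     function start_example()     { :; }    # launch the controller
#     function stop_example()      { :; }    # used by unstack.sh
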
-if is_service_enabled neutron; then - configure_neutron_third_party - init_neutron_third_party - start_neutron_third_party -fi - - -# Nova -# ---- - -if is_service_enabled n-net q-dhcp; then - # Delete traces of nova networks from prior runs - # Do not kill any dnsmasq instance spawned by NetworkManager - netman_pid=$(pidof NetworkManager || true) - if [ -z "$netman_pid" ]; then - sudo killall dnsmasq || true - else - sudo ps h -o pid,ppid -C dnsmasq | grep -v $netman_pid | awk '{print $1}' | sudo xargs kill || true - fi - - clean_iptables - rm -rf ${NOVA_STATE_PATH}/networks - sudo mkdir -p ${NOVA_STATE_PATH}/networks - safe_chown -R ${USER} ${NOVA_STATE_PATH}/networks - # Force IP forwarding on, just in case - sudo sysctl -w net.ipv4.ip_forward=1 -fi - - -# Storage Service -# --------------- - -if is_service_enabled s-proxy; then - echo_summary "Configuring Swift" - init_swift -fi - - -# Volume Service -# -------------- - -if is_service_enabled cinder; then - echo_summary "Configuring Cinder" - init_cinder -fi - - -# Compute Service -# --------------- - -if is_service_enabled nova; then - echo_summary "Configuring Nova" - init_nova - - # Additional Nova configuration that is dependent on other services - if is_service_enabled neutron; then - create_nova_conf_neutron - elif is_service_enabled n-net; then - create_nova_conf_nova_network - fi - - init_nova_cells -fi - -# Extra things to prepare nova for baremetal, before nova starts -if is_service_enabled nova && is_baremetal; then - echo_summary "Preparing for nova baremetal" - prepare_baremetal_toolchain - configure_baremetal_nova_dirs - if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - create_fake_baremetal_env - fi -fi - - -# Extras Configuration -# ==================== - -# Phase: post-config -if [[ -d $TOP_DIR/extras.d ]]; then - for i in $TOP_DIR/extras.d/*.sh; do - [[ -r $i ]] && source $i stack post-config - done -fi - - -# Local Configuration -# =================== - -# Apply configuration from local.conf if it exists for layer 2 services -# Phase: post-config -merge_config_group $TOP_DIR/local.conf post-config - - -# Launch Services -# =============== - -# Only run the services specified in ``ENABLED_SERVICES`` - -# Launch Swift Services -if is_service_enabled s-proxy; then - echo_summary "Starting Swift" - start_swift -fi - -# Launch the Glance services -if is_service_enabled g-api g-reg; then - echo_summary "Starting Glance" - start_glance -fi - -# Launch the Ironic services -if is_service_enabled ir-api ir-cond; then - echo_summary "Starting Ironic" - start_ironic -fi - -# Create an access key and secret key for nova ec2 register image -if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then - NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) - die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova" - NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) - die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME" - CREDS=$(keystone ec2-credentials-create --user-id $NOVA_USER_ID --tenant-id $NOVA_TENANT_ID) - ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') - SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') - iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY" - iniset $NOVA_CONF DEFAULT s3_secret_key "$SECRET_KEY" - iniset $NOVA_CONF DEFAULT s3_affix_tenant "True" -fi - -# Create a randomized default value for the keymgr's fixed_key -if is_service_enabled nova; then - 
FIXED_KEY=""
-    for i in $(seq 1 64);
-        do FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc);
-    done;
-    iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY"
-fi
-
-if is_service_enabled zeromq; then
-    echo_summary "Starting zeromq receiver"
-    screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
-fi
-
-# Launch the nova-api and wait for it to answer before continuing
-if is_service_enabled n-api; then
-    echo_summary "Starting Nova API"
-    start_nova_api
-fi
-
-if is_service_enabled q-svc; then
-    echo_summary "Starting Neutron"
-    start_neutron_service_and_check
-    check_neutron_third_party_integration
-elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
-    NM_CONF=${NOVA_CONF}
-    if is_service_enabled n-cell; then
-        NM_CONF=${NOVA_CELLS_CONF}
-    fi
-
-    # Create a small network
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
-
-    # Create some floating ips
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
-
-    # Create a second pool
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
-fi
-
-if is_service_enabled neutron; then
-    start_neutron_agents
-fi
-# Once neutron agents are started, set up initial network elements
-if is_service_enabled q-svc; then
-    echo_summary "Creating initial neutron network elements"
-    create_neutron_initial_network
-    setup_neutron_debug
-fi
-if is_service_enabled nova; then
-    echo_summary "Starting Nova"
-    start_nova
-fi
-if is_service_enabled cinder; then
-    echo_summary "Starting Cinder"
-    start_cinder
-fi
-if is_service_enabled ceilometer; then
-    echo_summary "Starting Ceilometer"
-    init_ceilometer
-    start_ceilometer
-fi
-
-# Configure and launch heat engine, api and metadata
-if is_service_enabled heat; then
-    # Initialize heat, including replacing nova flavors
-    echo_summary "Configuring Heat"
-    init_heat
-    echo_summary "Starting Heat"
-    start_heat
-fi
-
-
-# Create account rc files
-# =======================
-
-# Creates sourceable script files for easier user switching.
-# This step also creates certificates for tenants and users,
-# which is helpful in image bundle steps.
-
-if is_service_enabled nova && is_service_enabled key; then
-    USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc"
-
-    if [ -f $SSL_BUNDLE_FILE ]; then
-        USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE"
-    fi
-
-    $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS
-fi
-
-
-# Install Images
-# ==============
-
-# Upload an image to glance.
-#
-# The default image is cirros, a small testing image which lets you login as **root**.
-# cirros has a ``cloud-init`` analog supporting login via keypair and sending
-# scripts as userdata.
-# See https://help.ubuntu.com/community/CloudInit for more on cloud-init
-#
-# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
-# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
-
-if is_service_enabled g-reg; then
-    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
-    die_if_not_set $LINENO TOKEN "Keystone failed to get token"
-
-    if is_baremetal; then
-        echo_summary "Creating and uploading baremetal images"
-
-        # build and upload separate deploy kernel & ramdisk
-        upload_baremetal_deploy $TOKEN
-
-        # upload images, separating out the kernel & ramdisk for PXE boot
-        for image_url in ${IMAGE_URLS//,/ }; do
-            upload_baremetal_image $image_url $TOKEN
-        done
-    else
-        echo_summary "Uploading images"
-
-        # Option to upload legacy ami-tty, which works with xenserver
-        if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
-            IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
-        fi
-
-        for image_url in ${IMAGE_URLS//,/ }; do
-            upload_image $image_url $TOKEN
-        done
-    fi
-fi
-
-# If we are running nova with baremetal driver, there are a few
-# last-mile configuration bits to attend to, which must happen
-# after n-api and n-sch have started.
-# Also, creating the baremetal flavor must happen after images
-# are loaded into glance, though just knowing the IDs is sufficient here.
-if is_service_enabled nova && is_baremetal; then
-    # create special flavor for baremetal if we know what images to associate
-    [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \
-        create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
-
-    # otherwise user can manually add it later by calling nova-baremetal-manage
-    [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node
-
-    if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "False" ]]; then
-        # NOTE: we do this here to ensure that our copy of dnsmasq is running
-        sudo pkill dnsmasq || true
-        sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
-            --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
-            --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \
-            ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS}
-    fi
-    # ensure callback daemon is running
-    sudo pkill nova-baremetal-deploy-helper || true
-    screen_it baremetal "cd ; nova-baremetal-deploy-helper"
-fi
-
-# Save some values we generated for later use
-CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
-echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
-for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \
-    SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP KEYSTONE_AUTH_PROTOCOL OS_CACERT; do
-    echo $i=${!i} >>$TOP_DIR/.stackenv
-done
-
-
-# Local Configuration
-# ===================
-
-# Apply configuration from local.conf if it exists for layer 2 services
-# Phase: extra
-merge_config_group $TOP_DIR/local.conf extra
-
-
-# Run extras
-# ==========
-
-# Phase: extra
-if [[ -d $TOP_DIR/extras.d ]]; then
-    for i in $TOP_DIR/extras.d/*.sh; do
-        [[ -r $i ]] && source $i stack extra
-    done
-fi
-
-# Local Configuration
-# ===================
-
-# Apply configuration from local.conf if it exists for layer 2 services
-# Phase: post-extra
-merge_config_group $TOP_DIR/local.conf post-extra
-
-
-# Run local script
-# ================
-
-# Run ``local.sh`` if it exists to perform user-managed tasks
-if [[ -x $TOP_DIR/local.sh ]]; then
-    echo "Running user script $TOP_DIR/local.sh"
-    $TOP_DIR/local.sh
-fi
-
-# Check the status of running services
-service_check
-
-
-# Fin
-# ===
-
-set +o xtrace
-
-if [[ -n "$LOGFILE" ]]; then
-    exec 1>&3
-    # Force all output to stdout and logs now
-    exec 1> >( tee -a "${LOGFILE}" ) 2>&1
-else
-    # Force all output to stdout now
-    exec 1>&3
-fi
-
-
-# Using the cloud
-# ---------------
-
-echo ""
-echo ""
-echo ""
-
-# If you installed Horizon on this server you should be able
-# to access the site using your browser.
-if is_service_enabled horizon; then
-    echo "Horizon is now available at http://$SERVICE_HOST/"
-fi
-
-# Warn that the default flavors have been changed by Heat
-if is_service_enabled heat; then
-    echo "Heat has replaced the default flavors. View by running: nova flavor-list"
-fi
-
-# If Keystone is present you can point ``nova`` cli to this server
-if is_service_enabled key; then
-    echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/"
-    echo "Examples of using the novaclient command line are in exercise.sh"
-    echo "The default users are: admin and demo"
-    echo "The password: $ADMIN_PASSWORD"
-fi
-
-# Echo ``HOST_IP`` - useful for ``build_uec.sh``, which uses dhcp to give the instance an address
-echo "This is your host ip: $HOST_IP"
-
-# Warn that a deprecated feature was used
-if [[ -n "$DEPRECATED_TEXT" ]]; then
-    echo_summary "WARNING: $DEPRECATED_TEXT"
-fi
-
-# Specific warning for deprecated configs
-if [[ -n "$EXTRA_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: EXTRA_OPTS is used"
-    echo "You are using EXTRA_OPTS to pass configuration into nova.conf."
-    echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
-    echo "
-[[post-config|\$NOVA_CONF]]
-[DEFAULT]
-"
-    for I in "${EXTRA_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: EXTRA_BAREMETAL_OPTS is used"
-    echo "You are using EXTRA_BAREMETAL_OPTS to pass configuration into nova.conf."
-    echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
-    echo "
-[[post-config|\$NOVA_CONF]]
-[baremetal]
-"
-    for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used"
-    echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE."
-    echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:"
-    echo "
-[[post-config|\$Q_DHCP_CONF_FILE]]
-[DEFAULT]
-"
-    for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used"
-    echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF."
-    echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
-    echo "
-[[post-config|\$NEUTRON_CONF]]
-[DEFAULT]
-"
-    for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-# Indicate how long this took to run (bash maintained variable ``SECONDS``)
-echo_summary "stack.sh completed in $SECONDS seconds."

diff --git a/unstack.sh b/unstack.sh
deleted file mode 100755
index 92d0642c38..0000000000
--- a/unstack.sh
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env bash
-
-# **unstack.sh**
-
-# Stops that which is started by ``stack.sh`` (mostly)
-# mysql and rabbit are left running as OpenStack code refreshes
-# do not require them to be restarted.
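
# NOTE(editor): typical invocations, matching the behavior described in this
# header and in the UNSTACK_ALL handling further down:
#
#     ./unstack.sh          # stop OpenStack services; leave mysql/rabbit up
#     ./unstack.sh --all    # also stop mysql, postgresql and rabbitmq-server
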
-# -# Stop all processes by setting ``UNSTACK_ALL`` or specifying ``--all`` -# on the command line - -# Keep track of the current devstack directory. -TOP_DIR=$(cd $(dirname "$0") && pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import database library -source $TOP_DIR/lib/database - -# Load local configuration -source $TOP_DIR/stackrc - -# Destination path for service data -DATA_DIR=${DATA_DIR:-${DEST}/data} - -if [[ $EUID -eq 0 ]]; then - echo "You are running this script as root." - echo "It might work but you will have a better day running it as $STACK_USER" - exit 1 -fi - - -# Configure Projects -# ================== - -# Import apache functions -source $TOP_DIR/lib/apache - -# Import TLS functions -source $TOP_DIR/lib/tls - -# Source project function libraries -source $TOP_DIR/lib/infra -source $TOP_DIR/lib/oslo -source $TOP_DIR/lib/stackforge -source $TOP_DIR/lib/horizon -source $TOP_DIR/lib/keystone -source $TOP_DIR/lib/glance -source $TOP_DIR/lib/nova -source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/swift -source $TOP_DIR/lib/ceilometer -source $TOP_DIR/lib/heat -source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/baremetal -source $TOP_DIR/lib/ldap -source $TOP_DIR/lib/ironic - -# Extras Source -# -------------- - -# Phase: source -if [[ -d $TOP_DIR/extras.d ]]; then - for i in $TOP_DIR/extras.d/*.sh; do - [[ -r $i ]] && source $i source - done -fi - -# Determine what system we are running on. This provides ``os_VENDOR``, -# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` -GetOSVersion - -if [[ "$1" == "--all" ]]; then - UNSTACK_ALL=${UNSTACK_ALL:-1} -fi - -# Run extras -# ========== - -# Phase: unstack -if [[ -d $TOP_DIR/extras.d ]]; then - for i in $TOP_DIR/extras.d/*.sh; do - [[ -r $i ]] && source $i unstack - done -fi - -if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - source $TOP_DIR/openrc - teardown_neutron_debug -fi - -# Call service stop - -if is_service_enabled heat; then - stop_heat -fi - -if is_service_enabled ceilometer; then - stop_ceilometer -fi - -if is_service_enabled nova; then - stop_nova -fi - -if is_service_enabled g-api g-reg; then - stop_glance -fi - -if is_service_enabled key; then - stop_keystone -fi - -# Swift runs daemons -if is_service_enabled s-proxy; then - stop_swift - cleanup_swift -fi - -# Ironic runs daemons -if is_service_enabled ir-api ir-cond; then - stop_ironic - cleanup_ironic -fi - -# Apache has the WSGI processes -if is_service_enabled horizon; then - stop_horizon -fi - -# Kill TLS proxies -if is_service_enabled tls-proxy; then - killall stud -fi - -# baremetal might have created a fake environment -if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - cleanup_fake_baremetal_env -fi - -SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* - -# Get the iSCSI volumes -if is_service_enabled cinder; then - stop_cinder - cleanup_cinder -fi - -if [[ -n "$UNSTACK_ALL" ]]; then - # Stop MySQL server - if is_service_enabled mysql; then - stop_service mysql - fi - - if is_service_enabled postgresql; then - stop_service postgresql - fi - - # Stop rabbitmq-server - if is_service_enabled rabbit; then - stop_service rabbitmq-server - fi -fi - -if is_service_enabled neutron; then - stop_neutron - stop_neutron_third_party - cleanup_neutron -fi - -if is_service_enabled trove; then - cleanup_trove -fi - -# Clean up the remainder of the screen processes -SCREEN=$(which screen) -if [[ -n "$SCREEN" ]]; then - SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') - if [[ -n "$SESSION" ]]; then - screen -X -S 
$SESSION quit - fi -fi - -cleanup_tmp
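
# NOTE(editor): the screen teardown above can be checked by hand; assuming the
# default session name of "stack", a leftover session is visible to screen -ls
# and can be removed the same way this script does it:
#
#     screen -ls | awk '/[0-9]\.stack/ { print $1 }'    # find the session
#     screen -X -S "<session>" quit                     # tear it down
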