File infoblox-ipam-juno.patch of Package openstack-neutron
diff --git a/etc/dhcp_agent.ini b/etc/dhcp_agent.ini
old mode 100644
new mode 100755
index 9836d35..6665b89
--- a/etc/dhcp_agent.ini
+++ b/etc/dhcp_agent.ini
@@ -86,3 +86,16 @@
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10
+
+# Name of a bridge through which dhcp relay agent will connect to external
+# network in which DHCP server resides
+# dhcp_relay_bridge = br-mgmt
+
+# Path to dhcrelay agent executable
+# dhcrelay_path = /usr/local/dhcp-4.3.0/sbin/dhcrelay
+
+# Path to dhclient executable
+# dhclient_path = /usr/local/dhcp-4.3.0/sbin/dhclient
+
+# Relay interface name length
+# interface_dev_name_len = 10
diff --git a/etc/neutron.conf b/etc/neutron.conf
index 7fa8795..45e0129 100644
--- a/etc/neutron.conf
+++ b/etc/neutron.conf
@@ -75,6 +75,9 @@ lock_path = $state_path/lock
# Paste configuration file
# api_paste_config = api-paste.ini
+# IPAM driver
+# ipam_driver = neutron.ipam.drivers.neutron_ipam.NeutronIPAM
+
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone
diff --git a/etc/neutron/rootwrap.d/dhcp.filters b/etc/neutron/rootwrap.d/dhcp.filters
index 7c11d70..8a8e07d 100644
--- a/etc/neutron/rootwrap.d/dhcp.filters
+++ b/etc/neutron/rootwrap.d/dhcp.filters
@@ -9,7 +9,8 @@
[Filters]
# dhcp-agent
-dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID=
+dnsmasq: EnvFilter, env, root, NEUTRON_NETWORK_ID=, dnsmasq
+
# dhcp-agent uses kill as well, that's handled by the generic KillFilter
# it looks like these are the only signals needed, per
# neutron/agent/linux/dhcp.py
@@ -20,6 +21,9 @@ ovs-vsctl: CommandFilter, ovs-vsctl, root
ivs-ctl: CommandFilter, ivs-ctl, root
mm-ctl: CommandFilter, mm-ctl, root
dhcp_release: CommandFilter, dhcp_release, root
+dhcrelay: CommandFilter, /usr/local/dhcp-4.3.0/sbin/dhcrelay, root
+dhclient: CommandFilter, /usr/local/dhcp-4.3.0/sbin/dhclient, root
+kill: CommandFilter, kill, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
diff --git a/neutron/agent/dhcp_agent.py b/neutron/agent/dhcp_agent.py
index 19b8e9a..2c24634 100644
--- a/neutron/agent/dhcp_agent.py
+++ b/neutron/agent/dhcp_agent.py
@@ -23,6 +23,7 @@ from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import dhcp
+from neutron.agent.linux import dhcp_relay
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ovs_lib # noqa
@@ -616,6 +617,7 @@ def register_options():
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp.OPTS)
+ cfg.CONF.register_opts(dhcp_relay.OPTS)
cfg.CONF.register_opts(interface.OPTS)
diff --git a/neutron/agent/linux/dhcp_relay.py b/neutron/agent/linux/dhcp_relay.py
new file mode 100755
index 0000000..17b39eb
--- /dev/null
+++ b/neutron/agent/linux/dhcp_relay.py
@@ -0,0 +1,474 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+import random
+
+import netaddr
+from oslo.config import cfg
+
+from neutron.agent.linux import dhcp
+from neutron.agent.linux import ip_lib
+from neutron.agent.linux import utils
+from neutron.common import exceptions as exc
+from neutron.common import ipv6_utils
+from neutron.openstack.common import log as logging
+from neutron.openstack.common import uuidutils
+
+LOG = logging.getLogger(__name__)
+
+OPTS = [
+ cfg.ListOpt('external_dhcp_servers',
+ default=None,
+ help=_('IP addresses of DHCP servers to relay to.')),
+ cfg.ListOpt('external_dns_servers',
+ default=None,
+ help=_('IP addresses of DNS servers to relay to.')),
+ cfg.StrOpt('dhcp_relay_bridge',
+ default=None,
+ help=_('Name of a bridge through which ipam proxy agent will'
+ ' connect to external network in which DHCP and DNS'
+ ' server resides.')),
+ cfg.StrOpt('dhclient_path',
+ default='dhclient',
+ help=_('Path to dhclient executable.')),
+ cfg.StrOpt('dhcrelay_path',
+ default='dhcrelay',
+ help=_('Path to dhcrelay executable.')),
+ cfg.BoolOpt('use_link_selection_option',
+ default=True,
+ help=_('Run dhcrelay with -o flag.')),
+ cfg.BoolOpt('use_ipv6_unicast_requests',
+ default=True,
+ help=_('Run dhcrelay -u server1%iface2 -u server2%iface2')),
+ cfg.StrOpt('dhcp_relay_management_network',
+ default=None,
+ help=_("CIDR for the management network served by "
+ "Infoblox DHCP member")),
+ cfg.BoolOpt('enable_ipv6_relay',
+ default=True,
+ help=_('Enable/Disable DHCP/DNS relay for IPv6'))
+]
+
+
+MGMT_INTERFACE_IP_ATTR = 'mgmt_iface_ip'
+
+
+def _generate_mac_address():
+ mac = [0x00, 0x16, 0x3e,
+ random.randint(0x00, 0x7f),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ return ':'.join(map(lambda x: "%02x" % x, mac))
+
+
+class DhcpDnsProxy(dhcp.DhcpLocalProcess):
+ """DHCP & DNS relay agent class."""
+
+ MINIMUM_VERSION = 0
+ DEV_NAME_LEN = 14
+ RELAY_DEV_NAME_PREFIX = 'trel'
+ NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID'
+ DHCPv4 = 4
+ DHCPv6 = 6
+
+ def _calc_dev_name_len(self):
+ if self.conf.interface_dev_name_len:
+ return self.conf.interface_dev_name_len
+ else:
+ return self.DEV_NAME_LEN
+
+ def _enable_dns_dhcp(self):
+ """check if there is a subnet within the network with dhcp enabled."""
+ for subnet in self.network.subnets:
+ if subnet.enable_dhcp:
+ return True
+ return False
+
+ def __init__(self, conf, network, root_helper='sudo',
+ version=None, plugin=None):
+ super(DhcpDnsProxy, self).__init__(conf, network, root_helper,
+ version, plugin)
+
+ external_dhcp_servers = self._get_relay_ips('external_dhcp_servers')
+ external_dns_servers = self._get_relay_ips('external_dns_servers')
+ required_options = {'dhcp_relay_bridge': self.conf.dhcp_relay_bridge,
+ 'external_dhcp_servers': external_dhcp_servers,
+ 'external_dns_servers': external_dns_servers}
+
+ for option_name, option in required_options.iteritems():
+ if not option:
+ LOG.error(_('You must specify an %(opt)s option in config'),
+ {'opt': option_name})
+ raise exc.InvalidConfigurationOption(
+ opt_name=option_name,
+ opt_value=option
+ )
+
+ self.dev_name_len = self._calc_dev_name_len()
+ self.device_manager = DnsDhcpProxyDeviceManager(
+ conf, root_helper, plugin)
+
+ @classmethod
+ def check_version(cls):
+ return 0
+
+ @classmethod
+ def get_isolated_subnets(cls, network):
+ """Returns a dict indicating whether or not a subnet is isolated"""
+ if hasattr(dhcp.Dnsmasq, 'get_isolated_subnets') \
+ and callable(getattr(dhcp.Dnsmasq, 'get_isolated_subnets')):
+ dhcp.Dnsmasq.get_isolated_subnets(network)
+
+ @classmethod
+ def should_enable_metadata(cls, conf, network):
+ """True if the metadata-proxy should be enabled for the network."""
+ if hasattr(dhcp.Dnsmasq, 'should_enable_metadata') \
+ and callable(getattr(dhcp.Dnsmasq, 'should_enable_metadata')):
+ dhcp.Dnsmasq.should_enable_metadata(conf, network)
+ else:
+ conf.enable_isolated_metadata
+
+ @classmethod
+ def existing_dhcp_networks(cls, conf, root_helper):
+ """Return a list of existing networks ids that we have configs for."""
+ confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs))
+ return [
+ c for c in os.listdir(confs_dir)
+ if uuidutils.is_uuid_like(c)
+ ]
+
+ def release_lease(self, mac_address, removed_ips):
+ """Release a DHCP lease."""
+ pass
+
+ def reload_allocations(self):
+ """Force the DHCP server to reload the assignment database."""
+ pass
+
+ @property
+ def ipv6_enabled(self):
+ return self.conf.enable_ipv6_relay and ipv6_utils.is_enabled()
+
+ def get_dhcp_pid(self, version):
+ """Last known pid for the dhcrelay process spawned for this network."""
+ return self._get_value_from_conf_file('dhcp%s_pid' % version, int)
+
+ def get_dns_pid(self):
+ """Last known pid for the dnsmasq process spawned for this network."""
+ return self._get_value_from_conf_file('dns_pid', int)
+
+ def is_dhcrelay_pid(self, pid):
+ pid_path = '/proc/%s/cmdline' % pid
+ if (pid and os.path.isdir('/proc/%s/' % pid) and
+ self.conf.dhcrelay_path in open(pid_path).read()):
+ return True
+ return False
+
+ def is_dhcp_active(self):
+ """Is any dhcprelay still active"""
+ pids = [self.get_dhcp_pid(version=DhcpDnsProxy.DHCPv4)]
+ if self.ipv6_enabled:
+ pids.append(self.get_dhcp_pid(version=DhcpDnsProxy.DHCPv6))
+
+ if not any(pids):
+ return False
+
+ for pid in pids:
+ if self.is_dhcrelay_pid(pid):
+ return True
+ return False
+
+ def is_dns_active(self):
+ pid = self.get_dns_pid()
+ if not pid:
+ return False
+ return os.path.isdir('/proc/%s/' % pid)
+
+ def enable(self):
+ relay_iface_name = self._get_relay_device_name()
+ relay_iface_mac_address = _generate_mac_address()
+ self.device_manager.setup_relay(
+ self.network,
+ relay_iface_name,
+ relay_iface_mac_address,
+ self.conf.dhcp_relay_bridge)
+
+ interface_name = self.device_manager.setup(self.network)
+ if self.is_dhcp_active() or self.is_dns_active():
+ self.restart()
+ elif self._enable_dns_dhcp():
+ self.interface_name = interface_name
+ self.spawn_process()
+
+ def disable(self, retain_port=False):
+ def kill_proc(pid):
+ if not pid:
+ return
+ cmd = ['kill', '-9', pid]
+ utils.execute(cmd, self.root_helper)
+
+ def check_dhcp_pid():
+ if self.ipv6_enabled:
+ return self.get_dhcp_pid(DhcpDnsProxy.DHCPv4) and \
+ self.get_dhcp_pid(DhcpDnsProxy.DHCPv6)
+ else:
+ return self.get_dhcp_pid(DhcpDnsProxy.DHCPv4)
+
+ def log_dhcp_pid_info():
+ if self.ipv6_enabled:
+ LOG.debug(
+ _('dhcrelay for %(net_id)s, dhcp_pid %(dhcp_pid)d, '
+ 'dhcp6_pid %(dhcp6_pid)d, is stale, ignoring command'),
+ {
+ 'net_id': self.network.id,
+ 'dhcp_pid': self.get_dhcp_pid(DhcpDnsProxy.DHCPv4),
+ 'dhcp6_pid': self.get_dhcp_pid(DhcpDnsProxy.DHCPv6)
+ })
+ else:
+ LOG.debug(
+ _('dhcrelay for %(net_id)s, dhcp_pid %(dhcp_pid)d '
+ 'is stale, ignoring command'),
+ {
+ 'net_id': self.network.id,
+ 'dhcp_pid': self.get_dhcp_pid(DhcpDnsProxy.DHCPv4)
+ })
+
+ if self.is_dhcp_active():
+ kill_proc(self.get_dhcp_pid(DhcpDnsProxy.DHCPv4))
+ if self.ipv6_enabled:
+ kill_proc(self.get_dhcp_pid(DhcpDnsProxy.DHCPv6))
+ elif check_dhcp_pid():
+ log_dhcp_pid_info()
+ else:
+ LOG.debug(_('No dhcrelay started for %s'), self.network.id)
+
+ if self.is_dns_active():
+ kill_proc(self.get_dns_pid())
+ elif self.get_dns_pid():
+ LOG.debug(_('dnsmasq for %(net_id)s, dhcp_pid %(dns_pid)d, is'
+ ' stale, ignoring command'),
+ {'net_id': self.network.id,
+ 'dns_pid': self.get_dns_pid()}
+ )
+ else:
+ LOG.debug(_('No dnsmasq started for %s'), self.network.id)
+
+ if not retain_port:
+ self.device_manager.destroy(self.network, self.interface_name)
+ self.device_manager.destroy_relay(
+ self.network,
+ self._get_relay_device_name(),
+ self.conf.dhcp_relay_bridge)
+
+ if self.conf.dhcp_delete_namespaces and self.network.namespace:
+ ns_ip = ip_lib.IPWrapper(self.root_helper,
+ self.network.namespace)
+ try:
+ ns_ip.netns.delete(self.network.namespace)
+ except RuntimeError:
+ msg = _('Failed trying to delete namespace: %s')
+ LOG.exception(msg, self.network.namespace)
+ self._remove_config_files()
+
+ def spawn_process(self):
+ """Spawns a IPAM proxy processes for the network."""
+ self._spawn_dhcp_proxy()
+ self._spawn_dns_proxy()
+
+ def _construct_dhcrelay_commands(self, relay_ips, relay_ipv6s):
+ dhcrelay_v4_command = [
+ self.conf.dhcrelay_path, '-4', '-a',
+ '-pf', self.get_conf_file_name('dhcp4_pid', ensure_conf_dir=True),
+ '-i', self.interface_name]
+
+ ipv6_ok = self.ipv6_enabled
+ if ipv6_ok:
+ dhcrelay_v6_command = [
+ self.conf.dhcrelay_path, '-6', '-I',
+ '-pf', self.get_conf_file_name('dhcp6_pid', ensure_conf_dir=True),
+ '-l', self.interface_name]
+
+ if self.conf.use_link_selection_option:
+ # dhcrelay -4 -a -i iface1 -l iface2 server1 server2
+ dhcrelay_v4_command.append('-o')
+ dhcrelay_v4_command.append(self._get_relay_device_name())
+
+ if ipv6_ok:
+ # dhcrelay -6 -l iface1 -u server1%iface2 -u server2%iface2
+ if relay_ipv6s:
+ for ipv6_addr in relay_ipv6s:
+ dhcrelay_v6_command.append('-u')
+
+ if self.conf.use_ipv6_unicast_requests:
+ dhcrelay_v6_command.append("%".join((
+ ipv6_addr, self._get_relay_device_name())))
+ else:
+ dhcrelay_v6_command.append(
+ self._get_relay_device_name())
+
+ dhcrelay_v4_command.append(" ".join(relay_ips))
+
+ if ipv6_ok:
+ return [
+ dhcrelay_v4_command,
+ dhcrelay_v6_command
+ ]
+ else:
+ return [
+ dhcrelay_v4_command
+ ]
+
+ def _spawn_dhcp_proxy(self):
+ """Spawns a dhcrelay process for the network."""
+ relay_ips = self._get_relay_ips('external_dhcp_servers')
+ relay_ipv6s = self._get_relay_ips('external_dhcp_ipv6_servers')
+
+ if not relay_ips:
+ LOG.error(_('DHCP relay server isn\'t defined for network %s'),
+ self.network.id)
+ return
+
+ commands = self._construct_dhcrelay_commands(relay_ips, relay_ipv6s)
+
+ for cmd in commands:
+ if self.network.namespace:
+ ip_wrapper = ip_lib.IPWrapper(self.root_helper,
+ self.network.namespace)
+ try:
+ ip_wrapper.netns.execute(cmd)
+ except RuntimeError:
+ LOG.info(_("Can't start dhcrelay for %(command)s"),
+ {'command': cmd})
+ else:
+ utils.execute(cmd, self.root_helper)
+
+ def _spawn_dns_proxy(self):
+ """Spawns a Dnsmasq process in DNS relay only mode for the network."""
+ relay_ips = self._get_relay_ips('external_dns_servers')
+
+ if not relay_ips:
+ LOG.error(_('DNS relay server isn\'t defined for network %s'),
+ self.network.id)
+ return
+
+ server_list = []
+ for relay_ip in relay_ips:
+ server_list.append("--server=%s" % relay_ip)
+
+ env = {
+ self.NEUTRON_NETWORK_ID_KEY: self.network.id,
+ }
+
+ cmd = [
+ 'dnsmasq',
+ '--no-hosts',
+ '--no-resolv',
+ '--strict-order',
+ '--bind-interfaces',
+ '--interface=%s' % self.interface_name,
+ '--except-interface=lo',
+ '--all-servers']
+ cmd += server_list
+ cmd += ['--pid-file=%s' % self.get_conf_file_name(
+ 'dns_pid', ensure_conf_dir=True)]
+
+ if self.network.namespace:
+ ip_wrapper = ip_lib.IPWrapper(self.root_helper,
+ self.network.namespace)
+ ip_wrapper.netns.execute(cmd, addl_env=env)
+ else:
+ utils.execute(cmd, self.root_helper, addl_env=env)
+
+ def _get_relay_device_name(self):
+ return (self.RELAY_DEV_NAME_PREFIX +
+ self.network.id)[:self.dev_name_len]
+
+ def _get_relay_ips(self, ip_opt_name):
+ # Try to get relay IP from the config.
+ relay_ips = getattr(self.conf, ip_opt_name, None)
+ # If not specified in config try to get from network object.
+ if not relay_ips:
+ relay_ips = getattr(self.network, ip_opt_name, None)
+
+ if not relay_ips:
+ return None
+
+ try:
+ for relay_ip in relay_ips:
+ netaddr.IPAddress(relay_ip)
+ except netaddr.core.AddrFormatError:
+ LOG.error(_('An invalid option value has been provided:'
+ ' %(opt_name)s=%(opt_value)s') %
+ dict(opt_name=ip_opt_name, opt_value=relay_ip))
+ return None
+
+ return list(set(relay_ips))
+
+
+class DnsDhcpProxyDeviceManager(dhcp.DeviceManager):
+ def setup_relay(self, network, iface_name, mac_address, relay_bridge):
+ if ip_lib.device_exists(iface_name,
+ self.root_helper,
+ network.namespace):
+ LOG.debug(_('Reusing existing device: %s.'), iface_name)
+ else:
+ self.driver.plug(network.id,
+ network.id,
+ iface_name,
+ mac_address,
+ namespace=network.namespace,
+ bridge=relay_bridge)
+
+ use_static_ip_allocation = (
+ self.conf.dhcp_relay_management_network
+ and hasattr(network, MGMT_INTERFACE_IP_ATTR))
+
+ if use_static_ip_allocation:
+ self._allocate_static_ip(network, iface_name)
+ else:
+ self._allocate_ip_via_dhcp(network, iface_name)
+
+ def destroy_relay(self, network, device_name, relay_bridge):
+ self.driver.unplug(device_name, namespace=network.namespace,
+ bridge=relay_bridge)
+
+ def _allocate_static_ip(self, network, iface_name):
+ mgmt_net = self.conf.dhcp_relay_management_network
+ relay_ip = netaddr.IPAddress(getattr(network, MGMT_INTERFACE_IP_ATTR))
+ relay_net = netaddr.IPNetwork(mgmt_net)
+ relay_ip_cidr = '/'.join([str(relay_ip), str(relay_net.prefixlen)])
+ relay_iface = ip_lib.IPDevice(iface_name, self.root_helper)
+
+ LOG.info(_('Allocating static IP %(relay_ip)s for %(iface_name)s'),
+ {'relay_ip': relay_ip, 'iface_name': iface_name})
+
+ if network.namespace:
+ relay_iface.namespace = network.namespace
+
+ relay_iface.addr.add(
+ relay_ip.version, relay_ip_cidr, relay_net.broadcast,
+ scope='link')
+
+ def _allocate_ip_via_dhcp(self, network, iface_name):
+ dhcp_client_cmd = [self.conf.dhclient_path, iface_name]
+
+ LOG.info(_('Running DHCP client for %s interface'), iface_name)
+
+ if network.namespace:
+ ip_wrapper = ip_lib.IPWrapper(self.root_helper,
+ network.namespace)
+ ip_wrapper.netns.execute(dhcp_client_cmd)
+ else:
+ utils.execute(dhcp_client_cmd)
diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py
index a693cf1..bfb4951 100644
--- a/neutron/agent/linux/interface.py
+++ b/neutron/agent/linux/interface.py
@@ -65,6 +65,9 @@ OPTS = [
default='publicURL',
help=_("Network service endpoint type to pull from "
"the keystone catalog")),
+ cfg.IntOpt('interface_dev_name_len',
+ default=14,
+               help=_("Maximum interface name length")),
]
@@ -174,7 +177,8 @@ class LinuxInterfaceDriver(object):
raise exceptions.BridgeDoesNotExist(bridge=bridge)
def get_device_name(self, port):
- return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]
+ return ((self.DEV_NAME_PREFIX + port.id)
+ [:self.conf.interface_dev_name_len])
@abc.abstractmethod
def plug(self, network_id, port_id, device_name, mac_address,
diff --git a/neutron/common/config.py b/neutron/common/config.py
index cda8d05..9bd5eec 100644
--- a/neutron/common/config.py
+++ b/neutron/common/config.py
@@ -115,6 +115,9 @@ core_opts = [
cfg.IntOpt('send_events_interval', default=2,
help=_('Number of seconds between sending events to nova if '
'there are any events to send.')),
+ cfg.StrOpt('ipam_driver',
+ default='neutron.ipam.drivers.neutron_ipam.NeutronIPAM',
+ help=_('IPAM driver'))
]
core_cli_opts = [
diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py
index 0a013d5..aaa3983 100644
--- a/neutron/db/agents_db.py
+++ b/neutron/db/agents_db.py
@@ -115,7 +115,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase):
conf = {}
return conf
- def _make_agent_dict(self, agent, fields=None):
+ def _make_agent_dict(self, agent, fields=None, context=None):
attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
ext_agent.RESOURCE_NAME + 's')
res = dict((k, agent[k]) for k in attr
diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py
old mode 100644
new mode 100755
index 3df2fe5..f013d40
--- a/neutron/db/db_base_plugin_v2.py
+++ b/neutron/db/db_base_plugin_v2.py
@@ -38,7 +38,6 @@ from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
-
LOG = logging.getLogger(__name__)
# Ports with the following 'device_owner' values will not prevent
@@ -51,8 +50,8 @@ LOG = logging.getLogger(__name__)
AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP]
-class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
- common_db_mixin.CommonDbMixin):
+class NeutronCorePluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
+ common_db_mixin.CommonDbMixin):
"""V2 Neutron plugin interface implementation using SQLAlchemy models.
Whenever a non-read call happens the plugin will call an event handler
@@ -136,8 +135,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
if base_mac[3] != '00':
mac[3] = int(base_mac[3], 16)
mac_address = ':'.join(map(lambda x: "%02x" % x, mac))
- if NeutronDbPluginV2._check_unique_mac(context, network_id,
- mac_address):
+ if NeutronCorePluginV2._check_unique_mac(context, network_id,
+ mac_address):
LOG.debug(_("Generated mac for network %(network_id)s "
"is %(mac_address)s"),
{'network_id': network_id,
@@ -196,11 +195,11 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
@staticmethod
def _generate_ip(context, subnets):
try:
- return NeutronDbPluginV2._try_generate_ip(context, subnets)
+ return NeutronCorePluginV2._try_generate_ip(context, subnets)
except n_exc.IpAddressGenerationFailure:
- NeutronDbPluginV2._rebuild_availability_ranges(context, subnets)
+ NeutronCorePluginV2._rebuild_availability_ranges(context, subnets)
- return NeutronDbPluginV2._try_generate_ip(context, subnets)
+ return NeutronCorePluginV2._try_generate_ip(context, subnets)
@staticmethod
def _try_generate_ip(context, subnets):
@@ -431,9 +430,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
if 'ip_address' in fixed:
# Ensure that the IP's are unique
- if not NeutronDbPluginV2._check_unique_ip(context, network_id,
- subnet_id,
- fixed['ip_address']):
+ if not NeutronCorePluginV2._check_unique_ip(
+ context, network_id, subnet_id, fixed['ip_address']):
raise n_exc.IpAddressInUse(net_id=network_id,
ip_address=fixed['ip_address'])
@@ -474,7 +472,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
for fixed in fixed_ips:
if 'ip_address' in fixed:
# Remove the IP address from the allocation pool
- NeutronDbPluginV2._allocate_specific_ip(
+ NeutronCorePluginV2._allocate_specific_ip(
context, fixed['subnet_id'], fixed['ip_address'])
ips.append({'ip_address': fixed['ip_address'],
'subnet_id': fixed['subnet_id']})
@@ -523,17 +521,17 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
device_owner)
for ip in original_ips:
LOG.debug(_("Port update. Hold %s"), ip)
- NeutronDbPluginV2._delete_ip_allocation(context,
- network_id,
- ip['subnet_id'],
- ip['ip_address'])
+ NeutronCorePluginV2._delete_ip_allocation(context,
+ network_id,
+ ip['subnet_id'],
+ ip['ip_address'])
if to_add:
LOG.debug(_("Port update. Adding %s"), to_add)
ips = self._allocate_fixed_ips(context, to_add, mac_address)
return ips, prev_ips
- def _allocate_ips_for_port(self, context, port):
+ def _allocate_ips_for_port(self, context, port, port_id):
"""Allocate IP addresses for the port.
If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
@@ -541,6 +539,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
a subnet_id then allocate an IP address accordingly.
"""
p = port['port']
+ p['id'] = port_id
ips = []
fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED
@@ -583,7 +582,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
version_subnets = [v4, v6_stateful]
for subnets in version_subnets:
if subnets:
- result = NeutronDbPluginV2._generate_ip(context, subnets)
+ result = NeutronCorePluginV2._generate_ip(context, subnets)
ips.append({'ip_address': result['ip_address'],
'subnet_id': result['subnet_id']})
return ips
@@ -830,6 +829,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
'shared': network['shared'],
'subnets': [subnet['id']
for subnet in network['subnets']]}
+
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
@@ -861,8 +861,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet)
return self._fields(res, fields)
- def _make_port_dict(self, port, fields=None,
- process_extensions=True):
+ def _make_port_dict(self, port, fields=None, process_extensions=True):
res = {"id": port["id"],
'name': port['name'],
"network_id": port["network_id"],
@@ -1098,55 +1097,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
self._validate_subnet(context, s)
- tenant_id = self._get_tenant_id_for_create(context, s)
- with context.session.begin(subtransactions=True):
- network = self._get_network(context, s["network_id"])
- self._validate_subnet_cidr(context, network, s['cidr'])
- # The 'shared' attribute for subnets is for internal plugin
- # use only. It is not exposed through the API
- args = {'tenant_id': tenant_id,
- 'id': s.get('id') or uuidutils.generate_uuid(),
- 'name': s['name'],
- 'network_id': s['network_id'],
- 'ip_version': s['ip_version'],
- 'cidr': s['cidr'],
- 'enable_dhcp': s['enable_dhcp'],
- 'gateway_ip': s['gateway_ip'],
- 'shared': network.shared}
- if s['ip_version'] == 6 and s['enable_dhcp']:
- if attributes.is_attr_set(s['ipv6_ra_mode']):
- args['ipv6_ra_mode'] = s['ipv6_ra_mode']
- if attributes.is_attr_set(s['ipv6_address_mode']):
- args['ipv6_address_mode'] = s['ipv6_address_mode']
- subnet = models_v2.Subnet(**args)
-
- context.session.add(subnet)
- if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED:
- for addr in s['dns_nameservers']:
- ns = models_v2.DNSNameServer(address=addr,
- subnet_id=subnet.id)
- context.session.add(ns)
-
- if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED:
- for rt in s['host_routes']:
- route = models_v2.SubnetRoute(
- subnet_id=subnet.id,
- destination=rt['destination'],
- nexthop=rt['nexthop'])
- context.session.add(route)
-
- for pool in s['allocation_pools']:
- ip_pool = models_v2.IPAllocationPool(subnet=subnet,
- first_ip=pool['start'],
- last_ip=pool['end'])
- context.session.add(ip_pool)
- ip_range = models_v2.IPAvailabilityRange(
- ipallocationpool=ip_pool,
- first_ip=pool['start'],
- last_ip=pool['end'])
- context.session.add(ip_range)
-
- return self._make_subnet_dict(subnet)
+ return s
def _update_subnet_dns_nameservers(self, context, id, s):
old_dns_list = self._get_dns_by_subnet(context, id)
@@ -1207,7 +1158,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
first_ip=p['start'], last_ip=p['end'],
subnet_id=id) for p in s['allocation_pools']]
context.session.add_all(new_pools)
- NeutronDbPluginV2._rebuild_availability_ranges(context, [s])
+ NeutronCorePluginV2._rebuild_availability_ranges(context, [s])
#Gather new pools for result:
result_pools = [{'start': pool['start'],
'end': pool['end']}
@@ -1232,6 +1183,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
s['ip_version'] = db_subnet.ip_version
s['cidr'] = db_subnet.cidr
s['id'] = db_subnet.id
+ s['network_id'] = db_subnet.network_id
+ s['tenant_id'] = db_subnet.tenant_id
self._validate_subnet(context, s, cur_subnet=db_subnet)
if 'gateway_ip' in s and s['gateway_ip'] is not None:
@@ -1283,7 +1236,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
for a in allocated:
if (is_auto_addr_subnet or
a.ports.device_owner in AUTO_DELETE_PORT_OWNERS):
- NeutronDbPluginV2._delete_ip_allocation(
+ NeutronCorePluginV2._delete_ip_allocation(
context, subnet.network_id, id, a.ip_address)
else:
raise n_exc.SubnetInUse(subnet_id=id)
@@ -1334,13 +1287,12 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
#Note(scollins) Add the generated mac_address to the port,
#since _allocate_ips_for_port will need the mac when
#calculating an EUI-64 address for a v6 subnet
- p['mac_address'] = NeutronDbPluginV2._generate_mac(context,
- network_id)
+ p['mac_address'] = NeutronCorePluginV2._generate_mac(
+ context, network_id)
else:
# Ensure that the mac on the network is unique
- if not NeutronDbPluginV2._check_unique_mac(context,
- network_id,
- p['mac_address']):
+ if not NeutronCorePluginV2._check_unique_mac(
+ context, network_id, p['mac_address']):
raise n_exc.MacAddressInUse(net_id=network_id,
mac=p['mac_address'])
@@ -1361,12 +1313,12 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
context.session.add(db_port)
# Update the IP's for the port
- ips = self._allocate_ips_for_port(context, port)
+ ips = self._allocate_ips_for_port(context, port, port_id)
if ips:
for ip in ips:
ip_address = ip['ip_address']
subnet_id = ip['subnet_id']
- NeutronDbPluginV2._store_ip_allocation(
+ NeutronCorePluginV2._store_ip_allocation(
context, ip_address, network_id, subnet_id, port_id)
return self._make_port_dict(db_port, process_extensions=False)
@@ -1406,7 +1358,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
# Update ips if necessary
for ip in added_ips:
- NeutronDbPluginV2._store_ip_allocation(
+ NeutronCorePluginV2._store_ip_allocation(
context, ip['ip_address'], port['network_id'],
ip['subnet_id'], port.id)
# Remove all attributes in p which are not in the port DB model
@@ -1530,3 +1482,210 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
device_id=device_id)
if tenant_id != router['tenant_id']:
raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
+
+
+class NeutronIPAMPlugin(NeutronCorePluginV2):
+ @property
+ def ipam(self):
+ return manager.NeutronManager.get_ipam()
+
+ def _update_ips_for_port(self, context, network_id, port_id, original_ips,
+ new_ips, mac_address, device_owner):
+ """Add or remove IPs from the port."""
+ ips = []
+ # These ips are still on the port and haven't been removed
+ prev_ips = []
+
+ # the new_ips contain all of the fixed_ips that are to be updated
+ if len(new_ips) > cfg.CONF.max_fixed_ips_per_port:
+            msg = _('Exceeded maximum amount of fixed ips per port')
+ raise n_exc.InvalidInput(error_message=msg)
+
+ # Remove all of the intersecting elements
+ for original_ip in original_ips[:]:
+ for new_ip in new_ips[:]:
+ if ('ip_address' in new_ip and
+ original_ip['ip_address'] == new_ip['ip_address']):
+ original_ips.remove(original_ip)
+ new_ips.remove(new_ip)
+ prev_ips.append(original_ip)
+
+ # Check if the IP's to add are OK
+ to_add = self._test_fixed_ips_for_port(context, network_id,
+ new_ips, device_owner)
+
+ for ip in original_ips:
+ LOG.debug(_("Port update. Hold %s"), ip)
+ NeutronIPAMPlugin._delete_ip_allocation(context,
+ network_id,
+ ip['subnet_id'],
+ ip['ip_address'])
+ port = self._get_port(context, port_id)
+ self.ipam.deallocate_ip(context, port, ip)
+
+ if to_add:
+ LOG.debug(_("Port update. Adding %s"), to_add)
+ ips = self._allocate_fixed_ips(context, to_add, mac_address)
+
+ return ips, prev_ips
+
+ def _allocate_ips_for_port(self, context, port, port_id):
+ """Allocate IP addresses for the port.
+
+ If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
+ addresses for the port. If port['fixed_ips'] contains an IP address or
+ a subnet_id then allocate an IP address accordingly.
+ """
+ p = port['port']
+ p['id'] = port_id
+ ips = []
+
+ fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED
+ if fixed_configured:
+ configured_ips = self._test_fixed_ips_for_port(context,
+ p["network_id"],
+ p['fixed_ips'],
+ p['device_owner'])
+
+ for ip in configured_ips:
+ ips.append(self.ipam.allocate_ip(context, p, ip=ip))
+ else:
+ filter = {'network_id': [p['network_id']]}
+ subnets = self.get_subnets(context, filters=filter)
+ # Split into v4 and v6 subnets
+ v4 = []
+ v6_stateful = []
+ v6_stateless = []
+ for subnet in subnets:
+ if subnet['ip_version'] == 4:
+ v4.append(subnet)
+ else:
+ if ipv6_utils.is_auto_address_subnet(subnet):
+ v6_stateless.append(subnet)
+ else:
+ v6_stateful.append(subnet)
+
+ for subnet in v6_stateless:
+ prefix = subnet['cidr']
+ ip_address = ipv6_utils.get_ipv6_addr_by_EUI64(
+ prefix, p['mac_address'])
+ if not self._check_unique_ip(
+ context, p['network_id'],
+ subnet['id'], ip_address.format()):
+ raise n_exc.IpAddressInUse(
+ net_id=p['network_id'],
+ ip_address=ip_address.format())
+ ips.append({'ip_address': ip_address.format(),
+ 'subnet_id': subnet['id']})
+
+ version_subnets = [v4, v6_stateful]
+ for subnets in version_subnets:
+ if subnets:
+ ip = None
+ for subnet in subnets:
+ ip = self.ipam.allocate_ip(
+ context, p, ip={'subnet_id': subnet['id']})
+ if ip:
+ ips.append(ip)
+ break
+ if not ip:
+ raise n_exc.IpAddressGenerationFailure(
+ net_id=subnet['network_id'])
+
+ return ips
+
+ def _update_subnet_dns_nameservers(self, context, id, s):
+ subnet, dhcp_changes = self.ipam.update_subnet(context, id, s)
+
+ result = self._make_subnet_dict(subnet)
+ # Keep up with fields that changed
+ if 'new_dns' in dhcp_changes:
+ result['dns_nameservers'] = dhcp_changes['new_dns']
+ if 'new_routes' in dhcp_changes:
+ result['host_routes'] = dhcp_changes['new_routes']
+ if "dns_nameservers" in s:
+ del s["dns_nameservers"]
+ return dhcp_changes['new_dns']
+
+ def create_subnet(self, context, subnet):
+ s = super(NeutronIPAMPlugin, self).create_subnet(context, subnet)
+ with context.session.begin(subtransactions=True):
+ network = self._get_network(context, s["network_id"])
+ self._validate_subnet_cidr(context, network, s['cidr'])
+ subnet = self.ipam.create_subnet(context, s)
+ return subnet
+
+ def delete_subnet(self, context, id):
+ with context.session.begin(subtransactions=True):
+ subnet = self._get_subnet(context, id)
+ self.ipam.delete_subnet(context, subnet)
+ super(NeutronIPAMPlugin, self).delete_subnet(context, id)
+
+ def create_network(self, context, network):
+ net = super(NeutronIPAMPlugin, self).create_network(context,
+ network)
+ n = network['network']
+ n['id'] = net['id']
+ self.ipam.create_network(context, n)
+
+ return net
+
+ def get_networks(self, context, filters=None, fields=None,
+ sorts=None, limit=None, marker=None,
+ page_reverse=False):
+ nets = super(NeutronIPAMPlugin, self).get_networks(
+ context, filters, fields, sorts, limit, marker, page_reverse)
+
+ for net in nets:
+ if 'id' in net:
+ net.update(self.ipam.get_additional_network_dict_params(
+ context, net['id']))
+
+ return nets
+
+ def delete_network(self, context, id):
+ with context.session.begin(subtransactions=True):
+ network = self._get_network(context, id)
+ self.ipam.delete_network(context, network)
+ super(NeutronIPAMPlugin, self).delete_network(context, id)
+
+ def update_network(self, context, id, network):
+ n = super(NeutronIPAMPlugin, self).update_network(context,
+ id, network)
+
+ subnets = self._get_subnets_by_network(context, id)
+ for subnet in subnets:
+ self.ipam.update_subnet(context, subnet['id'], subnet)
+
+ return n
+
+ def create_port(self, context, port):
+ port_dict = super(NeutronIPAMPlugin, self).create_port(context,
+ port)
+ self.ipam.create_port(context, port_dict)
+ return port_dict
+
+ def _delete_port(self, context, id):
+ query = (context.session.query(models_v2.Port).
+ enable_eagerloads(False).filter_by(id=id))
+ if not context.is_admin:
+ query = query.filter_by(tenant_id=context.tenant_id)
+
+ port = query.with_lockmode('update').one()
+ self.ipam.delete_port(context, port)
+
+ allocated_qry = context.session.query(
+ models_v2.IPAllocation).with_lockmode('update')
+ # recycle all of the IP's
+ allocated = allocated_qry.filter_by(port_id=id)
+ host = dict(name=(port.get('id') or uuidutils.generate_uuid()),
+ mac_address=port['mac_address'])
+ for a in allocated:
+ ip = dict(subnet_id=a['subnet_id'],
+ ip_address=a['ip_address'])
+ self.ipam.deallocate_ip(context, host, ip)
+
+ query.delete()
+
+
+NeutronDbPluginV2 = NeutronIPAMPlugin
diff --git a/neutron/db/infoblox/__init__.py b/neutron/db/infoblox/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/neutron/db/infoblox/infoblox_db.py b/neutron/db/infoblox/infoblox_db.py
new file mode 100755
index 0000000..b3b4d81
--- /dev/null
+++ b/neutron/db/infoblox/infoblox_db.py
@@ -0,0 +1,294 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+from sqlalchemy.orm import exc
+
+import functools
+import time
+
+from oslo.config import cfg
+from neutron.db import external_net_db
+from neutron.db.infoblox import models
+from neutron.db import l3_db
+from neutron.db import models_v2
+from neutron.openstack.common import log as logging
+#from neutron.openstack.common.db import exception as db_exc
+from oslo.db import exception as db_exc
+
+
+OPTS = [
+ cfg.IntOpt('infoblox_db_retry_interval',
+ default=5,
+ help=_('Seconds between db connection retries')),
+ cfg.IntOpt('infoblox_db_max_try',
+ default=20,
+ help=_('Maximum retries before error is raised. '
+ '(setting -1 implies an infinite retry count)')),
+ cfg.BoolOpt('infoblox_db_inc_retry_interval',
+ default=True,
+ help=_('Whether to increase interval between retries, '
+ 'up to infoblox_db_max_retry_interval')),
+ cfg.IntOpt('infoblox_db_max_retry_interval',
+ default=600,
+ help='Max seconds for total retries, if '
+ 'infoblox_db_inc_retry_interval is enabled')
+]
+
+cfg.CONF.register_opts(OPTS)
+
+LOG = logging.getLogger(__name__)
+
+
+def _retry_db_error(func):
+ @functools.wraps(func)
+ def callee(*args, **kwargs):
+ next_interval = cfg.CONF.infoblox_db_retry_interval
+ remaining = cfg.CONF.infoblox_db_max_try
+ while True:
+ try:
+ return func(*args, **kwargs)
+ except Exception as e:
+ LOG.warn(_("DB error on %s: %s"
+ % (func, str(e))))
+ if remaining == 0:
+ LOG.exception(_('DB exceeded retry limit.'))
+ raise db_exc.DBError(e)
+ if remaining != -1:
+ remaining -= 1
+ time.sleep(next_interval)
+ if cfg.CONF.infoblox_db_inc_retry_interval:
+ next_interval = min(
+ next_interval * 2,
+ cfg.CONF.infoblox_db_max_retry_interval
+ )
+ return callee
+
+
+def get_used_members(context, member_type):
+ """Returns used members where map_id is not null."""
+ q = context.session.query(models.InfobloxMemberMap)
+ members = q.filter(models.InfobloxMemberMap.member_type ==
+ member_type).\
+ filter(models.InfobloxMemberMap.map_id.isnot(None)).\
+ distinct()
+ return members
+
+
+def get_registered_members(context, member_type):
+ """Returns registered members."""
+ q = context.session.query(models.InfobloxMemberMap)
+ members = q.filter_by(member_type=member_type).distinct()
+ return members
+
+
+@_retry_db_error
+def get_available_member(context, member_type):
+ """Returns available members."""
+ q = context.session.query(models.InfobloxMemberMap)
+ q = q.filter(models.InfobloxMemberMap.member_type == member_type)
+ member = q.filter(models.InfobloxMemberMap.map_id.is_(None)).\
+ with_lockmode("update").\
+ first()
+ return member
+
+
+def get_members(context, map_id, member_type):
+ """Returns members used by currently used mapping (tenant id,
+ network id or Infoblox netview name).
+ """
+ q = context.session.query(models.InfobloxMemberMap)
+ members = q.filter_by(map_id=map_id, member_type=member_type).all()
+ return members
+
+
+def register_member(context, map_id, member_name, member_type):
+ if map_id:
+ context.session.add(
+ models.InfobloxMemberMap(map_id=map_id,
+ member_name=member_name,
+ member_type=member_type))
+ else:
+ context.session.add(
+ models.InfobloxMemberMap(member_name=member_name,
+ member_type=member_type))
+
+
+def attach_member(context, map_id, member_name, member_type):
+ context.session.query(models.InfobloxMemberMap.member_name).\
+ filter_by(member_name=member_name, member_type=member_type).\
+ update({'map_id': map_id})
+
+
+def delete_members(context, map_id):
+ with context.session.begin(subtransactions=True):
+ context.session.query(
+ models.InfobloxMemberMap).filter_by(map_id=map_id).delete()
+
+
+@_retry_db_error
+def release_member(context, map_id):
+ context.session.query(models.InfobloxMemberMap.member_name).\
+ filter_by(map_id=map_id).update({'map_id': None})
+
+
+def is_last_subnet(context, subnet_id):
+ q = context.session.query(models_v2.Subnet)
+ return q.filter(models_v2.Subnet.id != subnet_id).count() == 0
+
+
+def is_last_subnet_in_network(context, subnet_id, network_id):
+ q = context.session.query(models_v2.Subnet)
+ return q.filter(models_v2.Subnet.id != subnet_id,
+ models_v2.Subnet.network_id == network_id).count() == 0
+
+
+def is_last_subnet_in_tenant(context, subnet_id, tenant_id):
+ q = context.session.query(models_v2.Subnet)
+ return q.filter(models_v2.Subnet.id != subnet_id,
+ models_v2.Subnet.tenant_id == tenant_id).count() == 0
+
+
+def is_last_subnet_in_private_networks(context, subnet_id):
+ sub_qry = context.session.query(
+ external_net_db.ExternalNetwork.network_id)
+ q = context.session.query(models_v2.Subnet.id)
+ q = q.filter(models_v2.Subnet.id != subnet_id)
+ q = q.filter(~models_v2.Subnet.network_id.in_(sub_qry))
+ return q.count() == 0
+
+
+def is_network_external(context, network_id):
+ q = context.session.query(external_net_db.ExternalNetwork)
+ return q.filter_by(network_id=network_id).count() > 0
+
+
+def delete_ip_allocation(context, network_id, subnet, ip_address):
+ # Delete the IP address from the IPAllocate table
+ subnet_id = subnet['id']
+ LOG.debug(_("Delete allocated IP %(ip_address)s "
+ "(%(network_id)s/%(subnet_id)s)"), locals())
+ alloc_qry = context.session.query(
+ models_v2.IPAllocation).with_lockmode('update')
+ alloc_qry.filter_by(network_id=network_id,
+ ip_address=ip_address,
+ subnet_id=subnet_id).delete()
+
+
+def get_subnets_by_network(context, network_id):
+ subnet_qry = context.session.query(models_v2.Subnet)
+ return subnet_qry.filter_by(network_id=network_id).all()
+
+
+def get_subnets_by_port(context, port_id):
+ allocs = (context.session.query(models_v2.IPAllocation).
+ join(models_v2.Port).
+ filter_by(id=port_id)
+ .all())
+ subnets = []
+ subnet_qry = context.session.query(models_v2.Subnet)
+ for allocation in allocs:
+ subnets.append(subnet_qry.
+ filter_by(id=allocation.subnet_id).
+ first())
+ return subnets
+
+
+def get_port_by_id(context, port_id):
+ query = context.session.query(models_v2.Port)
+ return query.filter_by(id=port_id).one()
+
+
+def get_network_name(context, subnet):
+ q = context.session.query(models_v2.Network)
+ net_name = q.join(models_v2.Subnet).filter(
+ models_v2.Subnet.id == subnet['id']).first()
+ if net_name:
+ return net_name.name
+ return None
+
+
+def get_instance_id_by_floating_ip(context, floating_ip_id):
+ query = context.session.query(l3_db.FloatingIP, models_v2.Port)
+ query = query.filter(l3_db.FloatingIP.id == floating_ip_id)
+ query = query.filter(models_v2.Port.id == l3_db.FloatingIP.fixed_port_id)
+ result = query.first()
+ if result:
+ return result.Port.device_id
+ return None
+
+
+def get_subnet_dhcp_port_address(context, subnet_id):
+ dhcp_port = (context.session.query(models_v2.IPAllocation).
+ filter_by(subnet_id=subnet_id).
+ join(models_v2.Port).
+ filter_by(device_owner='network:dhcp')
+ .first())
+ if dhcp_port:
+ return dhcp_port.ip_address
+ return None
+
+
+def get_network_view(context, network_id):
+ query = context.session.query(models.InfobloxNetViews)
+ net_view = query.filter_by(network_id=network_id).first()
+ if net_view:
+ return net_view.network_view
+ return None
+
+
+def set_network_view(context, network_view, network_id):
+ ib_net_view = models.InfobloxNetViews(network_id=network_id,
+ network_view=network_view)
+
+ # there should be only one NIOS network view per Openstack network
+ query = context.session.query(models.InfobloxNetViews)
+ obj = query.filter_by(network_id=network_id).first()
+ if not obj:
+ context.session.add(ib_net_view)
+
+
+def add_management_ip(context, network_id, fixed_address):
+ context.session.add(models.InfobloxManagementNetIps(
+ network_id=network_id,
+ ip_address=fixed_address.ip,
+ fixed_address_ref=fixed_address.ref))
+
+
+def delete_management_ip(context, network_id):
+ query = context.session.query(models.InfobloxManagementNetIps)
+ query.filter_by(network_id=network_id).delete()
+
+
+def get_management_ip_ref(context, network_id):
+ query = context.session.query(models.InfobloxManagementNetIps)
+ mgmt_ip = query.filter_by(network_id=network_id).first()
+ return mgmt_ip.fixed_address_ref if mgmt_ip else None
+
+
+def get_management_net_ip(context, network_id):
+ query = context.session.query(models.InfobloxManagementNetIps)
+ mgmt_ip = query.filter_by(network_id=network_id).first()
+ return mgmt_ip.ip_address if mgmt_ip else None
+
+
+def get_network(context, network_id):
+ network_qry = context.session.query(models_v2.Network)
+ return network_qry.filter_by(id=network_id).one()
+
+
+def get_subnet(context, subnet_id):
+ subnet_qry = context.session.query(models_v2.Subnet)
+ return subnet_qry.filter_by(id=subnet_id).one()
diff --git a/neutron/db/infoblox/models.py b/neutron/db/infoblox/models.py
new file mode 100755
index 0000000..2f2bd5e
--- /dev/null
+++ b/neutron/db/infoblox/models.py
@@ -0,0 +1,82 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sa
+
+from neutron.db import model_base
+from neutron.db import models_v2
+
+
+DHCP_MEMBER_TYPE = 'dhcp'
+DNS_MEMBER_TYPE = 'dns'
+
+
+class InfobloxDNSMember(model_base.BASEV2, models_v2.HasId):
+ __tablename__ = 'infoblox_dns_members'
+
+ network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id',
+ ondelete="CASCADE"))
+ server_ip = sa.Column(sa.String(40))
+ server_ipv6 = sa.Column(sa.String(40))
+
+
+class InfobloxDHCPMember(model_base.BASEV2, models_v2.HasId):
+ __tablename__ = 'infoblox_dhcp_members'
+
+ network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id',
+ ondelete="CASCADE"))
+ server_ip = sa.Column(sa.String(40))
+ server_ipv6 = sa.Column(sa.String(40))
+
+
+class InfobloxMemberMap(model_base.BASEV2):
+ """Maps Neutron object to Infoblox member.
+
+ map_id may point to Network ID, Tenant ID or Infoblox network view name
+ depending on configuration. Infoblox member names are unique.
+ """
+ __tablename__ = 'infoblox_member_maps'
+
+ member_name = sa.Column(sa.String(255), nullable=False, primary_key=True)
+ map_id = sa.Column(sa.String(255), nullable=True)
+ member_type = sa.Column(sa.String(10))
+
+
+class InfobloxNetViews(model_base.BASEV2):
+ """Connects Infoblox network views with Openstack networks.
+ This is needed to properly delete network views in NIOS on network
+ delete
+ """
+
+ __tablename__ = 'infoblox_net_views'
+
+ network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id",
+ ondelete="CASCADE"),
+ nullable=False, primary_key=True)
+ network_view = sa.Column(sa.String(56))
+
+
+class InfobloxManagementNetIps(model_base.BASEV2):
+ """Holds IP addresses allocated on management network for DHCP relay
+ interface
+ """
+
+ __tablename__ = 'infoblox_mgmt_net_ips'
+
+ network_id = sa.Column(sa.String(length=255), primary_key=True)
+ ip_address = sa.Column(sa.String(length=64), nullable=False)
+ fixed_address_ref = sa.Column(sa.String(length=255), nullable=False)
diff --git a/neutron/db/l3_db.py b/neutron/db/l3_db.py
index 2c915cd..2e037af 100644
--- a/neutron/db/l3_db.py
+++ b/neutron/db/l3_db.py
@@ -458,6 +458,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase):
subnet['id'],
subnet['cidr'])
port.update({'device_id': router.id, 'device_owner': owner})
+ ipam = manager.NeutronManager.get_ipam()
+ ipam.update_port(context, port)
return port
def _add_interface_by_subnet(self, context, router, subnet_id, owner):
@@ -810,9 +812,12 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase):
fip['tenant_id'] = floatingip_db['tenant_id']
fip['id'] = id
fip_port_id = floatingip_db['floating_port_id']
- self._update_fip_assoc(context, fip, floatingip_db,
- self._core_plugin.get_port(
- context.elevated(), fip_port_id))
+ port = self._core_plugin.get_port(context.elevated(),
+ fip_port_id)
+ self._update_fip_assoc(context, fip, floatingip_db, port)
+ ipam = manager.NeutronManager.get_ipam()
+ ipam.associate_floatingip(context, floatingip, port)
+
return old_floatingip, self._make_floatingip_dict(floatingip_db)
def _floatingips_to_router_ids(self, floatingips):
@@ -920,6 +925,10 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase):
floating_ips = fip_qry.filter_by(fixed_port_id=port_id)
for floating_ip in floating_ips:
router_ids.add(floating_ip['router_id'])
+
+ ipam = manager.NeutronManager.get_ipam()
+ ipam.disassociate_floatingip(context, floating_ip, port_id)
+
floating_ip.update({'fixed_port_id': None,
'fixed_ip_address': None,
'router_id': None})
diff --git a/neutron/db/migration/alembic_migrations/versions/172ace2194db_infoblox_net_view_to_network_id.py b/neutron/db/migration/alembic_migrations/versions/172ace2194db_infoblox_net_view_to_network_id.py
new file mode 100644
index 0000000..76c37c1
--- /dev/null
+++ b/neutron/db/migration/alembic_migrations/versions/172ace2194db_infoblox_net_view_to_network_id.py
@@ -0,0 +1,48 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Infoblox net view to network ID
+
+Revision ID: 172ace2194db
+Revises: d9841b33bd
+Create Date: 2014-09-09 11:03:23.737412
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '172ace2194db'
+down_revision = 'd9841b33bd'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = ['*']
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(active_plugins=None, options=None):
+ op.create_table('infoblox_net_views',
+ sa.Column('network_id',
+ sa.String(36),
+ sa.ForeignKey("networks.id",
+ ondelete="CASCADE"),
+ nullable=False,
+ primary_key=True),
+ sa.Column('network_view', sa.String(56)))
+
+
+def downgrade(active_plugins=None, options=None):
+ op.drop_table('infoblox_net_views')
diff --git a/neutron/db/migration/alembic_migrations/versions/256b90dd9824_multiple_dhcp_members.py b/neutron/db/migration/alembic_migrations/versions/256b90dd9824_multiple_dhcp_members.py
new file mode 100644
index 0000000..3445cc8
--- /dev/null
+++ b/neutron/db/migration/alembic_migrations/versions/256b90dd9824_multiple_dhcp_members.py
@@ -0,0 +1,62 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Multiple DHCP members
+
+Revision ID: 256b90dd9824
+Revises: 172ace2194db
+Create Date: 2014-09-15 05:54:38.612277
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '256b90dd9824'
+down_revision = '172ace2194db'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = [
+ '*'
+]
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(active_plugins=None, options=None):
+ op.create_table(
+ 'infoblox_dhcp_members',
+ sa.Column('id', sa.String(length=36), nullable=False),
+ sa.Column('network_id', sa.String(length=36), nullable=False),
+ sa.Column('server_ip', sa.String(length=40), nullable=False),
+ sa.Column('server_ipv6', sa.String(length=40), nullable=True),
+ sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'))
+
+ op.create_table(
+ 'infoblox_dns_members',
+ sa.Column('id', sa.String(length=36), nullable=False),
+ sa.Column('network_id', sa.String(length=36), nullable=False),
+ sa.Column('server_ip', sa.String(length=40), nullable=False),
+ sa.Column('server_ipv6', sa.String(length=40), nullable=True),
+ sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id'))
+
+
+def downgrade(active_plugins=None, options=None):
+ op.drop_table('infoblox_dhcp_members')
+ op.drop_table('infoblox_dns_members')
diff --git a/neutron/db/migration/alembic_migrations/versions/33150f5993b6_infoblox_management_net_handling.py b/neutron/db/migration/alembic_migrations/versions/33150f5993b6_infoblox_management_net_handling.py
new file mode 100644
index 0000000..e3ef4ae
--- /dev/null
+++ b/neutron/db/migration/alembic_migrations/versions/33150f5993b6_infoblox_management_net_handling.py
@@ -0,0 +1,58 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Infoblox management net handling
+
+Currently DHCP relay is using dynamic DHCP to get the management IP address.
+This is not desirable from customer point of view as they do want a static IP
+assignment for such cases which will reduce any potential issue.
+
+Revision ID: 33150f5993b6
+Revises: 256b90dd9824
+Create Date: 2014-08-28 14:40:20.585390
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '33150f5993b6'
+down_revision = '256b90dd9824'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = [
+ '*'
+]
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(active_plugins=None, options=None):
+ op.create_table('infoblox_mgmt_net_ips',
+ sa.Column('network_id',
+ sa.String(length=255),
+ primary_key=True),
+ sa.Column('ip_address',
+ sa.String(length=64),
+ nullable=False),
+ sa.Column('fixed_address_ref',
+ sa.String(length=255),
+ nullable=False))
+
+
+def downgrade(active_plugins=None, options=None):
+ op.drop_table('infoblox_mgmt_net_ips')
diff --git a/neutron/db/migration/alembic_migrations/versions/78d27e9172_infoblox_db_v2.py b/neutron/db/migration/alembic_migrations/versions/78d27e9172_infoblox_db_v2.py
new file mode 100755
index 0000000..4fcf87b
--- /dev/null
+++ b/neutron/db/migration/alembic_migrations/versions/78d27e9172_infoblox_db_v2.py
@@ -0,0 +1,46 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Infoblox DB v2
+
+Revision ID: 78d27e9172
+Revises: 45284803ad19
+Create Date: 2014-05-28 11:11:34.815843
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '78d27e9172'
+down_revision = 'juno'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = ['*']
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(active_plugins=None, options=None):
+ op.create_table(
+ 'infoblox_member_maps',
+ sa.Column('member_name', sa.String(255), nullable=False),
+ sa.Column('map_id', sa.String(255), nullable=True))
+
+
+def downgrade(active_plugins=None, options=None):
+ op.drop_table('infoblox_member_maps')
diff --git a/neutron/db/migration/alembic_migrations/versions/HEAD b/neutron/db/migration/alembic_migrations/versions/HEAD
index 7a30775..a27236f 100644
--- a/neutron/db/migration/alembic_migrations/versions/HEAD
+++ b/neutron/db/migration/alembic_migrations/versions/HEAD
@@ -1 +1 @@
-juno
+33150f5993b6
diff --git a/neutron/db/migration/alembic_migrations/versions/d9841b33bd_add_infoblox_member_type.py b/neutron/db/migration/alembic_migrations/versions/d9841b33bd_add_infoblox_member_type.py
new file mode 100644
index 0000000..de2b39b
--- /dev/null
+++ b/neutron/db/migration/alembic_migrations/versions/d9841b33bd_add_infoblox_member_type.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Add Infoblox member type
+
+Revision ID: d9841b33bd
+Revises: 78d27e9172
+Create Date: 2014-06-23 18:25:15.557835
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'd9841b33bd'
+down_revision = '78d27e9172'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = ['*']
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(active_plugins=None, options=None):
+ op.add_column('infoblox_member_maps',
+ sa.Column('member_type', sa.String(10)))
+ op.execute("UPDATE infoblox_member_maps SET member_type='dhcp'")
+
+
+def downgrade(active_plugins=None, options=None):
+ op.drop_column('infoblox_member_maps', 'member_type')
diff --git a/neutron/ipam/__init__.py b/neutron/ipam/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/neutron/ipam/base.py b/neutron/ipam/base.py
new file mode 100644
index 0000000..2109c46
--- /dev/null
+++ b/neutron/ipam/base.py
@@ -0,0 +1,357 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+import six
+
+from neutron.ipam.drivers import neutron_db
+from neutron.openstack.common import log as logging
+
+# Ports with the following 'device_owner' values will not prevent
+# network deletion. If delete_network() finds that all ports on a
+# network have these owners, it will explicitly delete each port
+# and allow network deletion to continue. Similarly, if delete_subnet()
+# finds out that all existing IP Allocations are associated with ports
+# with these owners, it will allow subnet deletion to proceed with the
+# IP allocations being cleaned up by cascade.
+AUTO_DELETE_PORT_OWNERS = ['network:dhcp']
+
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class DHCPController(neutron_db.NeutronPluginController):
+    """Base class for IPAM DHCP controller. Encapsulates logic for handling
+ DHCP service related actions.
+ """
+
+ @abc.abstractmethod
+ def configure_dhcp(self, context, backend_subnet, dhcp_params):
+ """Implement this if you need extra actions to be taken on DHCP server
+ during subnet creation.
+ :param backend_subnet: models_v2.Subnet object, represents a subnet
+ being created
+ :param dhcp_params: dict with DHCP arguments, such as dns_nameservers,
+ and host_routes
+ """
+ pass
+
+ @abc.abstractmethod
+ def reconfigure_dhcp(self, context, backend_subnet, dhcp_params):
+ """This is called on subnet update. Implement if DHCP needs to be
+ reconfigured on subnet change
+ :param backend_subnet: models_v2.Subnet object being updated
+ :param dhcp_params: dict with DHCP parameters, such as DNS nameservers,
+ and host routes
+ """
+ pass
+
+ @abc.abstractmethod
+ def disable_dhcp(self, context, backend_subnet):
+ """This is called on subnet delete. Implement if DHCP service needs to
+ be disabled for a given subnet.
+ :param backend_subnet: models_v2.Subnet object being deleted
+ """
+ pass
+
+ @abc.abstractmethod
+ def dhcp_is_enabled(self, context, backend_subnet):
+        """Returns True if DHCP service is enabled for a subnet, False
+ otherwise
+ :param backend_subnet: models_v2.Subnet object
+ """
+ pass
+
+ @abc.abstractmethod
+ def get_dhcp_ranges(self, context, backend_subnet):
+ """Returns DHCP range for a subnet
+ :param backend_subnet: models_v2.Subnet object
+ """
+ pass
+
+ @abc.abstractmethod
+ def bind_mac(self, context, backend_subnet, ip_address, mac_address):
+ """Binds IP address with MAC.
+ :param backend_subnet: models_v2.Subnet object
+ :param ip_address: IP address to be bound
+ :param mac_address: MAC address to be bound
+ """
+ pass
+
+ @abc.abstractmethod
+ def unbind_mac(self, context, backend_subnet, ip_address):
+ """Inverse action for bind_mac.
+ :param backend_subnet: models_v2.Subnet object;
+ :param ip_address: IP address to be unbound
+ """
+ pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class DNSController(neutron_db.NeutronPluginController):
+    """Encapsulates DNS related logic"""
+
+ @abc.abstractmethod
+ def bind_names(self, context, backend_port):
+ """Associate domain name with IP address for a given port
+ :param backend_port: models_v2.Port object
+ """
+ pass
+
+ @abc.abstractmethod
+ def unbind_names(self, context, backend_port):
+ """Disassociate domain name from a given port
+ :param backend_port: models_v2.Port object
+ """
+ pass
+
+ @abc.abstractmethod
+ def create_dns_zones(self, context, backend_subnet):
+ """Creates domain name space for a given subnet. This is called on
+ subnet creation.
+ :param backend_subnet: models_v2.Subnet object
+ """
+ pass
+
+ @abc.abstractmethod
+ def delete_dns_zones(self, context, backend_subnet):
+ """Deletes domain name space associated with a subnet. Called on
+ delete subnet.
+ :param backend_subnet: models_v2.Subnet object
+ """
+ pass
+
+ @abc.abstractmethod
+ def disassociate_floatingip(self, context, floatingip, port_id):
+ """Called when floating IP gets disassociated from port
+ :param floatingip: l3_db.FloatingIP object to be disassociated
+ :param port_id: UUID of a port being disassociated
+ """
+ pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class IPAMController(neutron_db.NeutronPluginController):
+ """IP address management controller. Operates with higher-level entities
+ like networks, subnets and ports
+ """
+
+ @abc.abstractmethod
+ def create_subnet(self, context, subnet):
+ """Creates allocation pools and IP ranges for a subnet.
+
+ :param subnet: user-supplied subnet
+ :return models_v2.Subnet object.
+ """
+ pass
+
+ @abc.abstractmethod
+ def update_subnet(self, context, subnet_id, subnet):
+ """Called on subnet update.
+ :param subnet_id: ID of a subnet being updated
+ :param subnet: user-supplied subnet object (dict)
+ """
+ pass
+
+ @abc.abstractmethod
+ def delete_subnet(self, context, subnet):
+ """Called on subnet delete. Remove all the higher-level objects
+ associated with a subnet
+ :param subnet: user-supplied subnet object (dict)
+ """
+ pass
+
+ @abc.abstractmethod
+ def get_subnets(self, context, filters=None, fields=None,
+ sorts=None, limit=None, marker=None,
+ page_reverse=False):
+ pass
+
+ @abc.abstractmethod
+ def force_off_ports(self, context, ports):
+ """Disable ports on subnet delete event
+ :param ports: list of models_v2.Port objects to be disabled
+ """
+ pass
+
+ @abc.abstractmethod
+ def get_subnet_by_id(self, context, subnet_id):
+ """Returns subnet by UUID
+ :param subnet_id: UUID of a subnet
+ """
+ pass
+
+ @abc.abstractmethod
+ def allocate_ip(self, context, backend_subnet, host, ip=None):
+ """Allocates IP address based either on a subnet's IP range or an IP
+ address provided as an argument
+ :param backend_subnet: models_v2.Subnet object
+ :param host: port which needs IP generated
+ :param ip: IP address to be allocated for a port/host. If not set, IP
+ address will be generated from subnet range
+ :returns: IP address allocated
+ """
+ pass
+
+ @abc.abstractmethod
+ def deallocate_ip(self, context, backend_subnet, host, ip):
+ """Frees IP allocation for a given address
+ :param backend_subnet: models_v2.Subnet object
+ :param host: host/port which has IP allocated
+ :param ip: IP address to be revoked
+ """
+ pass
+
+ @abc.abstractmethod
+ def create_network(self, context, network):
+ """Creates network in the database
+ :param network: user-supplied network object (dict)
+ :returns: models_v2.Network object
+ """
+ pass
+
+ @abc.abstractmethod
+ def delete_network(self, context, network_id):
+ """Deletes network from the database
+ :param network_id: UUID of a network to be deleted
+ """
+ pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class IPAMManager(object):
+    """IPAM subsystem manager class which controls IPAM by calling DHCP, DNS
+ and IPAM controller methods
+ """
+
+ @abc.abstractmethod
+ def create_subnet(self, context, subnet):
+ """Called on subnet create event
+ :param subnet: user-supplied subnet object (dict)
+ :returns: models_v2.Subnet object being created
+ """
+ pass
+
+ @abc.abstractmethod
+ def update_subnet(self, context, id, subnet):
+ """Called on subnet update event
+ :param id: UUID of a subnet being updated
+ :param subnet: user-supplied subnet object (dict)
+ :returns: updated subnet
+ """
+ pass
+
+ @abc.abstractmethod
+ def delete_subnet(self, context, subnet_id):
+ """Called on delete subnet event
+ :param subnet_id: UUID of a subnet to be deleted
+ """
+ pass
+
+ @abc.abstractmethod
+ def delete_subnets_by_network(self, context, network_id):
+ pass
+
+ @abc.abstractmethod
+ def get_subnet_by_id(self, context, subnet_id):
+ pass
+
+ @abc.abstractmethod
+ def allocate_ip(self, context, host, ip):
+        """Called on port create event. Encapsulates logic associated with IP
+ allocation process.
+ :param host: host/port which needs IP to be allocated
+ :param ip: IP address for a port
+ """
+ pass
+
+ @abc.abstractmethod
+ def deallocate_ip(self, context, host, ip):
+ """Revoke IP allocated previously
+ :param host: host/port to have IP address deallocated
+ :param ip: IP address to revoke
+ """
+ pass
+
+ @abc.abstractmethod
+ def get_subnets(self, context, filters=None, fields=None,
+ sorts=None, limit=None, marker=None,
+ page_reverse=False):
+ pass
+
+ @abc.abstractmethod
+ def create_network(self, context, network):
+ """Called on network create event
+ :param network: user-supplied network object (dict)
+ """
+ pass
+
+ @abc.abstractmethod
+ def delete_network(self, context, network_id):
+ """Called on delete network event
+ :param network_id: UUID of network to be deleted
+ """
+ pass
+
+ @abc.abstractmethod
+ def create_port(self, context, port):
+ """Called on port create event
+ :param port: user-supplied port dict
+ """
+ pass
+
+ @abc.abstractmethod
+ def update_port(self, context, port):
+ """Called on port update event
+ :param port: user-supplied port dict
+ """
+ pass
+
+ @abc.abstractmethod
+ def delete_port(self, context, port):
+ """Called on port delete event
+ :param port: user-supplied port dict
+ """
+ pass
+
+ @abc.abstractmethod
+ def associate_floatingip(self, context, floatingip, port):
+ """Called on floating IP being associated with a port
+ :param floatingip: l3_db.FloatingIP object
+ :param port: models_v2.Port to be associated with floating IP
+ """
+ pass
+
+ @abc.abstractmethod
+ def disassociate_floatingip(self, context, floatingip, port_id):
+ """Inverse of associate floating IP. Removes relationship between
+ floating IP and a port
+ :param floatingip: l3_db.FloatingIP object to be disassociated from
+ port
+ :param port_id: port UUID to be disassociated from floating IP
+ """
+ pass
+
+ @abc.abstractmethod
+ def get_additional_network_dict_params(self, ctx, network_id):
+ """Returns a dict of extra arguments for a network. Place your
+ implementation if neutron agent(s) require extra information to
+ provision DHCP/DNS properly
+ :param network_id: UUID of a network to have extra arguments
+ """
+ pass
diff --git a/neutron/ipam/drivers/__init__.py b/neutron/ipam/drivers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/neutron/ipam/drivers/infoblox/__init__.py b/neutron/ipam/drivers/infoblox/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/neutron/ipam/drivers/infoblox/config.py b/neutron/ipam/drivers/infoblox/config.py
new file mode 100755
index 0000000..bba27cb
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/config.py
@@ -0,0 +1,617 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import io
+import logging
+import operator
+
+from oslo.config import cfg as neutron_conf
+
+from neutron.db.infoblox import infoblox_db as ib_db
+from neutron.db.infoblox import models as ib_models
+from neutron.ipam.drivers.infoblox import exceptions
+from neutron.ipam.drivers.infoblox import nova_manager
+from neutron.ipam.drivers.infoblox import objects
+from neutron.openstack.common import jsonutils
+
+LOG = logging.getLogger(__name__)
+OPTS = [
+ neutron_conf.StrOpt('conditional_config', default=None,
+ help=_("Infoblox conditional config path")),
+ neutron_conf.StrOpt('infoblox_members_config', default=None,
+ help=_("Path to infoblox members config file."))
+]
+
+neutron_conf.CONF.register_opts(OPTS)
+
+
+class ConfigFinder(object):
+ """
+ _variable_conditions: contains tenant_id or subnet_range "condition"
+ _static_conditions: contains global or tenant "condition"
+ _assigned_members: contains dhcp members to be registered in db
+ with its mapping id as network view
+ """
+ VALID_STATIC_CONDITIONS = ['global', 'tenant']
+ VALID_VARIABLE_CONDITIONS = ['tenant_id:', 'subnet_range:']
+ VALID_CONDITIONS = VALID_STATIC_CONDITIONS + VALID_VARIABLE_CONDITIONS
+
+ def __init__(self, stream=None, member_manager=None):
+ """Reads config from `io.IOBase`:stream:. Config is JSON format."""
+ self._member_manager = member_manager
+ self._variable_conditions = []
+ self._static_conditions = []
+ self._assigned_members = dict()
+ self._is_member_registered = False
+
+ if not member_manager:
+ self._member_manager = MemberManager()
+ if not stream:
+ config_file = neutron_conf.CONF.conditional_config
+ if not config_file:
+ raise exceptions.InfobloxConfigException(
+ msg="Config not found")
+ stream = io.FileIO(config_file)
+
+ with stream:
+ try:
+ self._conf = jsonutils.loads(stream.read())
+ self._read_conditions()
+ except ValueError as e:
+ raise exceptions.InfobloxConfigException(msg=e)
+
+ def configure_members(self, context):
+ # Do this only once after neutron server is restarted
+ if self._is_member_registered:
+ return
+
+ reg_members = self._member_manager.get_registered_members(
+ context)
+
+ # 1. register unregistered members
+ # -------------------------------------------------------
+ reg_member_names = []
+ # if never been registered
+ if len(reg_members) > 0:
+ reg_member_names = map(operator.attrgetter('name'),
+ reg_members)
+ reg_member_name_set = set(reg_member_names)
+ conf_member_name_set = set(
+ map(operator.attrgetter('name'),
+ self._member_manager.configured_members)
+ )
+ unreg_member_name_set = conf_member_name_set.difference(
+ reg_member_name_set)
+ self._member_manager.register_members(context,
+ list(unreg_member_name_set))
+
+ # 2. reserve the assigned members
+ # -------------------------------------------------------
+ reserv_member_names = []
+ reserv_mapped_ids = []
+ if len(reg_members) > 0:
+ zip_list = zip(*[(m.name, m.map_id)
+ for m in reg_members if m.map_id])
+ if len(zip_list) == 2:
+ reserv_member_names = list(zip_list[0])
+ reserv_mapped_ids = list(zip_list[1])
+ reserv_member_name_set = set(reserv_member_names)
+
+ for netview, memberset in self._assigned_members.items():
+ if netview in reserv_mapped_ids:
+ continue
+
+ unreserv_member_name_set = memberset.difference(
+ reserv_member_name_set)
+ if len(unreserv_member_name_set) == 0:
+ continue
+
+ member_name = unreserv_member_name_set.pop()
+ self._member_manager.reserve_member(context,
+ netview,
+ member_name,
+ ib_models.DHCP_MEMBER_TYPE)
+ self._member_manager.reserve_member(context,
+ netview,
+ member_name,
+ ib_models.DNS_MEMBER_TYPE)
+
+ self._is_member_registered = True
+
+ def find_config_for_subnet(self, context, subnet):
+ """
+ Returns first configuration which matches the object being created.
+ :param context:
+ :param subnet:
+ :return: :raise exceptions.InfobloxConfigException:
+ """
+ if not self._is_member_registered:
+ self.configure_members(context)
+
+ # First search for matching variable condition
+ for conditions in [self._variable_conditions, self._static_conditions]:
+ for cfg in conditions:
+ cfg = Config(cfg, context, subnet, self._member_manager)
+ if self._condition_matches(context, cfg, subnet):
+ return cfg
+
+ raise exceptions.InfobloxConfigException(
+ msg="No config found for subnet %s" % subnet)
+
+ def get_all_configs(self, context, subnet):
+ cfgs = []
+ for conditions in [self._variable_conditions, self._static_conditions]:
+ for cfg in conditions:
+ cfg = Config(cfg, context, subnet, self._member_manager)
+ cfgs.append(cfg)
+ return cfgs
+
+ @staticmethod
+ def _variable_condition_match(condition, var, expected):
+ return (condition.startswith(var) and
+ condition.split(':')[1] == expected)
+
+ def _condition_matches(self, context, config, subnet):
+ net_id = subnet.get('network_id', subnet.get('id'))
+ cidr = subnet.get('cidr')
+ tenant_id = subnet['tenant_id']
+
+ is_external = ib_db.is_network_external(context, net_id)
+ cond = config.condition
+ condition_matches = (
+ cond == 'global' or cond == 'tenant' or
+ self._variable_condition_match(cond, 'tenant_id', tenant_id) or
+ self._variable_condition_match(cond, 'subnet_range', cidr))
+
+ return config.is_external == is_external and condition_matches
+
+ def _read_conditions(self):
+ # Define lambdas to check
+ is_static_cond = lambda cond, static_conds: cond in static_conds
+ is_var_cond = lambda cond, var_conds: any([cond.startswith(valid)
+ for valid in var_conds])
+
+ for conf in self._conf:
+            # If the condition contains a colon, validate it as variable
+ if ':' in conf['condition'] and\
+ is_var_cond(conf['condition'],
+ self.VALID_VARIABLE_CONDITIONS):
+ self._variable_conditions.append(conf)
+ # If not: validate it as static
+ elif is_static_cond(conf['condition'],
+ self.VALID_STATIC_CONDITIONS):
+ self._static_conditions.append(conf)
+            # If neither of the previous checkers validates it, raise error
+ else:
+ msg = 'Invalid condition specified: {0}'.format(
+ conf['condition'])
+ raise exceptions.InfobloxConfigException(msg=msg)
+
+            # Look for assigned members; if dhcp_members lists specific
+            # members, then network_view should be static as well
+ netview = conf.get('network_view', 'default')
+ members = conf.get('dhcp_members', Config.NEXT_AVAILABLE_MEMBER)
+ if not isinstance(members, list) and \
+ members != Config.NEXT_AVAILABLE_MEMBER:
+ members = [members]
+ if isinstance(members, list) and \
+ not netview.startswith('{'):
+ if self._assigned_members.get(netview):
+ self._assigned_members[netview].update(set(members))
+ else:
+ self._assigned_members[netview] = set(members)
+
+
+class PatternBuilder(object):
+ def __init__(self, *pattern):
+ self.pattern = '.'.join([el.strip('.')
+ for el in pattern if el is not None])
+
+ def build(self, context, subnet, port=None, ip_addr=None, instance_name=None):
+ """
+ Builds string by passing supplied values into pattern
+ :raise exceptions.InfobloxConfigException:
+ """
+ self._validate_pattern()
+
+ subnet_name = subnet['name'] if subnet['name'] else subnet['id']
+
+ pattern_dict = {
+ 'network_id': subnet['network_id'],
+ 'network_name': ib_db.get_network_name(context, subnet),
+ 'tenant_id': subnet['tenant_id'],
+ 'subnet_name': subnet_name,
+ 'subnet_id': subnet['id'],
+ 'user_id': context.user_id
+ }
+
+ if ip_addr:
+ octets = ip_addr.split('.')
+ ip_addr = ip_addr.replace('.', '-').replace(':', '-')
+ pattern_dict['ip_address'] = ip_addr
+ for i in xrange(len(octets)):
+ octet_key = 'ip_address_octet{i}'.format(i=(i + 1))
+ pattern_dict[octet_key] = octets[i]
+
+ if port:
+ pattern_dict['port_id'] = port['id']
+ pattern_dict['instance_id'] = port['device_id']
+ if instance_name:
+ pattern_dict['instance_name'] = instance_name
+ else:
+ if '{instance_name}' in self.pattern:
+ nm = nova_manager.NovaManager()
+ pattern_dict['instance_name'] = nm.get_instance_name_by_id(
+ port['device_id'])
+
+ try:
+ fqdn = self.pattern.format(**pattern_dict)
+ except (KeyError, IndexError) as e:
+ raise exceptions.InfobloxConfigException(
+ msg="Invalid pattern %s" % e)
+
+ return fqdn
+
+ def _validate_pattern(self):
+ invalid_values = ['..']
+ for val in invalid_values:
+ if val in self.pattern:
+ error_message = "Invalid pattern value {0}".format(val)
+ raise exceptions.InfobloxConfigException(msg=error_message)
+
+
+class Config(object):
+ NEXT_AVAILABLE_MEMBER = '<next-available-member>'
+ NETWORK_VIEW_TEMPLATES = ['{tenant_id}',
+ '{network_name}',
+ '{network_id}']
+
+ DEFINING_ATTRS = ['condition', '_dhcp_members', '_dns_members',
+ '_net_view', '_dns_view']
+
+    def __init__(self, config_dict, context, subnet,
+                 member_manager=None):
+        if not member_manager:
+            member_manager = MemberManager()  # was dead local _member_manager
+
+        if 'condition' not in config_dict:
+            raise exceptions.InfobloxConfigException(
+                msg="Missing mandatory 'condition' config option")
+
+        self.condition = config_dict['condition']
+        self.is_external = config_dict.get('is_external', False)
+
+        self._net_view = config_dict.get('network_view', 'default')
+        self._set_network_view_scope()
+
+        self._dns_view = config_dict.get('dns_view', 'default')
+
+        self.require_dhcp_relay = config_dict.get('require_dhcp_relay', False)
+
+        self._dhcp_members = self._members_identifier(
+            config_dict.get('dhcp_members', self.NEXT_AVAILABLE_MEMBER))
+        self._dns_members = self._members_identifier(
+            config_dict.get('dns_members', self._dhcp_members))
+
+        self.domain_suffix_pattern = config_dict.get(
+            'domain_suffix_pattern', 'global.com')
+        self.hostname_pattern = config_dict.get(
+            'hostname_pattern', 'host-{ip_address}.{subnet_name}')
+
+        pattern = re.compile(r'\{\S+\}')
+        if pattern.findall(self.domain_suffix_pattern):
+            self.is_static_domain_suffix = False
+        else:
+            self.is_static_domain_suffix = True
+
+        self.network_template = config_dict.get('network_template')
+        self.ns_group = config_dict.get('ns_group')
+
+        self.context = context
+        self.subnet = subnet
+        self._member_manager = member_manager
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ all(map(lambda attr:
+ getattr(self, attr) == getattr(other, attr),
+ self.DEFINING_ATTRS)))
+
+ def __hash__(self):
+ return hash(tuple(self.DEFINING_ATTRS))
+
+ def __repr__(self):
+ values = {
+ 'condition': self.condition,
+ 'dns_members': self._dns_members,
+ 'dhcp_members': self._dhcp_members,
+ 'net_view': self._net_view,
+ 'dns_view': self._dns_view
+ }
+
+ return "ConditionalConfig{0}".format(values)
+
+ @property
+ def network_view_scope(self):
+ return self._net_view_scope
+
+ @property
+ def network_view(self):
+ if self._net_view_scope == 'tenant_id':
+ return self.subnet['tenant_id']
+ if self._net_view_scope == 'network_name':
+ return ib_db.get_network_name(self.context, self.subnet)
+ if self._net_view_scope == 'network_id':
+ return self.subnet['network_id']
+ return self._net_view
+
+ @property
+ def dns_view(self):
+ if self.network_view == 'default':
+ return self._dns_view
+
+ return '.'.join([self._dns_view, self.network_view])
+
+ @property
+ def dhcp_members(self):
+ return self._get_members(ib_models.DHCP_MEMBER_TYPE)
+
+ @property
+ def dns_members(self):
+ return self._get_members(ib_models.DNS_MEMBER_TYPE)
+
+ @property
+ def is_global_config(self):
+ return self.condition == 'global'
+
+ def reserve_dns_members(self):
+ reserved_members = self._reserve_members(self._dns_members,
+ self.ns_group,
+ ib_models.DNS_MEMBER_TYPE)
+
+ if isinstance(reserved_members, list):
+ return reserved_members
+ elif reserved_members:
+ return [reserved_members]
+ else:
+ return []
+
+ def reserve_dhcp_members(self):
+ reserved_members = self._reserve_members(self._dhcp_members,
+ self.network_template,
+ ib_models.DHCP_MEMBER_TYPE)
+
+ if isinstance(reserved_members, list):
+ return reserved_members
+ else:
+ return [reserved_members]
+
+ def release_member(self, map_id):
+ self._member_manager.release_member(self.context, map_id)
+
+ def requires_net_view(self):
+ return True
+
+ def verify_subnet_update_is_allowed(self, subnet_new):
+ """
+ Subnet update procedure is not allowed if Infoblox zone name exists on
+ NIOS. This can only happen if domain suffix pattern has subnet name
+ included.
+ """
+ subnet_new_name = subnet_new.get('name')
+ subnet_name = self.subnet.get('name')
+ pattern = self.domain_suffix_pattern
+ update_allowed = not (subnet_name is not None and
+ subnet_new_name is not None and
+ subnet_name != subnet_new_name and
+ '{subnet_name}' in pattern)
+
+ if not update_allowed:
+ raise exceptions.OperationNotAllowed(
+ reason="subnet_name is in domain name pattern")
+
+ if subnet_new.get('network') and subnet_new.get('network_before'):
+ network_new_name = subnet_new.get('network').get('name')
+ network_name = subnet_new.get('network_before').get('name')
+ update_allowed = not (network_name is not None and
+ network_new_name is not None and
+ network_name != network_new_name and
+ '{network_name}' in pattern)
+
+ if not update_allowed:
+ raise exceptions.OperationNotAllowed(
+ reason="network_name is in domain name pattern")
+
+ def _set_network_view_scope(self):
+ if (self._net_view.startswith('{') and
+ self._net_view not in self.NETWORK_VIEW_TEMPLATES):
+ raise exceptions.InfobloxConfigException(
+ msg="Invalid config value for 'network_view'")
+
+ if self._net_view == '{tenant_id}':
+ self._net_view_scope = 'tenant_id'
+ elif self._net_view == '{network_name}':
+ self._net_view_scope = 'network_name'
+ elif self._net_view == '{network_id}':
+ self._net_view_scope = 'network_id'
+ else:
+ self._net_view_scope = 'static'
+
+ def _get_members(self, member_type):
+ members = self._member_manager.find_members(self.context,
+ self.network_view,
+ member_type)
+ if members:
+ return members
+
+ msg = ("Looks like you're trying to call config.{member_type}_member "
+ "without reserving one. You should call "
+ "reserve_{member_type}_member() "
+ "first!".format(member_type=member_type))
+ raise RuntimeError(msg)
+
+ def _reserve_members_list(self, assigned_members, member_type):
+ members_to_reserve = [self._member_manager.get_member(member)
+ for member in assigned_members]
+ for member in members_to_reserve:
+ self._member_manager.reserve_member(self.context,
+ self.network_view,
+ member.name,
+ member_type)
+ return members_to_reserve
+
+ def _reserve_by_template(self, assigned_members, template, member_type):
+ member = self._get_member_from_template(assigned_members, template)
+ self._member_manager.reserve_member(self.context,
+ self.network_view,
+ member.name,
+ member_type)
+ return member
+
+ def _reserve_next_avaliable(self, member_type):
+ member = self._member_manager.next_available(self.context,
+ member_type)
+ self._member_manager.reserve_member(self.context,
+ self.network_view,
+ member.name,
+ member_type)
+ return member
+
+ def _reserve_members(self, assigned_members, template, member_type):
+ members = self._member_manager.find_members(self.context,
+ self.network_view,
+ member_type)
+ if members:
+ return members
+
+ if assigned_members == self.NEXT_AVAILABLE_MEMBER:
+ return self._reserve_next_avaliable(member_type)
+ elif isinstance(assigned_members, list):
+ return self._reserve_members_list(assigned_members,
+ member_type)
+ elif template:
+ return self._reserve_by_template(assigned_members,
+ template,
+ member_type)
+
+ def _get_member_from_template(self, assigned_members, template):
+ member_defined = (assigned_members != self.NEXT_AVAILABLE_MEMBER
+ and isinstance(assigned_members, basestring))
+ if template and not member_defined:
+ msg = 'Member MUST be configured for {template}'.format(
+ template=template)
+ raise exceptions.InfobloxConfigException(msg=msg)
+ return self._member_manager.get_member(assigned_members)
+
+ def _members_identifier(self, members):
+ if not isinstance(members, list) and \
+ members != self.NEXT_AVAILABLE_MEMBER:
+ members = [members]
+ return members
+
+
+class MemberManager(object):
+ def __init__(self, member_config_stream=None):
+ if not member_config_stream:
+ config_file = neutron_conf.CONF.infoblox_members_config
+ if not config_file:
+ raise exceptions.InfobloxConfigException(
+ msg="Config not found")
+ member_config_stream = io.FileIO(config_file)
+
+ with member_config_stream:
+ all_members = jsonutils.loads(member_config_stream.read())
+
+ try:
+ self.configured_members = map(
+ lambda m: objects.Member(name=m.get('name'),
+ ip=m.get('ipv4addr'),
+ ipv6=m.get('ipv6addr'),
+ delegate=m.get('delegate'),
+ map_id=None),
+ filter(lambda m: m.get('is_available', True),
+ all_members))
+ except KeyError as key:
+ raise exceptions.InfobloxConfigException(
+ msg="Invalid member config key: %s" % key)
+
+ if self.configured_members is None or \
+ len(self.configured_members) == 0:
+ raise exceptions.InfobloxConfigException(
+ msg="Configured member not found")
+
+ def __repr__(self):
+ values = {
+ 'configured_members': self.configured_members
+ }
+ return "MemberManager{0}".format(values)
+
+ def register_members(self, context, member_names):
+ for member_name in member_names:
+ ib_db.register_member(context, None, member_name,
+ ib_models.DHCP_MEMBER_TYPE)
+ ib_db.register_member(context, None, member_name,
+ ib_models.DNS_MEMBER_TYPE)
+
+ def get_registered_members(self, context,
+ member_type=ib_models.DHCP_MEMBER_TYPE):
+ registered_members = ib_db.get_registered_members(context,
+ member_type)
+ members = []
+ for reg_member in registered_members:
+ for member in self.configured_members:
+ if member.name == reg_member.member_name:
+ member.map_id = reg_member.map_id
+ members.append(member)
+ return members
+
+ def next_available(self, context, member_type):
+ avail_member = ib_db.get_available_member(context, member_type)
+ if not avail_member:
+ raise exceptions.InfobloxConfigException(
+ msg="No infoblox member available")
+
+ return self.get_member(avail_member.member_name)
+
+ def reserve_member(self, context, mapping, member_name, member_type):
+ ib_db.attach_member(context, mapping, member_name, member_type)
+
+ def release_member(self, context, mapping):
+ ib_db.release_member(context, mapping)
+
+ def get_member(self, member_name):
+ for member in self.configured_members:
+ if member.name == member_name:
+ return member
+ raise exceptions.InfobloxConfigException(
+ msg="No infoblox member available")
+
+ def find_members(self, context, map_id, member_type):
+ existing_members = ib_db.get_members(context, map_id, member_type)
+ if not existing_members:
+ return []
+
+ members = []
+ for existing_member in existing_members:
+ for member in self.configured_members:
+ if member.name == existing_member.member_name:
+ members.append(member)
+
+ if not members:
+ msg = "Reserved member not available in config"
+ raise exceptions.InfobloxConfigException(msg=msg)
+
+ return members
diff --git a/neutron/ipam/drivers/infoblox/connector.py b/neutron/ipam/drivers/infoblox/connector.py
new file mode 100755
index 0000000..aaa168f
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/connector.py
@@ -0,0 +1,357 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import re
+
+from oslo.config import cfg
+import requests
+from requests import exceptions as req_exc
+import urllib
+import urlparse
+
+from neutron.ipam.drivers.infoblox import exceptions as exc
+from neutron.openstack.common import jsonutils
+from neutron.openstack.common import log as logging
+
+
+OPTS = [
+ cfg.StrOpt('infoblox_wapi', help=_("REST API url")),
+ cfg.StrOpt('infoblox_username', help=_("User name")),
+ cfg.StrOpt('infoblox_password', help=_("User password")),
+ cfg.BoolOpt('infoblox_sslverify', default=False),
+ cfg.IntOpt('infoblox_http_pool_connections', default=100),
+ cfg.IntOpt('infoblox_http_pool_maxsize', default=100),
+ cfg.IntOpt('infoblox_http_request_timeout', default=120)
+]
+
+cfg.CONF.register_opts(OPTS)
+
+
+LOG = logging.getLogger(__name__)
+
+
+def reraise_neutron_exception(func):
+ @functools.wraps(func)
+ def callee(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except req_exc.Timeout as e:
+ LOG.error(e.message)
+ raise exc.InfobloxTimeoutError(e)
+ except req_exc.RequestException as e:
+ LOG.error(_("HTTP request failed: %s"), e)
+ raise exc.InfobloxConnectionError(reason=e)
+ return callee
+
+
+class Infoblox(object):
+ """
+ Infoblox class
+
+ Defines methods for getting, creating, updating and
+ removing objects from an Infoblox server instance.
+ """
+
+ CLOUD_WAPI_MAJOR_VERSION = 2
+
+ def __init__(self):
+ """
+ Initialize a new Infoblox object instance
+ Args:
+ config (str): Path to the Infoblox configuration file
+ """
+ self.wapi = cfg.CONF.infoblox_wapi
+ self.username = cfg.CONF.infoblox_username
+ self.password = cfg.CONF.infoblox_password
+ self.sslverify = cfg.CONF.infoblox_sslverify
+ self.request_timeout = cfg.CONF.infoblox_http_request_timeout
+
+ if not self.wapi or not self.username or not self.password:
+ raise exc.InfobloxIsMisconfigured()
+
+ self.is_cloud = self.is_cloud_wapi(self.wapi)
+ self.session = requests.Session()
+ adapter = requests.adapters.HTTPAdapter(
+ pool_connections=cfg.CONF.infoblox_http_pool_connections,
+ pool_maxsize=cfg.CONF.infoblox_http_pool_maxsize)
+ self.session.mount('http://', adapter)
+ self.session.mount('https://', adapter)
+ self.session.auth = (self.username, self.password)
+ self.session.verify = self.sslverify
+
+ @staticmethod
+ def is_cloud_wapi(wapi_url):
+ version_match = re.search('\/wapi\/v(\d+)\.(\d+)', wapi_url)
+ if version_match:
+ if int(version_match.group(1)) >= Infoblox.CLOUD_WAPI_MAJOR_VERSION:
+ return True
+ return False
+
+ def _construct_url(self, relative_path, query_params=None, extattrs=None):
+ if query_params is None:
+ query_params = {}
+ if extattrs is None:
+ extattrs = {}
+
+ if not relative_path or relative_path[0] == '/':
+ raise ValueError('Path in request must be relative.')
+ query = ''
+ if query_params or extattrs:
+ query = '?'
+
+ if extattrs:
+ attrs_queries = []
+ for key, value in extattrs.items():
+ attrs_queries.append('*' + key + '=' + value['value'])
+ query += '&'.join(attrs_queries)
+ if query_params:
+ if len(query) > 1:
+ query += '&'
+ query += urllib.urlencode(query_params)
+
+ baseurl = urlparse.urljoin(self.wapi, urllib.quote(relative_path))
+ return baseurl + query
+
+ @staticmethod
+ def _validate_objtype_or_die(objtype, objtype_expected=True):
+ if not objtype:
+ raise ValueError('WAPI object type can\'t be empty.')
+ if objtype_expected and '/' in objtype:
+ raise ValueError('WAPI object type can\'t contain slash.')
+
+ @reraise_neutron_exception
+ def get_object(self, objtype, payload=None, return_fields=None,
+ extattrs=None, proxy=False):
+ """
+ Retrieve a list of Infoblox objects of type 'objtype'
+ Args:
+ objtype (str): Infoblox object type, e.g. 'network',
+ 'range', etc.
+ payload (dict): Payload with data to send
+ return_fields (list): List of fields to be returned
+            extattrs (dict): Extensible Attributes to filter by
+ Returns:
+ A list of the Infoblox objects requested
+ Raises:
+ InfobloxObjectNotFound
+ """
+ if return_fields is None:
+ return_fields = []
+ if extattrs is None:
+ extattrs = {}
+
+ self._validate_objtype_or_die(objtype, objtype_expected=False)
+
+ query_params = dict()
+ if payload:
+ query_params = payload
+
+ if return_fields:
+ query_params['_return_fields'] = ','.join(return_fields)
+
+        # Some get requests, like 'ipv4address', should always be
+        # proxied to GM on Hellfire.
+        # If the request is cloud and proxy is not forced yet,
+        # then plan to do 2 requests:
+        # - the first one is not proxied to GM
+        # - the second one is proxied to GM
+ urls = dict()
+ urls['direct'] = self._construct_url(objtype, query_params, extattrs)
+ if self.is_cloud:
+ query_params['_proxy_search'] = 'GM'
+ urls['proxy'] = self._construct_url(objtype, query_params, extattrs)
+
+ url = urls['direct']
+ if self.is_cloud and proxy:
+ url = urls['proxy']
+
+ headers = {'Content-type': 'application/json'}
+
+ ib_object = self._get_object(objtype, url, headers)
+ if ib_object:
+ return ib_object
+
+ # if cloud api and proxy is not used, use proxy
+ if self.is_cloud and not proxy:
+ return self._get_object(objtype, urls['proxy'], headers)
+
+ return None
+
+ def _get_object(self, objtype, url, headers):
+ r = self.session.get(url,
+ verify=self.sslverify,
+ timeout=self.request_timeout,
+ headers=headers)
+
+ if r.status_code == requests.codes.UNAUTHORIZED:
+ raise exc.InfobloxBadWAPICredential(response='')
+
+ if r.status_code != requests.codes.ok:
+ raise exc.InfobloxSearchError(
+ response=jsonutils.loads(r.content),
+ objtype=objtype,
+ content=r.content,
+ code=r.status_code)
+
+ return jsonutils.loads(r.content)
+
+ @reraise_neutron_exception
+ def create_object(self, objtype, payload,
+ return_fields=None, delegate_member=None):
+ """
+ Create an Infoblox object of type 'objtype'
+ Args:
+ objtype (str): Infoblox object type,
+ e.g. 'network', 'range', etc.
+ payload (dict): Payload with data to send
+ return_fields (list): List of fields to be returned
+ Returns:
+            The object reference of the newly created object
+ Raises:
+ InfobloxException
+ """
+ if self.is_cloud and delegate_member and delegate_member.delegate:
+ payload.update({"cloud_info": {
+ "delegated_member": delegate_member.specifier}})
+
+ if not return_fields:
+ return_fields = []
+
+ self._validate_objtype_or_die(objtype)
+
+ query_params = dict()
+
+ if return_fields:
+ query_params['_return_fields'] = ','.join(return_fields)
+
+ url = self._construct_url(objtype, query_params)
+ data = jsonutils.dumps(payload)
+ headers = {'Content-type': 'application/json'}
+ r = self.session.post(url,
+ data=data,
+ verify=self.sslverify,
+ timeout=self.request_timeout,
+ headers=headers)
+
+ if r.status_code == requests.codes.UNAUTHORIZED:
+ raise exc.InfobloxBadWAPICredential(response='')
+
+ if r.status_code != requests.codes.CREATED:
+ raise exc.InfobloxCannotCreateObject(
+ response=jsonutils.loads(r.content),
+ objtype=objtype,
+ content=r.content,
+ args=payload,
+ code=r.status_code)
+
+ return jsonutils.loads(r.content)
+
+    @reraise_neutron_exception
+    def call_func(self, func_name, ref, payload, return_fields=None):
+        if not return_fields:
+            return_fields = []
+
+        query_params = dict()
+        query_params['_function'] = func_name
+
+        if return_fields:
+            query_params['_return_fields'] = ','.join(return_fields)
+
+        url = self._construct_url(ref, query_params)
+
+        headers = {'Content-type': 'application/json'}
+        r = self.session.post(url,
+                              data=jsonutils.dumps(payload),
+                              verify=self.sslverify,
+                              timeout=self.request_timeout, headers=headers)
+
+        if r.status_code == requests.codes.UNAUTHORIZED:
+            raise exc.InfobloxBadWAPICredential(response='')
+
+        if r.status_code not in (requests.codes.CREATED,
+                                 requests.codes.ok):
+            raise exc.InfobloxFuncException(
+                response=jsonutils.loads(r.content),
+                ref=ref,
+                func_name=func_name,
+                content=r.content,
+                code=r.status_code)
+
+        return jsonutils.loads(r.content)
+
+ @reraise_neutron_exception
+ def update_object(self, ref, payload, return_fields=None):
+ """
+ Update an Infoblox object
+ Args:
+ ref (str): Infoblox object reference
+ payload (dict): Payload with data to send
+ Returns:
+ The object reference of the updated object
+ Raises:
+ InfobloxException
+ """
+ query_params = {}
+ if return_fields:
+ query_params['_return_fields'] = ','.join(return_fields)
+
+ headers = {'Content-type': 'application/json'}
+ r = self.session.put(self._construct_url(ref, query_params),
+ data=jsonutils.dumps(payload),
+ verify=self.sslverify,
+ timeout=self.request_timeout,
+ headers=headers)
+
+ if r.status_code == requests.codes.UNAUTHORIZED:
+ raise exc.InfobloxBadWAPICredential(response='')
+
+ if r.status_code != requests.codes.ok:
+ raise exc.InfobloxCannotUpdateObject(
+ response=jsonutils.loads(r.content),
+ ref=ref,
+ content=r.content,
+ code=r.status_code)
+
+ return jsonutils.loads(r.content)
+
+ @reraise_neutron_exception
+ def delete_object(self, ref):
+ """
+ Remove an Infoblox object
+ Args:
+ ref (str): Object reference
+ Returns:
+ The object reference of the removed object
+ Raises:
+ InfobloxException
+ """
+ r = self.session.delete(self._construct_url(ref),
+ verify=self.sslverify,
+ timeout=self.request_timeout)
+
+ if r.status_code == requests.codes.UNAUTHORIZED:
+ raise exc.InfobloxBadWAPICredential(response='')
+
+ if r.status_code != requests.codes.ok:
+ raise exc.InfobloxCannotDeleteObject(
+ response=jsonutils.loads(r.content),
+ ref=ref,
+ content=r.content,
+ code=r.status_code)
+
+ return jsonutils.loads(r.content)
diff --git a/neutron/ipam/drivers/infoblox/constants.py b/neutron/ipam/drivers/infoblox/constants.py
new file mode 100644
index 0000000..cef4cea
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/constants.py
@@ -0,0 +1,31 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from neutron.common import constants as neutron_constants
+from neutron.plugins.common import constants as plugins_constants
+
+NEUTRON_DEVICE_OWNER_TO_PATTERN_MAP = {
+ neutron_constants.DEVICE_OWNER_DHCP: 'dhcp-port-{ip_address}',
+ neutron_constants.DEVICE_OWNER_ROUTER_INTF: 'router-iface-{ip_address}',
+ neutron_constants.DEVICE_OWNER_ROUTER_GW: 'router-gw-{ip_address}',
+ neutron_constants.DEVICE_OWNER_FLOATINGIP: 'floating-ip-{ip_address}',
+ 'neutron:' + plugins_constants.LOADBALANCER: 'lb-vip-{ip_address}',
+}
+
+NEUTRON_INTERNAL_SERVICE_DEVICE_OWNERS = [
+ neutron_constants.DEVICE_OWNER_DHCP,
+ neutron_constants.DEVICE_OWNER_ROUTER_INTF,
+ neutron_constants.DEVICE_OWNER_ROUTER_GW,
+ 'neutron:' + plugins_constants.LOADBALANCER
+]
diff --git a/neutron/ipam/drivers/infoblox/dns_controller.py b/neutron/ipam/drivers/infoblox/dns_controller.py
new file mode 100755
index 0000000..e8130ea
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/dns_controller.py
@@ -0,0 +1,308 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+from oslo.config import cfg as neutron_conf
+from taskflow.patterns import linear_flow
+
+from neutron.common import constants as neutron_constants
+from neutron.db.infoblox import infoblox_db as infoblox_db
+from neutron.common import ipv6_utils
+from neutron.ipam.drivers.infoblox import config
+from neutron.ipam.drivers.infoblox import connector
+from neutron.ipam.drivers.infoblox import constants as ib_constants
+from neutron.ipam.drivers.infoblox import ea_manager
+from neutron.ipam.drivers.infoblox import exceptions
+from neutron.ipam.drivers.infoblox import object_manipulator
+from neutron.ipam.drivers.infoblox import tasks
+from neutron.ipam.drivers import neutron_ipam
+from neutron.openstack.common import log as logging
+
+OPTS = [
+ neutron_conf.StrOpt('private_dns_view_name',
+ default=None,
+ help=_("If single_network_view_name is specified, "
+ "this option will define DNS View name used "
+ "to serve networks from the single network "
+ "view. Otherwise it is ignored and "
+ "'default.<netview_name>' is used.")),
+ neutron_conf.StrOpt('external_dns_view_name',
+ default=None,
+ help=_("All the subnets created in external networks "
+ "will be associated with DNS View with such "
+ "name. If not specified, name "
+ "'default.<netview_name>' will be used.")),
+ neutron_conf.StrOpt('subnet_fqdn_suffix',
+ default='com',
+ help=_("Suffix for subnet domain name. Used to "
+ "generate subnet FQDN which is built using "
+ "the following pattern "
+ "<subnet_domain><subnet_fqdn_suffix>. "
+ "Subnet domain uniquely represents subnet and "
+ "equal to subnet name if specified, otherwise "
+ "equal to the first part of subnet uuid.")),
+ neutron_conf.BoolOpt('use_global_dns_zone',
+ default=True,
+ help=_("Use global DNS zone. Global private DNS zone "
+ "only make sense when we use single network "
+ "view")),
+ neutron_conf.BoolOpt('allow_admin_network_deletion',
+ default=False,
+ help=_("Allow admin network which is global, "
+ "external, or shared to be deleted"))
+]
+
+LOG = logging.getLogger(__name__)
+neutron_conf.CONF.register_opts(OPTS)
+
+
+class InfobloxDNSController(neutron_ipam.NeutronDNSController):
+
+ SUBDOMAIN_NAME_LEN = 8
+
+ def __init__(self, ip_allocator, manipulator=None, config_finder=None):
+ super(InfobloxDNSController, self).__init__()
+
+ if not manipulator:
+ manipulator = object_manipulator.InfobloxObjectManipulator(
+ connector.Infoblox())
+
+ self.infoblox = manipulator
+ self.ip_allocator = ip_allocator
+ self.config_finder = config_finder
+ self.ea_manager = ea_manager.InfobloxEaManager(infoblox_db)
+ self.pattern_builder = config.PatternBuilder
+
+ def disassociate_floatingip(self, context, ip_address, port_id):
+ floating_port_id = ip_address.get('floating_port_id')
+ port = infoblox_db.get_port_by_id(context, floating_port_id)
+ extattrs = self.ea_manager.get_extattrs_for_ip(context, port, True)
+ self.bind_names(context, port, disassociate=True)
+
+ @staticmethod
+ def get_hostname_pattern(port, cfg, instance_name):
+ port_owner = port['device_owner']
+ if port_owner == neutron_constants.DEVICE_OWNER_FLOATINGIP:
+ if instance_name and "{instance_name}" in cfg.hostname_pattern:
+ return cfg.hostname_pattern
+ if (port_owner
+ in ib_constants.NEUTRON_DEVICE_OWNER_TO_PATTERN_MAP.keys()):
+ return ib_constants.NEUTRON_DEVICE_OWNER_TO_PATTERN_MAP[port_owner]
+ else:
+ return cfg.hostname_pattern
+
+ @staticmethod
+ def get_instancename(extattrs):
+ instance_name = None
+ if extattrs:
+ vm_attr = extattrs.get('VM Name')
+ if vm_attr:
+ instance_name = vm_attr.get('value')
+ return instance_name
+
+ def _bind_names(self, context, backend_port, binding_func, extattrs=None):
+ all_dns_members = []
+
+ for ip in backend_port['fixed_ips']:
+ subnet = infoblox_db.get_subnet(context, ip['subnet_id'])
+ if subnet['ip_version'] == 4 or \
+ not ipv6_utils.is_auto_address_subnet(subnet):
+ cfg = self.config_finder.find_config_for_subnet(context,
+ subnet)
+ dns_members = cfg.reserve_dns_members()
+ all_dns_members.extend(dns_members)
+ ip_addr = ip['ip_address']
+ instance_name = self.get_instancename(extattrs)
+
+ hostname_pattern = self.get_hostname_pattern(
+ backend_port, cfg, instance_name)
+ pattern_builder = self.pattern_builder(
+ hostname_pattern, cfg.domain_suffix_pattern)
+ fqdn = pattern_builder.build(
+ context, subnet, backend_port, ip_addr, instance_name)
+
+ binding_func(cfg.network_view, cfg.dns_view, ip_addr, fqdn,
+ extattrs)
+
+ for member in set(all_dns_members):
+ self.infoblox.restart_all_services(member)
+
+ def bind_names(self, context, backend_port, disassociate=False):
+ if not backend_port['device_owner']:
+ return
+ # In the case of disassociating floatingip, we need to explicitly
+ # indicate to ignore instance id associated with the floating ip.
+ # This is because, at this point, the floating ip is still associated
+ # with instance in the neutron database.
+ extattrs = self.ea_manager.get_extattrs_for_ip(
+ context, backend_port, ignore_instance_id=disassociate)
+ try:
+ self._bind_names(context, backend_port,
+ self.ip_allocator.bind_names, extattrs)
+ except exceptions.InfobloxCannotCreateObject as e:
+ self.unbind_names(context, backend_port)
+ raise e
+
+ def unbind_names(self, context, backend_port):
+ self._bind_names(context, backend_port, self.ip_allocator.unbind_names)
+
+ def create_dns_zones(self, context, backend_subnet):
+ cfg = self.config_finder.find_config_for_subnet(context,
+ backend_subnet)
+ dns_members = cfg.reserve_dns_members()
+
+ dns_zone = self.pattern_builder(cfg.domain_suffix_pattern).\
+ build(context, backend_subnet)
+ zone_extattrs = self.ea_manager.get_extattrs_for_zone(
+ context, subnet=backend_subnet)
+
+ # Add prefix only for classless networks (ipv4)
+ # mask greater than 24 needs prefix.
+ # use meaningful prefix if used
+ prefix = None
+ if backend_subnet['ip_version'] == 4:
+ m = re.search(r'/\d+', backend_subnet['cidr'])
+ mask = m.group().replace("/", "")
+ if int(mask) > 24:
+ if len(backend_subnet['name']) > 0:
+ prefix = backend_subnet['name']
+ else:
+ prefix = '-'.join(
+ filter(None,
+ re.split(r'[.:/]', backend_subnet['cidr']))
+ )
+
+ args = {
+ 'backend_subnet': backend_subnet,
+ 'dnsview_name': cfg.dns_view,
+ 'fqdn': dns_zone,
+ 'cidr': backend_subnet['cidr'],
+ 'prefix': prefix,
+ 'zone_format': 'IPV%s' % backend_subnet['ip_version'],
+ 'zone_extattrs': zone_extattrs,
+ 'obj_manip': self.infoblox
+ }
+ create_dns_zones_flow = linear_flow.Flow('create-dns-zones')
+
+ if cfg.ns_group:
+ args['ns_group'] = cfg.ns_group
+ create_dns_zones_flow.add(
+ tasks.CreateDNSZonesFromNSGroupTask(),
+ tasks.CreateDNSZonesCidrFromNSGroupTask(),
+ )
+ else:
+ args['dns_member'] = dns_members[0]
+ args['secondary_dns_members'] = dns_members[1:]
+ create_dns_zones_flow.add(
+ tasks.CreateDNSZonesTask(),
+ tasks.CreateDNSZonesTaskCidr())
+
+ context.store.update(args)
+ context.parent_flow.add(create_dns_zones_flow)
+
+ def delete_dns_zones(self, context, backend_subnet):
+ cfg = self.config_finder.find_config_for_subnet(context,
+ backend_subnet)
+ dns_zone_fqdn = self.pattern_builder(cfg.domain_suffix_pattern).\
+ build(context, backend_subnet)
+ dnsview_name = cfg.dns_view
+
+ network = self._get_network(context, backend_subnet['network_id'])
+ is_external = infoblox_db.is_network_external(context,
+ network.get('id'))
+ is_shared = network.get('shared')
+
+ # If config is global, do not delete dns zone for that subnet
+ # If subnet is for external or shared network, do not delete a zone
+ # for the subnet.
+ # If subnet is for private network (not external, shared, or global),
+ # check if domain suffix is unique to the subnet.
+ # if subnet name is part of the domain suffix pattern, then delete
+ # forward zone.
+ # if network name is part of the domain suffix pattern, then delete
+ # forward zone only if the subnet is only remaining subnet
+ # in the network.
+ # Reverse zone is deleted when not global, not external, and not shared
+ if neutron_conf.CONF.allow_admin_network_deletion or \
+ not (cfg.is_global_config or is_external or is_shared):
+ if (('{subnet_name}' in cfg.domain_suffix_pattern or
+ '{subnet_id}' in cfg.domain_suffix_pattern) or
+ (('{network_name}' in cfg.domain_suffix_pattern or
+ '{network_id}' in cfg.domain_suffix_pattern) and
+ infoblox_db.is_last_subnet_in_network(
+ context, backend_subnet['id'],
+ backend_subnet['network_id'])) or
+ ('{tenant_id}' in cfg.domain_suffix_pattern and
+ infoblox_db.is_last_subnet_in_tenant(
+ context, backend_subnet['id'],
+ backend_subnet['tenant_id'])) or
+ (self._determine_static_zone_deletion(
+ context, backend_subnet,
+ cfg.is_static_domain_suffix))):
+ # delete forward zone
+ self.infoblox.delete_dns_zone(dnsview_name, dns_zone_fqdn)
+
+ # delete reverse zone
+ self.infoblox.delete_dns_zone(dnsview_name,
+ backend_subnet['cidr'])
+
+ def _determine_static_zone_deletion(self, context,
+ backend_subnet, is_static):
+ """
+ Checking config if deletion is possible:
+ global tenant
+ x x n/a
+ o x cannot delete, global cannot be deleted
+ x o allow delete, only tenant should use
+ o o cannot delete, global cannot be deleted
+ If possible, then subnet must be the last one among all private
+ networks.
+ """
+ if not is_static:
+ return False
+
+ cfgs = self.config_finder.get_all_configs(context, backend_subnet)
+ for cfg in cfgs:
+ if cfg.is_global_config and cfg.is_static_domain_suffix:
+ return False
+
+ return infoblox_db.is_last_subnet_in_private_networks(
+ context, backend_subnet['id'])
+
+
+def has_nameservers(subnet):
+ try:
+ has_dns = iter(subnet['dns_nameservers']) is not None
+ except (TypeError, KeyError):
+ has_dns = False
+
+ return has_dns
+
+
+def get_nameservers(subnet):
+ if has_nameservers(subnet):
+ return subnet['dns_nameservers']
+ return []
+
+
+def build_fqdn(prefix, zone, ip_address):
+ ip_address = ip_address.replace('.', '-')
+ if zone:
+        zone = zone.lstrip('.')
+ return "%(prefix)s%(ip_address)s.%(zone)s" % {
+ 'prefix': prefix,
+ 'ip_address': ip_address,
+ 'zone': zone
+ }
diff --git a/neutron/ipam/drivers/infoblox/ea_manager.py b/neutron/ipam/drivers/infoblox/ea_manager.py
new file mode 100755
index 0000000..42fd79f
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/ea_manager.py
@@ -0,0 +1,329 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from neutron.db import db_base_plugin_v2
+from neutron.db import l3_db
+from neutron.db import models_v2
+from neutron.ipam.drivers.infoblox import connector
+from neutron.ipam.drivers.infoblox import constants as ib_constants
+from neutron.ipam.drivers.infoblox import exceptions
+from neutron.ipam.drivers.infoblox import l2_driver
+from neutron.ipam.drivers.infoblox import nova_manager
+from neutron.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class InfobloxEaManager(object):
+ # CMP == cloud management platform
+ OPENSTACK_OBJECT_FLAG = 'CMP Type'
+
+ def __init__(self, infoblox_db):
+        # Passing this through the constructor to avoid cyclic imports
+ self.db = infoblox_db
+ self._network_l2_info_provider = l2_driver.L2Info()
+
+ def get_extattrs_for_nview(self, context):
+ """
+ Generates EAs for Network View
+ :param context: current neutron context
+ :return: dict with extensible attributes ready to be sent as part of
+ NIOS WAPI
+ """
+ os_tenant_id = context.tenant_id
+
+ attributes = {
+ 'Tenant ID': os_tenant_id,
+ # OpenStack should not own entire network view,
+ # since shared or external networks may be created in it
+ 'Cloud API Owned': False,
+ }
+ return self._build_extattrs(attributes)
+
+ def get_extattrs_for_network(self, context, subnet=None, network=None):
+ """
+ Sets non-null values from subnet and network to corresponding EAs in
+ NIOS
+ :param context: current neutron context
+ :param subnet: neutron subnet object
+ :param network: neutron network object
+ :return: dict with extensible attributes ready to be sent as part of
+ NIOS WAPI
+ """
+ if subnet is None:
+ subnet = {}
+ if network is None:
+ network = {}
+
+ os_subnet_id = subnet.get('id')
+ os_subnet_name = subnet.get('name')
+
+ os_network_id = network.get('id')
+ os_network_name = network.get('name')
+ os_network_l2_info = self._network_l2_info_provider.\
+ get_network_l2_info(context.session, os_network_id)
+ os_network_type = os_network_l2_info.get('network_type').upper()
+
+ os_segmentation_id = os_network_l2_info.get('segmentation_id')
+ os_physical_network = os_network_l2_info.get('physical_network')
+ os_tenant_id = (network.get('tenant_id') or
+ subnet.get('tenant_id') or
+ context.get('tenant_id'))
+ os_user_id = context.user_id
+
+ attributes = {
+ 'Subnet ID': os_subnet_id,
+ 'Subnet Name': os_subnet_name,
+ 'Network ID': os_network_id,
+ 'Network Name': os_network_name,
+ 'Network Encap': os_network_type,
+ 'Segmentation ID': os_segmentation_id,
+ 'Physical Network Name': os_physical_network,
+ 'Tenant ID': os_tenant_id,
+ 'Account': os_user_id,
+ }
+
+        # set cloud_api_owned, is_external, is_shared from common routine
+ common_ea = self._get_common_ea(context, subnet, network)
+ attributes.update(common_ea)
+
+ return self._build_extattrs(attributes)
+
+ def get_extattrs_for_range(self, context, network):
+ os_user_id = context.user_id
+ os_tenant_id = context.tenant_id
+ common_ea = self._get_common_ea(context, network=network)
+
+ attributes = {
+ 'Tenant ID': os_tenant_id,
+ 'Account': os_user_id,
+ 'Cloud API Owned': common_ea['Cloud API Owned'],
+ }
+ return self._build_extattrs(attributes)
+
+ def get_default_extattrs_for_ip(self, context):
+ attributes = {
+ 'Tenant ID': context.tenant_id,
+ 'Account': context.user_id,
+ 'Port ID': None,
+ 'Port Attached Device - Device Owner': None,
+ 'Port Attached Device - Device ID': None,
+ 'Cloud API Owned': True,
+ 'IP Type': 'Fixed',
+ 'VM ID': None,
+ 'VM Name': None
+ }
+ return self._build_extattrs(attributes)
+
+ def get_extattrs_for_ip(self, context, port, ignore_instance_id=False):
+ # Fallback to 'None' as string since NIOS requires this value to be set
+ os_tenant_id = port.get('tenant_id') or context.tenant_id or 'None'
+ os_user_id = context.user_id
+
+ neutron_internal_services_dev_owners = \
+ ib_constants.NEUTRON_INTERNAL_SERVICE_DEVICE_OWNERS
+
+ # for gateway ip, no instance id exists
+ os_instance_id = None
+ os_instance_name = None
+
+ set_os_instance_id = ((not ignore_instance_id) and
+ (port['device_owner'] not in
+ neutron_internal_services_dev_owners))
+
+ if set_os_instance_id:
+ # for floating ip, no instance id exists
+ os_instance_id = self._get_instance_id(context, port)
+ if os_instance_id:
+ nm = nova_manager.NovaManager()
+ os_instance_name = nm.get_instance_name_by_id(os_instance_id)
+
+ network = self.db.get_network(context, port['network_id'])
+ common_ea = self._get_common_ea(context, network=network)
+
+ attributes = {
+ 'Tenant ID': os_tenant_id,
+ 'Account': os_user_id,
+ 'Port ID': port['id'],
+ 'Port Attached Device - Device Owner': port['device_owner'],
+ 'Port Attached Device - Device ID': port['device_id'],
+ 'Cloud API Owned': common_ea['Cloud API Owned'],
+ 'VM ID': os_instance_id,
+ 'VM Name': os_instance_name,
+ }
+
+ if self.db.is_network_external(context, port['network_id']):
+ attributes['IP Type'] = 'Floating'
+ else:
+ attributes['IP Type'] = 'Fixed'
+
+ return self._build_extattrs(attributes)
+
+ def get_extattrs_for_zone(self, context, subnet=None, network=None):
+ os_user_id = context.user_id
+ os_tenant_id = context.tenant_id
+        common_ea = self._get_common_ea(context, subnet=subnet,
+                                        network=network) if False else \
+            self._get_common_ea(context, subnet=subnet, network=network)
+
+ attributes = {
+ 'Tenant ID': os_tenant_id,
+ 'Account': os_user_id,
+ 'Cloud API Owned': common_ea['Cloud API Owned'],
+ }
+ return self._build_extattrs(attributes)
+
+ def _get_common_ea(self, context, subnet=None, network=None):
+ if hasattr(subnet, 'external'):
+ os_network_is_external = subnet.get('external')
+ elif network:
+ os_network_is_external = self.db.is_network_external(
+ context, network.get('id'))
+ else:
+ os_network_is_external = False
+
+ if network:
+ os_network_is_shared = network.get('shared')
+ else:
+ os_network_is_shared = False
+
+ os_cloud_owned = not (os_network_is_external or os_network_is_shared)
+ attributes = {
+ 'Is External': os_network_is_external,
+ 'Is Shared': os_network_is_shared,
+ 'Cloud API Owned': os_cloud_owned,
+ }
+ return attributes
+
+ def _get_instance_id(self, context, port):
+ is_floatingip = port['device_owner'] == l3_db.DEVICE_OWNER_FLOATINGIP
+
+ if is_floatingip:
+ os_instance_id = self.db.get_instance_id_by_floating_ip(
+ context, floating_ip_id=port['device_id'])
+ else:
+ os_instance_id = port['device_id']
+
+ return os_instance_id
+
+ @staticmethod
+ def _to_str_or_none(value):
+ retval = None
+ if not isinstance(value, basestring):
+ if value is not None:
+ retval = str(value)
+ else:
+ retval = value
+ return retval
+
+ def _build_extattrs(self, attributes):
+ extattrs = {}
+ for name, value in attributes.iteritems():
+ str_val = self._to_str_or_none(value)
+ if str_val:
+ extattrs[name] = {'value': str_val}
+
+ self.add_openstack_extattrs_marker(extattrs)
+ return extattrs
+
+ @classmethod
+ def add_openstack_extattrs_marker(cls, extattrs):
+ extattrs[cls.OPENSTACK_OBJECT_FLAG] = {'value': 'OpenStack'}
+
+
+def _construct_extattrs(filters):
+ extattrs = {}
+ for name, filter_value_list in filters.items():
+ # Filters in Neutron look like a dict
+ # {
+ # 'filter1_name': ['filter1_value'],
+ # 'filter2_name': ['filter2_value']
+ # }
+ # So we take only the first item from user's input which is
+ # filter_value_list here.
+        # Also non-Infoblox filters must be removed from filters.
+ # Infoblox filters must be as following:
+ # neutron <command> --infoblox_ea:<EA_name> <EA_value>
+ infoblox_prefix = 'infoblox_ea:'
+ if name.startswith(infoblox_prefix) and filter_value_list:
+ # "infoblox-ea:" removed from filter name
+ prefix_len = len(infoblox_prefix)
+ attr_name = name[prefix_len:]
+ extattrs[attr_name] = {'value': filter_value_list[0]}
+ return extattrs
+
+
+def _extattrs_result_filter_hook(query, filters, db_model,
+ os_object, ib_objtype, mapping_id):
+ """Result filter hook which filters Infoblox objects by
+ Extensible Attributes (EAs) and returns Query object containing
+ OpenStack objects which are equal to Infoblox ones.
+ """
+ infoblox = connector.Infoblox()
+ infoblox_objects_ids = []
+ extattrs = _construct_extattrs(filters)
+
+ if extattrs:
+ InfobloxEaManager.add_openstack_extattrs_marker(extattrs)
+ infoblox_objects = infoblox.get_object(
+ ib_objtype, return_fields=['extattrs'],
+ extattrs=extattrs)
+ if infoblox_objects:
+ for infoblox_object in infoblox_objects:
+ try:
+ obj_id = infoblox_object['extattrs'][mapping_id]['value']
+ except KeyError:
+ raise exceptions.NoAttributeInInfobloxObject(
+ os_object=os_object, ib_object=ib_objtype,
+ attribute=mapping_id)
+ infoblox_objects_ids.append(obj_id)
+ query = query.filter(db_model.id.in_(infoblox_objects_ids))
+ return query
+
+
+def subnet_extattrs_result_filter_hook(query, filters):
+ return _extattrs_result_filter_hook(
+ query, filters, models_v2.Subnet, 'subnet', 'network', 'Subnet ID')
+
+
+def network_extattrs_result_filter_hook(query, filters):
+ return _extattrs_result_filter_hook(
+        query, filters, models_v2.Network, 'network', 'network',
+ 'Network ID')
+
+
+def port_extattrs_result_filter_hook(query, filters):
+ if cfg.CONF.use_host_records_for_ip_allocation:
+ ib_objtype = 'record:host'
+ else:
+ ib_objtype = 'record:a'
+ return _extattrs_result_filter_hook(
+ query, filters, models_v2.Port, 'port', ib_objtype, 'Port ID')
+
+
+if (cfg.CONF.ipam_driver ==
+ 'neutron.ipam.drivers.infoblox.infoblox_ipam.InfobloxIPAM'):
+ db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
+ models_v2.Network, 'network_extattrs', None, None,
+ network_extattrs_result_filter_hook)
+
+ db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
+ models_v2.Subnet, 'subnet_extattrs', None, None,
+ subnet_extattrs_result_filter_hook)
+
+ db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
+ models_v2.Port, 'port_extattrs', None, None,
+ port_extattrs_result_filter_hook)
diff --git a/neutron/ipam/drivers/infoblox/exceptions.py b/neutron/ipam/drivers/infoblox/exceptions.py
new file mode 100755
index 0000000..abe1762
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/exceptions.py
@@ -0,0 +1,125 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron.common import exceptions
+
+
+class InfobloxException(exceptions.NeutronException):
+ """Generic Infoblox Exception."""
+ def __init__(self, response, **kwargs):
+ self.response = response
+ super(InfobloxException, self).__init__(**kwargs)
+
+
+class InfobloxIsMisconfigured(exceptions.NeutronException):
+ message = _("Infoblox IPAM is misconfigured: infoblox_wapi, "
+ "infoblox_username and infoblox_password must be defined.")
+
+
+class InfobloxSearchError(InfobloxException):
+ message = _("Cannot search '%(objtype)s' object(s): "
+ "%(content)s [code %(code)s]")
+
+
+class InfobloxCannotCreateObject(InfobloxException):
+ message = _("Cannot create '%(objtype)s' object(s): "
+ "%(content)s [code %(code)s]")
+
+
+class InfobloxCannotDeleteObject(InfobloxException):
+ message = _("Cannot delete object with ref %(ref)s: "
+ "%(content)s [code %(code)s]")
+
+
+class InfobloxCannotUpdateObject(InfobloxException):
+ message = _("Cannot update object with ref %(ref)s: "
+ "%(content)s [code %(code)s]")
+
+
+class InfobloxFuncException(InfobloxException):
+ message = _("Error occured during function's '%(func_name)s' call: "
+ "ref %(ref)s: %(content)s [code %(code)s]")
+
+
+class InfobloxHostRecordIpAddrNotCreated(exceptions.NeutronException):
+ message = _("Infoblox host record ipv4addr/ipv6addr has not been "
+ "created for IP %(ip)s, mac %(mac)s")
+
+
+
+class InfobloxCannotAllocateIpForSubnet(exceptions.NeutronException):
+ message = _("Infoblox Network view %(netview)s, Network %(cidr)s "
+ "does not have IPs available for allocation.")
+
+
+class InfobloxCannotAllocateIp(exceptions.NeutronException):
+ message = _("Cannot allocate IP %(ip_data)s")
+
+
+class InfobloxDidNotReturnCreatedIPBack(exceptions.NeutronException):
+ message = _("Infoblox did not return created IP back")
+
+
+class InfobloxNetworkNotAvailable(exceptions.NeutronException):
+ message = _("No network view %(net_view_name)s for %(cidr)s")
+
+
+class InfobloxObjectParsingError(exceptions.NeutronException):
+ message = _("Infoblox object cannot be parsed from dict: %(data)s")
+
+
+class HostRecordNotPresent(InfobloxObjectParsingError):
+ message = _("Cannot parse Host Record object from dict because "
+ "'ipv4addrs'/'ipv6addrs' is absent.")
+
+
+class InfobloxInvalidIp(InfobloxObjectParsingError):
+ message = _("Bad IP address: %(ip)s")
+
+
+class NoAttributeInInfobloxObject(exceptions.NeutronException):
+ message = _("To find OpenStack %(os_object)s for Infoblox %(ib_object)s, "
+ "%(attribute)s must be in extensible attributes.")
+
+
+class OperationNotAllowed(exceptions.NeutronException):
+ message = _("Requested operation is not allowed: %(reason)s")
+
+
+class InfobloxConnectionError(exceptions.NeutronException):
+ message = _("Infoblox HTTP request failed with: %(reason)s")
+
+
+class InfobloxConfigException(exceptions.NeutronException):
+ """Generic Infoblox Config Exception."""
+ message = _("Config error: %(msg)s")
+
+
+class InfobloxInternalPrivateSubnetAlreadyExist(exceptions.Conflict):
+ message = _("Network with the same CIDR already exists on NIOS.")
+
+
+class InfobloxNetworkTypeNotAllowed(InfobloxException):
+ message = _("Network with network_type '%(network_type)s' "
+ "not allowed by NIOS.")
+
+
+class InfobloxBadWAPICredential(InfobloxException):
+ message = _("Infoblox IPAM is misconfigured: "
+ "infoblox_username and infoblox_password are incorrect.")
+
+
+class InfobloxTimeoutError(InfobloxException):
+ message = _("Connection to NIOS timed out")
diff --git a/neutron/ipam/drivers/infoblox/infoblox_ipam.py b/neutron/ipam/drivers/infoblox/infoblox_ipam.py
new file mode 100755
index 0000000..e0f6456
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/infoblox_ipam.py
@@ -0,0 +1,92 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import taskflow.engines
+from taskflow.patterns import linear_flow
+
+from neutron.db.infoblox import models
+from neutron.ipam.drivers.infoblox import config
+from neutron.ipam.drivers.infoblox import connector
+from neutron.ipam.drivers.infoblox import dns_controller
+from neutron.ipam.drivers.infoblox import ip_allocator
+from neutron.ipam.drivers.infoblox import ipam_controller
+from neutron.ipam.drivers.infoblox import object_manipulator
+from neutron.ipam.drivers import neutron_ipam
+
+
+class FlowContext(object):
+ def __init__(self, neutron_context, flow_name):
+ self.parent_flow = linear_flow.Flow(flow_name)
+ self.context = neutron_context
+ self.store = {}
+
+ def __getattr__(self, item):
+ return getattr(self.context, item)
+
+
+class InfobloxIPAM(neutron_ipam.NeutronIPAM):
+ def __init__(self):
+ super(InfobloxIPAM, self).__init__()
+
+ config_finder = config.ConfigFinder()
+ obj_manipulator = object_manipulator.InfobloxObjectManipulator(
+ connector=connector.Infoblox())
+ ip_alloc = ip_allocator.get_ip_allocator(obj_manipulator)
+
+ self.ipam_controller = ipam_controller.InfobloxIPAMController(
+ config_finder=config_finder,
+ obj_manip=obj_manipulator,
+ ip_allocator=ip_alloc)
+
+ self.dns_controller = dns_controller.InfobloxDNSController(
+ config_finder=config_finder,
+ manipulator=obj_manipulator,
+ ip_allocator=ip_alloc
+ )
+
+ def create_subnet(self, context, subnet):
+ context = FlowContext(context, 'create-subnet')
+ context.store['subnet'] = subnet
+ retval = super(InfobloxIPAM, self).create_subnet(context, subnet)
+ taskflow.engines.run(context.parent_flow, store=context.store)
+ return retval
+
+ def _collect_members_ips(self, context, network_id, model):
+ members = context.session.query(model)
+ result = members.filter_by(network_id=network_id)
+ ip_list = []
+ ipv6_list = []
+ for member in result:
+ ip_list.append(member.server_ip)
+ ipv6_list.append(member.server_ipv6)
+ return (ip_list, ipv6_list)
+
+ def get_additional_network_dict_params(self, ctx, network_id):
+ dns_list, dns_ipv6_list = self._collect_members_ips(
+ ctx, network_id, models.InfobloxDNSMember)
+
+ dhcp_list, dhcp_ipv6_list = self._collect_members_ips(
+ ctx, network_id, models.InfobloxDHCPMember)
+
+ ib_mgmt_ip = self.ipam_controller.ib_db.get_management_net_ip(
+ ctx, network_id)
+
+ return {
+ 'external_dhcp_servers': dhcp_list,
+ 'external_dns_servers': dns_list,
+ 'external_dhcp_ipv6_servers': dhcp_ipv6_list,
+ 'external_dns_ipv6_servers': dns_ipv6_list,
+ 'mgmt_iface_ip': ib_mgmt_ip
+ }
diff --git a/neutron/ipam/drivers/infoblox/ip_allocator.py b/neutron/ipam/drivers/infoblox/ip_allocator.py
new file mode 100755
index 0000000..13178ff
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/ip_allocator.py
@@ -0,0 +1,180 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+from oslo.config import cfg
+from neutron.openstack.common import log as logging
+from neutron.common import constants as neutron_constants
+import six
+
+
+OPTS = [
+ cfg.ListOpt('bind_dns_records_to_fixed_address',
+ default=[],
+ help=_("List of DNS records to bind to "
+ "Fixed Address during create_port")),
+ cfg.ListOpt('unbind_dns_records_from_fixed_address',
+ default=[],
+ help=_("List of DNS records to unbind from "
+ "Fixed Address during delete_port. "
+ "This is typically the same list as "
+ "that for "
+ "bind_dns_records_to_fixed_address")),
+ cfg.ListOpt('delete_dns_records_associated_with_fixed_address',
+ default=[],
+ help=_("List of associated DNS records to delete "
+ "when a Fixed Address is deleted. This is "
+ "typically a list of DNS records created "
+ "independent of the Infoblox Openstack "
+ "Adaptor (IOA)"))
+]
+
+cfg.CONF.register_opts(OPTS)
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class IPAllocator(object):
+
+ def __init__(self, infoblox):
+ self.infoblox = infoblox
+
+ @abc.abstractmethod
+ def allocate_ip_from_range(self, dnsview_name, networkview_name, zone_auth,
+ hostname, mac, first_ip, last_ip,
+ extattrs=None):
+ pass
+
+ @abc.abstractmethod
+ def allocate_given_ip(self, netview_name, dnsview_name, zone_auth,
+ hostname, mac, ip, extattrs=None):
+ pass
+
+ @abc.abstractmethod
+ def deallocate_ip(self, network_view, dns_view_name, ip):
+ pass
+
+ @abc.abstractmethod
+ def bind_names(self, netview_name, dnsview_name, ip, name, extattrs):
+ pass
+
+ @abc.abstractmethod
+ def unbind_names(self, netview_name, dnsview_name, ip, name, extattrs):
+ pass
+
+
+class HostRecordIPAllocator(IPAllocator):
+ def bind_names(self, netview_name, dnsview_name, ip, name, extattrs):
+ # See OPENSTACK-181. In case hostname already exists on NIOS, update
+ # host record which contains that hostname with the new IP address
+ # rather than creating a separate host record object
+ reserved_hostname_hr = self.infoblox.find_hostname(dnsview_name,
+ name, ip)
+ reserved_ip_hr = self.infoblox.get_host_record(dnsview_name, ip)
+
+ if reserved_hostname_hr == reserved_ip_hr:
+ return
+ if reserved_hostname_hr:
+ for hr_ip in reserved_ip_hr.ips:
+ if hr_ip == ip:
+ self.infoblox.delete_host_record(dnsview_name, ip)
+ self.infoblox.add_ip_to_record(reserved_hostname_hr,
+ ip,
+ hr_ip.mac)
+ break
+ else:
+ self.infoblox.bind_name_with_host_record(dnsview_name, ip,
+ name, extattrs)
+
+ def unbind_names(self, netview_name, dnsview_name, ip, name, extattrs):
+ # Nothing to delete, all will be deleted together with host record.
+ pass
+
+ def allocate_ip_from_range(self, dnsview_name, networkview_name,
+ zone_auth, hostname, mac, first_ip, last_ip,
+ extattrs=None):
+ fqdn = hostname + '.' + zone_auth
+ host_record = self.infoblox.find_hostname(dnsview_name, fqdn,
+ first_ip)
+ if host_record:
+ hr = self.infoblox.add_ip_to_host_record_from_range(
+ host_record, networkview_name, mac, first_ip, last_ip)
+ else:
+ hr = self.infoblox.create_host_record_from_range(
+ dnsview_name, networkview_name, zone_auth, hostname, mac,
+ first_ip, last_ip, extattrs)
+ return hr.ips[-1].ip
+
+ def allocate_given_ip(self, netview_name, dnsview_name, zone_auth,
+ hostname, mac, ip, extattrs=None):
+ hr = self.infoblox.create_host_record_for_given_ip(
+ dnsview_name, zone_auth, hostname, mac, ip, extattrs)
+ return hr.ips[-1].ip
+
+ def deallocate_ip(self, network_view, dns_view_name, ip):
+ host_record = self.infoblox.get_host_record(dns_view_name, ip)
+
+ if host_record and len(host_record.ips) > 1:
+ self.infoblox.delete_ip_from_host_record(host_record, ip)
+ else:
+ self.infoblox.delete_host_record(dns_view_name, ip)
+
+
+class FixedAddressIPAllocator(IPAllocator):
+ def bind_names(self, netview_name, dnsview_name, ip, name, extattrs):
+ bind_cfg = cfg.CONF.bind_dns_records_to_fixed_address
+ if extattrs.get('Port Attached Device - Device Owner').\
+ get('value') == neutron_constants.DEVICE_OWNER_FLOATINGIP:
+ self.infoblox.update_fixed_address_eas(netview_name, ip,
+ extattrs)
+ self.infoblox.update_dns_record_eas(dnsview_name, ip,
+ extattrs)
+ if bind_cfg:
+ self.infoblox.bind_name_with_record_a(
+ dnsview_name, ip, name, bind_cfg, extattrs)
+
+ def unbind_names(self, netview_name, dnsview_name, ip, name, extattrs):
+ unbind_cfg = cfg.CONF.unbind_dns_records_from_fixed_address
+ if unbind_cfg:
+ self.infoblox.unbind_name_from_record_a(
+ dnsview_name, ip, name, unbind_cfg)
+
+ def allocate_ip_from_range(self, dnsview_name, networkview_name,
+ zone_auth, hostname, mac, first_ip, last_ip,
+ extattrs=None):
+ fa = self.infoblox.create_fixed_address_from_range(
+ networkview_name, mac, first_ip, last_ip, extattrs)
+ return fa.ip
+
+ def allocate_given_ip(self, netview_name, dnsview_name, zone_auth,
+ hostname, mac, ip, extattrs=None):
+ fa = self.infoblox.create_fixed_address_for_given_ip(
+ netview_name, mac, ip, extattrs)
+ return fa.ip
+
+ def deallocate_ip(self, network_view, dns_view_name, ip):
+ delete_cfg = cfg.CONF.delete_dns_records_associated_with_fixed_address
+ if delete_cfg:
+ self.infoblox.delete_all_associated_objects(
+ network_view, ip, delete_cfg)
+ self.infoblox.delete_fixed_address(network_view, ip)
+
+
+def get_ip_allocator(obj_manipulator):
+ if cfg.CONF.use_host_records_for_ip_allocation:
+ return HostRecordIPAllocator(obj_manipulator)
+ else:
+ return FixedAddressIPAllocator(obj_manipulator)
diff --git a/neutron/ipam/drivers/infoblox/ipam_controller.py b/neutron/ipam/drivers/infoblox/ipam_controller.py
new file mode 100755
index 0000000..4ac87a2
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/ipam_controller.py
@@ -0,0 +1,420 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg as neutron_conf
+from taskflow.patterns import linear_flow
+
+from neutron.api.v2 import attributes
+from neutron.db.infoblox import infoblox_db
+from neutron.db.infoblox import models
+from neutron.ipam.drivers.infoblox import config
+from neutron.ipam.drivers.infoblox import dns_controller
+from neutron.ipam.drivers.infoblox import ea_manager
+from neutron.ipam.drivers.infoblox import exceptions
+from neutron.ipam.drivers.infoblox import tasks
+from neutron.ipam.drivers import neutron_db
+from neutron.ipam.drivers import neutron_ipam
+from neutron.openstack.common import log as logging
+from neutron.openstack.common import uuidutils
+
+
+OPTS = [
+ neutron_conf.BoolOpt('use_host_records_for_ip_allocation',
+ default=True,
+ help=_("Use host records for IP allocation. "
+ "If False then Fixed IP + A + PTR record "
+ "are used.")),
+ neutron_conf.StrOpt('dhcp_relay_management_network_view',
+ default="default",
+ help=_("NIOS network view to be used for DHCP inside "
+ "management network")),
+ neutron_conf.StrOpt('dhcp_relay_management_network',
+ default=None,
+ help=_("CIDR for the management network served by "
+ "Infoblox DHCP member")),
+ neutron_conf.BoolOpt('allow_admin_network_deletion',
+ default=False,
+ help=_("Allow admin network which is global, "
+ "external, or shared to be deleted"))
+]
+
+neutron_conf.CONF.register_opts(OPTS)
+LOG = logging.getLogger(__name__)
+
+
+class InfobloxIPAMController(neutron_ipam.NeutronIPAMController):
+ def __init__(self, obj_manip=None, config_finder=None, ip_allocator=None,
+ extattr_manager=None, ib_db=None, db_mgr=None):
+ """IPAM backend implementation for Infoblox."""
+ self.infoblox = obj_manip
+ self.config_finder = config_finder
+ self.ip_allocator = ip_allocator
+ self.pattern_builder = config.PatternBuilder
+
+ if extattr_manager is None:
+ extattr_manager = ea_manager.InfobloxEaManager(infoblox_db)
+
+ if db_mgr is None:
+ db_mgr = neutron_db
+
+ self.db_manager = db_mgr
+ self.ea_manager = extattr_manager
+
+ if ib_db is None:
+ ib_db = infoblox_db
+
+ self.ib_db = ib_db
+
+ def create_subnet(self, context, s):
+ subnet = super(InfobloxIPAMController, self).create_subnet(context, s)
+
+ cfg = self.config_finder.find_config_for_subnet(context, subnet)
+ dhcp_members = cfg.reserve_dhcp_members()
+ dns_members = cfg.reserve_dns_members()
+
+ network = self._get_network(context, subnet['network_id'])
+ create_infoblox_member = True
+
+ create_subnet_flow = linear_flow.Flow('ib_create_subnet')
+
+ if self.infoblox.network_exists(cfg.network_view, subnet['cidr']):
+ create_subnet_flow.add(tasks.ChainInfobloxNetworkTask())
+ create_infoblox_member = False
+
+ if not infoblox_db.get_network_view(context, subnet['network_id']):
+ infoblox_db.set_network_view(context, cfg.network_view,
+ subnet['network_id'])
+
+ # Neutron will sort this later so make sure infoblox copy is
+ # sorted too.
+ user_nameservers = sorted(dns_controller.get_nameservers(s))
+
+ # For flat network we save member IP as a primary DNS server: to
+ # the beginning of the list.
+ # If this net is not flat, Member IP will later be replaced by
+ # DNS relay IP.
+ nameservers = [item.ipv6 if subnet['ip_version'] == 6
+ else item.ip for item in dns_members]
+
+ nameservers += [n for n in user_nameservers if n not in nameservers]
+
+ nview_extattrs = self.ea_manager.get_extattrs_for_nview(context)
+ network_extattrs = self.ea_manager.get_extattrs_for_network(
+ context, subnet, network)
+ range_extattrs = self.ea_manager.get_extattrs_for_range(
+ context, network)
+ method_arguments = {
+ 'obj_manip': self.infoblox,
+ 'net_view_name': cfg.network_view,
+ 'dns_view_name': cfg.dns_view,
+ 'cidr': subnet['cidr'],
+ 'dhcp_member': dhcp_members,
+ 'gateway_ip': subnet['gateway_ip'],
+ 'disable': True,
+ 'nameservers': nameservers,
+ 'range_extattrs': range_extattrs,
+ 'network_extattrs': network_extattrs,
+ 'nview_extattrs': nview_extattrs,
+ 'related_members': set(cfg.dhcp_members + cfg.dns_members),
+ 'dhcp_trel_ip': infoblox_db.get_management_net_ip(
+ context, subnet['network_id']),
+ 'ip_version': subnet['ip_version']
+ }
+
+ if subnet['ip_version'] == 6 and subnet['enable_dhcp']:
+ if attributes.is_attr_set(subnet.get('ipv6_ra_mode')):
+ method_arguments['ipv6_ra_mode'] = subnet['ipv6_ra_mode']
+ if attributes.is_attr_set(subnet.get('ipv6_address_mode')):
+ method_arguments[
+ 'ipv6_address_mode'] = subnet['ipv6_address_mode']
+
+ if cfg.require_dhcp_relay:
+ for member in dhcp_members:
+ dhcp_member = models.InfobloxDHCPMember(
+ server_ip=member.ip,
+ server_ipv6=member.ipv6,
+ network_id=network.id
+ )
+ context.session.add(dhcp_member)
+
+ for member in dns_members:
+ dns_member = models.InfobloxDNSMember(
+ server_ip=member.ip,
+ server_ipv6=member.ipv6,
+ network_id=network.id
+ )
+ context.session.add(dns_member)
+
+ if cfg.requires_net_view():
+ create_subnet_flow.add(tasks.CreateNetViewTask())
+
+ if cfg.network_template:
+ method_arguments['template'] = cfg.network_template
+ create_subnet_flow.add(tasks.CreateNetworkFromTemplateTask())
+ elif create_infoblox_member:
+ create_subnet_flow.add(tasks.CreateNetworkTask())
+
+ create_subnet_flow.add(tasks.CreateDNSViewTask())
+
+ for ip_range in subnet['allocation_pools']:
+ # context.store is a global dict of method arguments for tasks
+ # in current flow, hence method arguments need to be rebound
+ first = ip_range['start']
+ last = ip_range['end']
+ first_ip_arg = 'ip_range %s' % first
+ last_ip_arg = 'ip_range %s' % last
+ method_arguments[first_ip_arg] = first
+ method_arguments[last_ip_arg] = last
+ task_name = '-'.join([first, last])
+
+ create_subnet_flow.add(
+ tasks.CreateIPRange(name=task_name,
+ rebind={'start_ip': first_ip_arg,
+ 'end_ip': last_ip_arg}))
+
+ context.store.update(method_arguments)
+ context.parent_flow.add(create_subnet_flow)
+
+ return subnet
+
+ def update_subnet(self, context, subnet_id, subnet):
+ backend_subnet = self.get_subnet_by_id(context, subnet_id)
+ cfg = self.config_finder.find_config_for_subnet(context,
+ backend_subnet)
+ cfg.verify_subnet_update_is_allowed(subnet)
+
+ ib_network = self.infoblox.get_network(cfg.network_view,
+ subnet['cidr'])
+
+ user_nameservers = sorted(subnet.get('dns_nameservers', []))
+ updated_nameservers = user_nameservers
+ if (ib_network.member_ip_addrs and
+ ib_network.member_ip_addrs[0] in ib_network.dns_nameservers):
+ # Flat network, primary dns is member_ip
+ primary_dns = ib_network.member_ip_addrs[0]
+ updated_nameservers = [primary_dns] + user_nameservers
+ else:
+ # Network with relays, primary dns is relay_ip
+ primary_dns = self.ib_db.get_subnet_dhcp_port_address(
+ context, subnet['id'])
+ if primary_dns:
+ updated_nameservers = [primary_dns] + user_nameservers
+
+ ib_network.dns_nameservers = updated_nameservers
+
+ network = self._get_network(context, subnet['network_id'])
+ eas = self.ea_manager.get_extattrs_for_network(context, subnet,
+ network)
+ self.infoblox.update_network_options(ib_network, eas)
+
+ self.restart_services(context, subnet=subnet)
+ return backend_subnet
+
+ def delete_subnet(self, context, subnet):
+ deleted_subnet = super(InfobloxIPAMController, self).delete_subnet(
+ context, subnet)
+
+ cfg = self.config_finder.find_config_for_subnet(context, subnet)
+ network = self._get_network(context, subnet['network_id'])
+ members_to_restart = list(set(cfg.dhcp_members + cfg.dns_members))
+ is_shared = network.get('shared')
+ is_external = infoblox_db.is_network_external(context,
+ subnet['network_id'])
+
+ if neutron_conf.CONF.allow_admin_network_deletion or \
+ not (cfg.is_global_config or is_shared or is_external):
+ self.infoblox.delete_network(
+ cfg.network_view, cidr=subnet['cidr'])
+
+ if self._determine_member_deletion(context,
+ cfg.network_view_scope,
+ subnet['id'],
+ subnet['network_id'],
+ subnet['tenant_id']):
+ cfg.release_member(cfg.network_view)
+
+ if cfg.require_dhcp_relay and \
+ self.ib_db.is_last_subnet_in_network(context, subnet['id'],
+ subnet['network_id']):
+ member = context.session.query(models.InfobloxDNSMember)
+ member.filter_by(network_id=network.id).delete()
+
+ member = context.session.query(models.InfobloxDHCPMember)
+ member.filter_by(network_id=network.id).delete()
+
+ preconf_dns_view = cfg._dns_view
+ if (preconf_dns_view and not preconf_dns_view.startswith('default')
+ and not self.infoblox.has_dns_zones(preconf_dns_view)):
+ self.infoblox.delete_dns_view(preconf_dns_view)
+
+ self.restart_services(context, members=members_to_restart)
+ return deleted_subnet
+
+ def _determine_member_deletion(self, context, network_view_scope,
+ subnet_id, network_id, tenant_id):
+ if network_view_scope == 'static':
+ return self.ib_db.is_last_subnet(context, subnet_id)
+ if network_view_scope == 'tenant_id':
+ return self.ib_db.is_last_subnet_in_tenant(context,
+ subnet_id,
+ tenant_id)
+ if network_view_scope == 'network_id':
+ return self.ib_db.is_last_subnet_in_network(context,
+ subnet_id,
+ network_id)
+ # In order to use network_name scope, a network name must be unique.
+ # Openstack does not enforce this so user has to make sure that
+ # each network name is unique when {network_name} pattern is used
+ # for network view name. Then this is the same as network_id scope.
+ if network_view_scope == 'network_name':
+ return self.ib_db.is_last_subnet_in_network(context,
+ subnet_id,
+ network_id)
+
+ def allocate_ip(self, context, subnet, port, ip=None):
+ hostname = uuidutils.generate_uuid()
+ mac = port['mac_address']
+ extattrs = self.ea_manager.get_extattrs_for_ip(context, port)
+
+ LOG.debug("Trying to allocate IP for %s on Infoblox NIOS" % hostname)
+
+ cfg = self.config_finder.find_config_for_subnet(context, subnet)
+
+ networkview_name = cfg.network_view
+ dnsview_name = cfg.dns_view
+ zone_auth = self.pattern_builder(cfg.domain_suffix_pattern).build(
+ context, subnet)
+
+ if ip and ip.get('ip_address', None):
+ subnet_id = ip.get('subnet_id', None)
+ ip_to_be_allocated = ip.get('ip_address', None)
+ allocated_ip = self.ip_allocator.allocate_given_ip(
+ networkview_name, dnsview_name, zone_auth, hostname, mac,
+ ip_to_be_allocated, extattrs)
+ allocated_ip = {'subnet_id': subnet_id,
+ 'ip_address': allocated_ip}
+ else:
+ # Allocate next available considering IP ranges.
+ ip_ranges = subnet['allocation_pools']
+ # Let Infoblox try to allocate an IP from each ip_range
+ # consistently, and break on the first successful allocation.
+ for ip_range in ip_ranges:
+ first_ip = ip_range['first_ip']
+ last_ip = ip_range['last_ip']
+ try:
+ allocated_ip = self.ip_allocator.allocate_ip_from_range(
+ dnsview_name, networkview_name, zone_auth, hostname,
+ mac, first_ip, last_ip, extattrs)
+ allocated_ip = {'subnet_id': subnet['id'],
+ 'ip_address': allocated_ip}
+
+ break
+ except exceptions.InfobloxCannotAllocateIp:
+ LOG.debug("Failed to allocate IP from range (%s-%s)." %
+ (first_ip, last_ip))
+ continue
+ else:
+ # We went through all the ranges and Infoblox did not
+ # allocate any IP.
+ LOG.debug("Network %s does not have IPs "
+ "available for allocation." % subnet['cidr'])
+ return None
+
+ LOG.debug('IP address allocated on Infoblox NIOS: %s', allocated_ip)
+
+ for member in set(cfg.dhcp_members):
+ self.infoblox.restart_all_services(member)
+
+ return allocated_ip
+
+ def deallocate_ip(self, context, subnet, port, ip):
+ cfg = self.config_finder.find_config_for_subnet(context, subnet)
+ net_id = subnet['network_id']
+ self.ip_allocator.deallocate_ip(cfg.network_view, cfg.dns_view, ip)
+ self.ib_db.delete_ip_allocation(context, net_id, subnet, ip)
+
+ for member in set(cfg.dhcp_members):
+ self.infoblox.restart_all_services(member)
+
+ def set_dns_nameservers(self, context, port):
+ # Replace member IP in DNS nameservers by DNS relay IP.
+ for ip in port['fixed_ips']:
+ subnet = self._get_subnet(context, ip['subnet_id'])
+ cfg = self.config_finder.find_config_for_subnet(context, subnet)
+ net = self.infoblox.get_network(cfg.network_view, subnet['cidr'])
+ if not net.members:
+ continue
+ if not net.has_dns_members():
+ LOG.debug("No domain-name-servers option found, it will "
+ "not be updated to the private IPAM relay IP.")
+ continue
+ net.update_member_ip_in_dns_nameservers(ip['ip_address'])
+ self.infoblox.update_network_options(net)
+
+ def create_network(self, context, network):
+ if not neutron_conf.CONF.dhcp_relay_management_network:
+ LOG.info(_('dhcp_relay_management_network option is not set in '
+ 'config. DHCP will be used for management network '
+ 'interface.'))
+ return network
+
+ net_view_name = neutron_conf.CONF.dhcp_relay_management_network_view
+ cidr = neutron_conf.CONF.dhcp_relay_management_network
+ mac = ':'.join(['00'] * 6)
+
+ # Note(pbondar): If IP is allocated for dhcp relay (trel interface)
+ # when dhcp relay management network is set,
+ # OpenStack is unaware of this so no port to associate with.
+ # In this case, we still need to populate EAs with default values.
+ ip_extattrs = self.ea_manager.get_default_extattrs_for_ip(context)
+ created_fixed_address = self.infoblox.create_fixed_address_from_cidr(
+ net_view_name, mac, cidr, ip_extattrs)
+
+ self.ib_db.add_management_ip(context,
+ network['id'],
+ created_fixed_address)
+ return network
+
+ def delete_network(self, context, network_id):
+ subnets = self.db_manager.get_subnets_by_network(context, network_id)
+ net_view = infoblox_db.get_network_view(context, network_id)
+
+ for subnet in subnets:
+ LOG.info('Removing subnet %s from network %s.' % (
+ subnet.id, network_id
+ ))
+ self.delete_subnet(context, subnet)
+
+ if net_view and not self.infoblox.has_networks(net_view):
+ self.infoblox.delete_network_view(net_view)
+
+ fixed_address_ref = self.ib_db.get_management_ip_ref(context,
+ network_id)
+
+ if fixed_address_ref is not None:
+ self.infoblox.delete_object_by_ref(fixed_address_ref)
+ self.ib_db.delete_management_ip(context, network_id)
+
+ def restart_services(self, context, members=None, subnet=None):
+ if not members:
+ members = []
+
+ if subnet:
+ cfg = self.config_finder.find_config_for_subnet(context, subnet)
+ for member in set(cfg.dhcp_members + cfg.dns_members):
+ members.append(member)
+
+ for member in set(members):
+ self.infoblox.restart_all_services(member)
diff --git a/neutron/ipam/drivers/infoblox/l2_driver.py b/neutron/ipam/drivers/infoblox/l2_driver.py
new file mode 100644
index 0000000..83ab633
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/l2_driver.py
@@ -0,0 +1,140 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import re
+import six
+
+from oslo.config import cfg
+
+from neutron.openstack.common import importutils
+from neutron.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class L2DriverBase(object):
+ """Defines interface for retrieving info from L2 plugins.
+ L2 Driver should:
+ - be located under 'l2_drivers' directory;
+ - have name {core_plugin_name}.py;
+ - have class name 'Driver';
+ - be inherited from this class;
+ - implement methods which are marked as abstractmethods;
+ """
+ @abc.abstractmethod
+ def init_driver(self):
+ """Should be implemented in driver for L2 plugin.
+ This method should import needed L2 plugin and
+ store reference to it somewhere in self.
+ So any import exception should be raised at this point.
+ """
+ pass
+
+ @abc.abstractmethod
+ def get_network_binding(self, session, network_id):
+ """Should be implemented in driver for L2 plugin.
+ :param session: database session object
+ :param network_id: network id
+ """
+ pass
+
+ def __init__(self):
+ """No need to override this in child.
+ Just inits L2 modules for now.
+ """
+ self.init_driver()
+
+
+class L2Info(object):
+ """This class provides network info from L2 plugins
+ using factory of facades.
+ """
+ def __init__(self, core_plugin=None):
+ """
+ :param: core_plugin: OpenStack core plugin
+ will be loaded from config if not provided
+ """
+ if not core_plugin:
+ core_plugin = cfg.CONF.core_plugin
+ self.core_plugin = core_plugin
+ self.driver = None
+
+ def _get_driver(self):
+ """Return Driver for L2 plugin.
+ Loads appropriate module if not loaded yet.
+ """
+ if not self.driver:
+ self.driver = L2DriverFactory.load(self.core_plugin)
+ return self.driver
+
+ def get_network_l2_info(self, session, network_id):
+ """Wrapper around get_network_binding().
+ Converts network info from L2Driver(list format) into
+ dict with fixed keys.
+ :param session: database session object
+ :param network_id: network id
+ """
+ segments = None
+ l2_info = {'network_type': None,
+ 'segmentation_id': None,
+ 'physical_network': None}
+
+ driver = self._get_driver()
+ segments = driver.get_network_binding(session, network_id)
+
+ if segments:
+ for name, value in segments.iteritems():
+ l2_info[name] = value
+
+ return l2_info
+
+
+class L2DriverFactory(object):
+ """This class loads Driver for L2 plugin
+ depending on L2 core_plugin class name.
+ """
+
+ @classmethod
+ def load(cls, core_plugin):
+ """Loads driver for core_plugin
+ """
+ driver_prefix = 'neutron.ipam.drivers.infoblox.l2_drivers.'
+ driver_postfix = '.Driver'
+ # Look for infoblox driver for core plugin
+ plugin_name = cls.get_plugin_name(core_plugin)
+ driver = driver_prefix + plugin_name + driver_postfix
+ LOG.info(_("Loading driver %s for core plugin"), driver)
+ # Try to load driver, generates exception if fails
+ driver_class = importutils.import_class(driver)
+ return driver_class()
+
+ @classmethod
+ def get_plugin_name(cls, core_plugin):
+ """Returns plugin name based on plugin path.
+ Plugin name can be found on position number three in path
+ neutron.plugins.{plugin_name}.(path_to_module)
+ """
+ plugin = str(core_plugin)
+ match = re.match(r'^neutron\.plugins\.([a-zA-Z0-9_]+)\.',
+ plugin)
+ if match:
+ return match.group(1)
+
+ # if plugin doesn't match, assume core_plugin is short name
+ # instead of full path to module.
+ # See examples of full path / short name in setup.cfg
+ return plugin
diff --git a/neutron/ipam/drivers/infoblox/l2_drivers/__init__.py b/neutron/ipam/drivers/infoblox/l2_drivers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/neutron/ipam/drivers/infoblox/l2_drivers/ml2.py b/neutron/ipam/drivers/infoblox/l2_drivers/ml2.py
new file mode 100644
index 0000000..51e5f36
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/l2_drivers/ml2.py
@@ -0,0 +1,27 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron.ipam.drivers.infoblox import l2_driver
+
+
+class Driver(l2_driver.L2DriverBase):
+ def init_driver(self):
+ from neutron.plugins.ml2 import db
+ self.db = db
+
+ def get_network_binding(self, session, network_id):
+ # get_network_segments returns a list of segment dicts;
+ # only the first segment is used
+ return self.db.get_network_segments(session, network_id)[0]
diff --git a/neutron/ipam/drivers/infoblox/l2_drivers/nuage.py b/neutron/ipam/drivers/infoblox/l2_drivers/nuage.py
new file mode 100644
index 0000000..14095d5
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/l2_drivers/nuage.py
@@ -0,0 +1,29 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron.ipam.drivers.infoblox import l2_driver
+
+
+class Driver(l2_driver.L2DriverBase):
+ def init_driver(self):
+ pass
+
+ def get_network_binding(self, session, network_id):
+ # Nuage exposes no segment bindings to look up; return
+ # placeholder values for the keys L2Info expects.
+ return {'network_type': 'none',
+ 'segmentation_id': 'none',
+ 'physical_network': 'none'}
+
diff --git a/neutron/ipam/drivers/infoblox/nova_manager.py b/neutron/ipam/drivers/infoblox/nova_manager.py
new file mode 100644
index 0000000..5a98e18
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/nova_manager.py
@@ -0,0 +1,47 @@
+# Copyright 2014 Infoblox.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import novaclient.v1_1.client as nclient
+from novaclient import exceptions as novaexc
+from oslo.config import cfg
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+class NovaManager(object):
+ _nova_client = None
+
+ def __init__(self):
+ if not NovaManager._nova_client:
+ NovaManager._nova_client = nclient.Client(
+ cfg.CONF.nova_admin_username,
+ cfg.CONF.nova_admin_password,
+ None, # project_id - not actually used
+ auth_url=cfg.CONF.nova_admin_auth_url,
+ tenant_id=cfg.CONF.nova_admin_tenant_id,
+ service_type='compute')
+ self.nova = NovaManager._nova_client
+
+ def get_instance_name_by_id(self, instance_id):
+ try:
+ instance = self.nova.servers.get(instance_id)
+ if instance.human_id:
+ return instance.human_id
+ except (novaexc.NotFound, novaexc.BadRequest):
+ LOG.debug(_("Instance not found: %(instance_id)s"),
+ {'instance_id': instance_id})
+ return instance_id
diff --git a/neutron/ipam/drivers/infoblox/object_manipulator.py b/neutron/ipam/drivers/infoblox/object_manipulator.py
new file mode 100755
index 0000000..77122fc
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/object_manipulator.py
@@ -0,0 +1,883 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+
+from neutron.ipam.drivers.infoblox import exceptions as exc
+from neutron.ipam.drivers.infoblox import objects
+from neutron.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+class IPBackend(object):
+ def __init__(self, object_manipulator):
+ self.obj_man = object_manipulator
+
+ def get_network(self, net_view_name, cidr):
+ net_data = {'network_view': net_view_name,
+ 'network': cidr}
+ net = self.obj_man._get_infoblox_object_or_none(
+ self.ib_network_name, net_data,
+ return_fields=['options', 'members'])
+ if not net:
+ raise exc.InfobloxNetworkNotAvailable(
+ net_view_name=net_view_name, cidr=cidr)
+ return objects.Network.from_dict(net)
+
+ def get_all_associated_objects(self, net_view_name, ip):
+ assoc_with_ip = {
+ 'network_view': net_view_name,
+ 'ip_address': ip
+ }
+ assoc_objects = self.obj_man._get_infoblox_object_or_none(
+ self.ib_ipaddr_object_name, assoc_with_ip,
+ return_fields=['objects'], proxy=True)
+ if assoc_objects:
+ return assoc_objects['objects']
+ return []
+
+ def network_exists(self, net_view_name, cidr):
+ net_data = {'network_view': net_view_name, 'network': cidr}
+ try:
+ net = self.obj_man._get_infoblox_object_or_none(
+ self.ib_network_name, net_data,
+ return_fields=['options', 'members'])
+ except exc.InfobloxSearchError:
+ net = None
+ return net is not None
+
+ def delete_network(self, net_view_name, cidr):
+ payload = {'network_view': net_view_name,
+ 'network': cidr}
+ self.obj_man._delete_infoblox_object(
+ self.ib_network_name, payload)
+
+ def delete_ip_range(self, net_view, start_ip, end_ip):
+ range_data = {'start_addr': start_ip,
+ 'end_addr': end_ip,
+ 'network_view': net_view}
+ self.obj_man._delete_infoblox_object(self.ib_range_name, range_data)
+
+ def delete_ip_from_host_record(self, host_record, ip):
+ host_record.ips.remove(ip)
+ self.obj_man._update_host_record_ips(self.ib_ipaddrs_name, host_record)
+ return host_record
+
+ def delete_host_record(self, dns_view_name, ip_address):
+ host_record_data = {'view': dns_view_name,
+ self.ib_ipaddr_name: ip_address}
+ self.obj_man._delete_infoblox_object(
+ 'record:host', host_record_data)
+
+ def delete_fixed_address(self, network_view, ip):
+ fa_data = {'network_view': network_view,
+ self.ib_ipaddr_name: ip}
+ self.obj_man._delete_infoblox_object(
+ self.ib_fixedaddress_name, fa_data)
+
+ def bind_name_with_host_record(self, dnsview_name, ip, name, extattrs):
+ record_host = {
+ self.ib_ipaddr_name: ip,
+ 'view': dnsview_name
+ }
+ update_kwargs = {'name': name, 'extattrs': extattrs}
+ self.obj_man._update_infoblox_object(
+ 'record:host', record_host, update_kwargs)
+
+ def update_host_record_eas(self, dns_view, ip, extattrs):
+ fa_data = {'view': dns_view,
+ self.ib_ipaddr_name: ip}
+ fa = self.obj_man._get_infoblox_object_or_none(
+ 'record:host', fa_data)
+ if fa:
+ self.obj_man._update_infoblox_object_by_ref(
+ fa, {'extattrs': extattrs})
+
+ def update_fixed_address_eas(self, network_view, ip, extattrs):
+ fa_data = {'network_view': network_view,
+ self.ib_ipaddr_name: ip}
+ fa = self.obj_man._get_infoblox_object_or_none(
+ self.ib_fixedaddress_name, fa_data)
+ if fa:
+ self.obj_man._update_infoblox_object_by_ref(
+ fa, {'extattrs': extattrs})
+
+ def update_dns_record_eas(self, dns_view, ip, extattrs):
+ fa_data = {'view': dns_view,
+ self.ib_ipaddr_name: ip}
+ fa = self.obj_man._get_infoblox_object_or_none(
+ 'record:a', fa_data)
+ if fa:
+ self.obj_man._update_infoblox_object_by_ref(
+ fa, {'extattrs': extattrs})
+
+ fa = self.obj_man._get_infoblox_object_or_none(
+ 'record:ptr', fa_data)
+ if fa:
+ self.obj_man._update_infoblox_object_by_ref(
+ fa, {'extattrs': extattrs})
+
+
+class IPv4Backend(IPBackend):
+ ib_ipaddr_name = 'ipv4addr'
+ ib_ipaddrs_name = 'ipv4addrs'
+ ib_ipaddr_object_name = 'ipv4address'
+ ib_network_name = 'network'
+ ib_fixedaddress_name = 'fixedaddress'
+ ib_range_name = 'range'
+
+ def create_network(self, net_view_name, cidr, nameservers=None,
+ members=None, gateway_ip=None, dhcp_trel_ip=None,
+ network_extattrs=None):
+ network_data = {'network_view': net_view_name,
+ 'network': cidr,
+ 'extattrs': network_extattrs}
+ members_struct = []
+ for member in members:
+ members_struct.append({'ipv4addr': member.ip,
+ '_struct': 'dhcpmember'})
+ network_data['members'] = members_struct
+
+ dhcp_options = []
+
+ if nameservers:
+ dhcp_options.append({'name': 'domain-name-servers',
+ 'value': ",".join(nameservers)})
+
+ if gateway_ip:
+ dhcp_options.append({'name': 'routers',
+ 'value': gateway_ip})
+
+ if dhcp_trel_ip:
+ dhcp_options.append({'name': 'dhcp-server-identifier',
+ 'num': 54,
+ 'value': dhcp_trel_ip})
+
+ if dhcp_options:
+ network_data['options'] = dhcp_options
+
+ return self.obj_man._create_infoblox_object(
+ self.ib_network_name, network_data, check_if_exists=False)
+
+ def create_ip_range(self, network_view, start_ip, end_ip, network,
+ disable, range_extattrs):
+ range_data = {'start_addr': start_ip,
+ 'end_addr': end_ip,
+ 'extattrs': range_extattrs,
+ 'network_view': network_view}
+ ib_object = self.obj_man._get_infoblox_object_or_none('range',
+ range_data)
+ if not ib_object:
+ range_data['disable'] = disable
+ self.obj_man._create_infoblox_object(
+ 'range', range_data, check_if_exists=False)
+
+ def add_ip_to_record(self, host_record, ip, mac):
+ host_record.ips.append(objects.IPv4(ip, mac))
+ ips = self.obj_man._update_host_record_ips('ipv4addrs', host_record)
+ hr = objects.HostRecordIPv4.from_dict(ips)
+ return hr
+
+ def create_host_record(self):
+ return objects.HostRecordIPv4()
+
+ def get_host_record(self, dns_view, ip):
+ data = {
+ 'view': dns_view,
+ 'ipv4addr': ip
+ }
+
+ raw_host_record = self.obj_man._get_infoblox_object_or_none(
+ 'record:host', data, return_fields=['ipv4addrs'])
+
+ if raw_host_record:
+ hr = objects.HostRecordIPv4.from_dict(raw_host_record)
+ return hr
+
+ def get_fixed_address(self):
+ return objects.FixedAddressIPv4()
+
+ def bind_name_with_record_a(self, dnsview_name, ip, name, bind_list,
+ extattrs):
+ # Forward mapping
+ if 'record:a' in bind_list:
+ payload = {
+ self.ib_ipaddr_name: ip,
+ 'view': dnsview_name
+ }
+ additional_create_kwargs = {
+ 'name': name,
+                'extattrs': extattrs
+ }
+ self.obj_man._create_infoblox_object(
+ 'record:a', payload,
+ additional_create_kwargs,
+ update_if_exists=True)
+
+ # Reverse mapping
+ if 'record:ptr' in bind_list:
+ record_ptr_data = {
+ self.ib_ipaddr_name: ip,
+ 'view': dnsview_name
+ }
+ additional_create_kwargs = {
+ 'ptrdname': name,
+ 'extattrs': extattrs
+ }
+ self.obj_man._create_infoblox_object(
+ 'record:ptr', record_ptr_data,
+ additional_create_kwargs,
+ update_if_exists=True)
+
+ def unbind_name_from_record_a(self, dnsview_name, ip, name, unbind_list):
+ if 'record:a' in unbind_list:
+ dns_record_a = {
+ 'name': name,
+ self.ib_ipaddr_name: ip,
+ 'view': dnsview_name
+ }
+ self.obj_man._delete_infoblox_object(
+ 'record:a', dns_record_a)
+
+ if 'record:ptr' in unbind_list:
+ dns_record_ptr = {
+ 'ptrdname': name,
+ 'view': dnsview_name
+ }
+ self.obj_man._delete_infoblox_object(
+ 'record:ptr', dns_record_ptr)
+
+ def find_hostname(self, dns_view, hostname):
+ data = {
+ 'name': hostname,
+ 'view': dns_view
+ }
+
+ raw_host_record = self.obj_man._get_infoblox_object_or_none(
+ 'record:host', data, return_fields=['ipv4addrs'])
+
+ if raw_host_record:
+ hr = objects.HostRecordIPv4.from_dict(raw_host_record)
+ return hr
+
+
+class IPv6Backend(IPBackend):
+ ib_ipaddr_name = 'ipv6addr'
+ ib_ipaddrs_name = 'ipv6addrs'
+ ib_ipaddr_object_name = 'ipv6address'
+ ib_network_name = 'ipv6network'
+ ib_fixedaddress_name = 'ipv6fixedaddress'
+ ib_range_name = 'ipv6range'
+
+ def create_network(self, net_view_name, cidr, nameservers=None,
+ members=None, gateway_ip=None, dhcp_trel_ip=None,
+ network_extattrs=None):
+ network_data = {'network_view': net_view_name,
+ 'network': cidr,
+ 'extattrs': network_extattrs}
+
+        # member here takes ipv4addr since pre-Hellfire NIOS versions do not
+        # support ipv6addr (Hellfire supports ipv6addr).
+        # We are using ipv4addr to support both versions.
+        # This is just to let the GM know which member is used for
+        # internal communication between the GM and the member, so
+        # it has nothing to do with the DHCP protocol.
+ members_struct = []
+ for member in members:
+ members_struct.append(member.specifier)
+ network_data['members'] = members_struct
+
+ dhcp_options = []
+
+ if nameservers:
+ dhcp_options.append({'name': 'domain-name-servers',
+ 'value': ",".join(nameservers)})
+
+ if dhcp_options:
+ network_data['options'] = dhcp_options
+
+ return self.obj_man._create_infoblox_object(
+ self.ib_network_name, network_data, check_if_exists=False)
+
+ def create_ip_range(self, network_view, start_ip, end_ip, network,
+ disable, range_extattrs):
+ range_data = {'start_addr': start_ip,
+ 'end_addr': end_ip,
+ 'extattrs': range_extattrs,
+ 'network': network,
+ 'network_view': network_view}
+ ib_object = self.obj_man._get_infoblox_object_or_none('ipv6range',
+ range_data)
+ if not ib_object:
+ range_data['disable'] = disable
+ self.obj_man._create_infoblox_object(
+ 'ipv6range', range_data, check_if_exists=False)
+
+ def add_ip_to_record(self, host_record, ip, mac):
+ host_record.ips.append(objects.IPv6(ip, mac))
+ ips = self.obj_man._update_host_record_ips('ipv6addrs', host_record)
+ hr = objects.HostRecordIPv6.from_dict(ips)
+ return hr
+
+ def create_host_record(self):
+ return objects.HostRecordIPv6()
+
+ def get_host_record(self, dns_view, ip):
+ data = {
+ 'view': dns_view,
+ 'ipv6addr': ip
+ }
+
+ raw_host_record = self.obj_man._get_infoblox_object_or_none(
+ 'record:host', data, return_fields=['ipv6addrs'])
+
+ if raw_host_record:
+ hr = objects.HostRecordIPv6.from_dict(raw_host_record)
+ return hr
+
+ def get_fixed_address(self):
+ return objects.FixedAddressIPv6()
+
+ def bind_name_with_record_a(self, dnsview_name, ip, name, bind_list,
+ extattrs):
+ # Forward mapping
+ if 'record:aaaa' in bind_list:
+ payload = {
+ self.ib_ipaddr_name: ip,
+ 'view': dnsview_name
+ }
+ additional_create_kwargs = {
+ 'name': name,
+                'extattrs': extattrs
+ }
+ self.obj_man._create_infoblox_object(
+ 'record:aaaa', payload,
+ additional_create_kwargs,
+ update_if_exists=True)
+
+ # Reverse mapping
+ if 'record:ptr' in bind_list:
+ record_ptr_data = {
+ self.ib_ipaddr_name: ip,
+ 'view': dnsview_name
+ }
+ additional_create_kwargs = {
+ 'ptrdname': name,
+ 'extattrs': extattrs
+ }
+ self.obj_man._create_infoblox_object(
+ 'record:ptr', record_ptr_data,
+ additional_create_kwargs,
+ update_if_exists=True)
+
+ def unbind_name_from_record_a(self, dnsview_name, ip, name, unbind_list):
+ if 'record:aaaa' in unbind_list:
+ dns_record_a = {
+ 'name': name,
+ self.ib_ipaddr_name: ip,
+ 'view': dnsview_name
+ }
+ self.obj_man._delete_infoblox_object(
+ 'record:aaaa', dns_record_a)
+
+ if 'record:ptr' in unbind_list:
+ dns_record_ptr = {
+ 'ptrdname': name,
+ 'view': dnsview_name
+ }
+ self.obj_man._delete_infoblox_object(
+ 'record:ptr', dns_record_ptr)
+
+ def find_hostname(self, dns_view, hostname):
+ data = {
+ 'name': hostname,
+ 'view': dns_view
+ }
+
+ raw_host_record = self.obj_man._get_infoblox_object_or_none(
+ 'record:host', data, return_fields=['ipv6addrs'])
+
+ if raw_host_record:
+ hr = objects.HostRecordIPv6.from_dict(raw_host_record)
+ return hr
+
+
+class IPBackendFactory(object):
+ @staticmethod
+ def get_ip_version(ipaddr):
+ if type(ipaddr) is dict:
+ ip = ipaddr['ip_address']
+ else:
+ ip = ipaddr
+
+ try:
+ ip = netaddr.IPAddress(ip)
+ except ValueError:
+ ip = netaddr.IPNetwork(ip)
+ return ip.version
+
+ @staticmethod
+ def get(obj_man, ip):
+ ip = IPBackendFactory.get_ip_version(ip)
+ if ip == 4:
+ return IPv4Backend(obj_man)
+ elif ip == 6:
+ return IPv6Backend(obj_man)
+
+
+class InfobloxObjectManipulator(object):
+ def __init__(self, connector):
+ self.connector = connector
+
+ def create_network_view(self, netview_name, nview_extattrs, member):
+ net_view_data = {'name': netview_name,
+ 'extattrs': nview_extattrs}
+ return self._create_infoblox_object('networkview', net_view_data,
+ delegate_member=member)
+
+ def delete_network_view(self, net_view_name):
+ # never delete default network view
+ if net_view_name == 'default':
+ return
+
+ net_view_data = {'name': net_view_name}
+ self._delete_infoblox_object('networkview', net_view_data)
+
+ def create_dns_view(self, net_view_name, dns_view_name):
+ dns_view_data = {'name': dns_view_name,
+ 'network_view': net_view_name}
+ return self._create_infoblox_object('view', dns_view_data)
+
+ def delete_dns_view(self, net_view_name):
+ net_view_data = {'name': net_view_name}
+ self._delete_infoblox_object('view', net_view_data)
+
+ def get_network(self, net_view_name, cidr):
+ ip_backend = IPBackendFactory.get(self, cidr)
+ return ip_backend.get_network(net_view_name, cidr)
+
+ def has_networks(self, network_view_name):
+ net_data = {'network_view': network_view_name}
+ try:
+ ib_net = self._get_infoblox_object_or_none('network', net_data)
+ return bool(ib_net)
+ except exc.InfobloxSearchError:
+ return False
+
+ def network_exists(self, net_view_name, cidr):
+ ip_backend = IPBackendFactory.get(self, cidr)
+ return ip_backend.network_exists(net_view_name, cidr)
+
+ def create_network(self, net_view_name, cidr, nameservers=None,
+ members=None, gateway_ip=None, dhcp_trel_ip=None,
+ network_extattrs=None):
+ ip_backend = IPBackendFactory.get(self, cidr)
+ ip_backend.create_network(net_view_name, cidr, nameservers,
+ members, gateway_ip, dhcp_trel_ip,
+ network_extattrs)
+
+ def delete_network(self, net_view_name, cidr):
+ ip_backend = IPBackendFactory.get(self, cidr)
+ ip_backend.delete_network(net_view_name, cidr)
+
+ def create_network_from_template(self, net_view_name, cidr, template,
+ network_extattrs):
+ network_data = {
+ 'network_view': net_view_name,
+ 'network': cidr,
+ 'template': template,
+ 'extattrs': network_extattrs
+ }
+ return self._create_infoblox_object('network', network_data,
+ check_if_exists=False)
+
+ def update_network_options(self, ib_network, extattrs=None):
+ payload = {}
+ if ib_network.options:
+ payload['options'] = ib_network.options
+ if extattrs:
+ payload['extattrs'] = extattrs
+ self._update_infoblox_object_by_ref(ib_network.ref, payload)
+
+ def create_ip_range(self, network_view, start_ip, end_ip, network,
+ disable, range_extattrs):
+ ip_backend = IPBackendFactory.get(self, start_ip)
+ ip_backend.create_ip_range(network_view, start_ip, end_ip,
+ network, disable, range_extattrs)
+
+ def delete_ip_range(self, net_view, start_ip, end_ip):
+ ip_backend = IPBackendFactory.get(self, start_ip)
+ ip_backend.delete_ip_range(net_view, start_ip, end_ip)
+
+ def get_host_record(self, dns_view, ip):
+ ip_backend = IPBackendFactory.get(self, ip)
+ return ip_backend.get_host_record(dns_view, ip)
+
+ def find_hostname(self, dns_view, hostname, ip):
+ ip_backend = IPBackendFactory.get(self, ip)
+ return ip_backend.find_hostname(dns_view, hostname)
+
+ def create_host_record_for_given_ip(self, dns_view_name, zone_auth,
+ hostname, mac, ip, extattrs):
+ ip_backend = IPBackendFactory.get(self, ip)
+
+ hr = ip_backend.create_host_record()
+ hr.ip_version = IPBackendFactory.get_ip_version(ip)
+ hr.hostname = hostname
+ hr.zone_auth = zone_auth
+ hr.mac = mac
+ hr.dns_view = dns_view_name
+ hr.ip = ip
+ hr.extattrs = extattrs
+
+ created_hr = self._create_infoblox_ip_address(hr)
+ return created_hr
+
+ def create_host_record_from_range(self, dns_view_name, network_view_name,
+ zone_auth, hostname, mac, first_ip,
+ last_ip, extattrs):
+ ip_backend = IPBackendFactory.get(self, first_ip)
+
+ hr = ip_backend.create_host_record()
+ hr.ip_version = IPBackendFactory.get_ip_version(first_ip)
+ hr.hostname = hostname
+ hr.zone_auth = zone_auth
+ hr.mac = mac
+ hr.dns_view = dns_view_name
+ hr.ip = objects.IPAllocationObject.next_available_ip_from_range(
+ network_view_name, first_ip, last_ip)
+ hr.extattrs = extattrs
+
+ created_hr = self._create_infoblox_ip_address(hr)
+ return created_hr
+
+ def delete_host_record(self, dns_view_name, ip_address):
+ ip_backend = IPBackendFactory.get(self, ip_address)
+ ip_backend.delete_host_record(dns_view_name, ip_address)
+
+ def create_fixed_address_for_given_ip(self, network_view, mac, ip,
+ extattrs):
+ ip_backend = IPBackendFactory.get(self, ip)
+
+ fa = ip_backend.get_fixed_address()
+ fa.ip = ip
+ fa.net_view = network_view
+ fa.mac = mac
+ fa.extattrs = extattrs
+
+ created_fa = self._create_infoblox_ip_address(fa)
+ return created_fa
+
+ def create_fixed_address_from_range(self, network_view, mac, first_ip,
+ last_ip, extattrs):
+ ip_backend = IPBackendFactory.get(self, first_ip)
+
+ fa = ip_backend.get_fixed_address()
+ fa.ip = objects.IPAllocationObject.next_available_ip_from_range(
+ network_view, first_ip, last_ip)
+ fa.net_view = network_view
+ fa.mac = mac
+ fa.extattrs = extattrs
+
+ created_fa = self._create_infoblox_ip_address(fa)
+ return created_fa
+
+ def create_fixed_address_from_cidr(self, network_view, mac, cidr,
+ extattrs):
+ ip_backend = IPBackendFactory.get(self, cidr)
+
+ fa = ip_backend.get_fixed_address()
+ fa.ip = objects.IPAllocationObject.next_available_ip_from_cidr(
+ network_view, cidr)
+ fa.mac = mac
+ fa.net_view = network_view
+ fa.extattrs = extattrs
+
+ created_fa = self._create_infoblox_ip_address(fa)
+ return created_fa
+
+ def delete_fixed_address(self, network_view, ip_address):
+ ip_backend = IPBackendFactory.get(self, ip_address)
+ ip_backend.delete_fixed_address(network_view, ip_address)
+
+ def add_ip_to_record(self, host_record, ip, mac):
+ ip_backend = IPBackendFactory.get(self, ip)
+        return ip_backend.add_ip_to_record(host_record, ip, mac)
+
+ def add_ip_to_host_record_from_range(self, host_record, network_view,
+ mac, first_ip, last_ip):
+ ip = objects.IPAllocationObject.next_available_ip_from_range(
+ network_view, first_ip, last_ip)
+ hr = self.add_ip_to_record(host_record, ip, mac)
+ return hr
+
+ def delete_ip_from_host_record(self, host_record, ip):
+ ip_backend = IPBackendFactory.get(self, ip)
+        return ip_backend.delete_ip_from_host_record(host_record, ip)
+
+ def has_dns_zones(self, dns_view):
+ zone_data = {'view': dns_view}
+ try:
+ zone = self._get_infoblox_object_or_none('zone_auth', zone_data)
+ return bool(zone)
+ except exc.InfobloxSearchError:
+ return False
+
+ def create_dns_zone(self, dns_view, dns_zone_fqdn, primary_dns_member=None,
+ secondary_dns_members=None, zone_format=None,
+ ns_group=None, prefix=None, zone_extattrs={}):
+ # TODO(mirantis) support IPv6
+ dns_zone_data = {'fqdn': dns_zone_fqdn,
+ 'view': dns_view,
+ 'extattrs': zone_extattrs}
+ additional_create_kwargs = {}
+
+ if primary_dns_member:
+ grid_primary = [{'name': primary_dns_member.name,
+ '_struct': 'memberserver'}]
+ additional_create_kwargs['grid_primary'] = grid_primary
+
+ if secondary_dns_members:
+ grid_secondaries = [{'name': member.name,
+ '_struct': 'memberserver'}
+ for member in secondary_dns_members]
+ additional_create_kwargs['grid_secondaries'] = grid_secondaries
+
+ if zone_format:
+ additional_create_kwargs['zone_format'] = zone_format
+
+ if ns_group:
+ additional_create_kwargs['ns_group'] = ns_group
+
+ if prefix:
+ additional_create_kwargs['prefix'] = prefix
+
+ try:
+ self._create_infoblox_object(
+ 'zone_auth', dns_zone_data, additional_create_kwargs,
+ check_if_exists=True)
+ except exc.InfobloxCannotCreateObject:
+ LOG.warning(
+ _('Unable to create DNS zone %(dns_zone_fqdn)s '
+ 'for %(dns_view)s'),
+ {'dns_zone_fqdn': dns_zone_fqdn, 'dns_view': dns_view})
+
+ def delete_dns_zone(self, dns_view, dns_zone_fqdn):
+ dns_zone_data = {'fqdn': dns_zone_fqdn,
+ 'view': dns_view}
+ self._delete_infoblox_object('zone_auth', dns_zone_data)
+
+ def update_host_record_eas(self, dns_view, ip, extattrs):
+ ip_backend = IPBackendFactory.get(self, ip)
+ ip_backend.update_host_record_eas(dns_view, ip, extattrs)
+
+ def update_fixed_address_eas(self, network_view, ip, extattrs):
+ ip_backend = IPBackendFactory.get(self, ip)
+ ip_backend.update_fixed_address_eas(network_view, ip, extattrs)
+
+ def update_dns_record_eas(self, dns_view, ip, extattrs):
+ ip_backend = IPBackendFactory.get(self, ip)
+ ip_backend.update_dns_record_eas(dns_view, ip, extattrs)
+
+ def bind_name_with_host_record(self, dnsview_name, ip, name, extattrs):
+ ip_backend = IPBackendFactory.get(self, ip)
+ ip_backend.bind_name_with_host_record(dnsview_name, ip, name, extattrs)
+
+ def bind_name_with_record_a(self, dnsview_name, ip, name, bind_list,
+ extattrs):
+ ip_backend = IPBackendFactory.get(self, ip)
+ ip_backend.bind_name_with_record_a(dnsview_name, ip, name, bind_list,
+ extattrs)
+
+ def unbind_name_from_record_a(self, dnsview_name, ip, name, unbind_list):
+ ip_backend = IPBackendFactory.get(self, ip)
+ ip_backend.unbind_name_from_record_a(
+ dnsview_name, ip, name, unbind_list)
+
+ def get_member(self, member):
+ return self.connector.get_object('member', {'host_name': member.name})
+
+ def restart_all_services(self, member):
+ ib_member = self.get_member(member)[0]
+ self.connector.call_func('restartservices', ib_member['_ref'],
+ {'restart_option': 'RESTART_IF_NEEDED',
+ 'service_option': 'ALL'})
+
+ def get_object_refs_associated_with_a_record(self, a_record_ref):
+ associated_with_a_record = [ # {object_type, search_field}
+ {'type': 'record:cname', 'search': 'canonical'},
+ {'type': 'record:txt', 'search': 'name'}
+ ]
+
+ obj_refs = []
+ a_record = self.connector.get_object(a_record_ref)
+
+ for rec_inf in associated_with_a_record:
+ objs = self.connector.get_object(
+ rec_inf['type'], {'view': a_record['view'],
+ rec_inf['search']: a_record['name']})
+ if objs:
+ for obj in objs:
+ obj_refs.append(obj['_ref'])
+
+ return obj_refs
+
+ def get_all_associated_objects(self, net_view_name, ip):
+ ip_backend = IPBackendFactory.get(self, ip)
+ return ip_backend.get_all_associated_objects(net_view_name, ip)
+
+ def delete_all_associated_objects(self, net_view_name, ip, delete_list):
+ del_objs = []
+ obj_refs = self.get_all_associated_objects(net_view_name, ip)
+
+ for obj_ref in obj_refs:
+ del_objs.append(obj_ref)
+ if self._get_object_type_from_ref(obj_ref) \
+ in ['record:a', 'record:aaaa']:
+ del_objs.extend(
+ self.get_object_refs_associated_with_a_record(obj_ref))
+
+ for obj_ref in del_objs:
+ if self._get_object_type_from_ref(obj_ref) in delete_list:
+ self.connector.delete_object(obj_ref)
+
+ def delete_object_by_ref(self, ref):
+ try:
+ self.connector.delete_object(ref)
+ except exc.InfobloxCannotDeleteObject as e:
+ LOG.info(_("Failed to delete an object: %s"), e)
+
+ def _create_infoblox_ip_address(self, ip_object):
+ try:
+ created_ip_json = self._create_infoblox_object(
+ ip_object.infoblox_type,
+ ip_object.to_dict(),
+ check_if_exists=False,
+ return_fields=ip_object.return_fields)
+
+ return ip_object.from_dict(created_ip_json)
+ except exc.InfobloxCannotCreateObject as e:
+ if "Cannot find 1 available IP" in e.response['text']:
+ raise exc.InfobloxCannotAllocateIp(ip_data=ip_object.to_dict())
+ else:
+ raise e
+ except exc.HostRecordNotPresent:
+ raise exc.InfobloxHostRecordIpAddrNotCreated(ip=ip_object.ip,
+ mac=ip_object.mac)
+ except exc.InfobloxInvalidIp:
+ raise exc.InfobloxDidNotReturnCreatedIPBack()
+
+ def _create_infoblox_object(self, obj_type, payload,
+ additional_create_kwargs=None,
+ check_if_exists=True,
+ return_fields=None,
+ delegate_member=None,
+ update_if_exists=False):
+ if additional_create_kwargs is None:
+ additional_create_kwargs = {}
+
+ ib_object = None
+ if check_if_exists or update_if_exists:
+ ib_object = self._get_infoblox_object_or_none(obj_type, payload)
+ if ib_object:
+ LOG.info(
+ _("Infoblox %(obj_type)s "
+ "already exists: %(ib_object)s"),
+ {'obj_type': obj_type, 'ib_object': ib_object})
+
+ if not ib_object:
+ payload.update(additional_create_kwargs)
+ ib_object = self.connector.create_object(obj_type, payload,
+ return_fields, delegate_member)
+ LOG.info(
+ _("Infoblox %(obj_type)s "
+ "was created: %(ib_object)s"),
+ {'obj_type': obj_type, 'ib_object': ib_object})
+ elif update_if_exists:
+ self._update_infoblox_object_by_ref(ib_object,
+ additional_create_kwargs)
+
+ return ib_object
+
+ def _get_infoblox_object_or_none(self, obj_type, payload,
+ return_fields=None, proxy=False):
+        # Ignore 'extattrs' for get_object, since this field is not searchable
+ search_payload = {}
+ for key in payload:
+            if key != 'extattrs':
+ search_payload[key] = payload[key]
+ ib_object = self.connector.get_object(obj_type, search_payload,
+ return_fields, proxy=proxy)
+ if ib_object:
+ if return_fields:
+ return ib_object[0]
+ else:
+ return ib_object[0]['_ref']
+
+ return None
+
+ def _update_infoblox_object(self, obj_type, payload, update_kwargs):
+ ib_object_ref = None
+ warn_msg = _('Infoblox %(obj_type)s will not be updated because'
+ ' it cannot be found: %(payload)s')
+ try:
+ ib_object_ref = self._get_infoblox_object_or_none(obj_type,
+ payload)
+ if not ib_object_ref:
+ LOG.warning(warn_msg, {'obj_type': obj_type,
+ 'payload': payload})
+ except exc.InfobloxSearchError as e:
+            LOG.warning(warn_msg, {'obj_type': obj_type, 'payload': payload})
+ LOG.info(e)
+
+ if ib_object_ref:
+ self._update_infoblox_object_by_ref(ib_object_ref, update_kwargs)
+
+ def _update_infoblox_object_by_ref(self, ref, update_kwargs,
+ return_fields=None):
+ updated_object = self.connector.update_object(ref, update_kwargs,
+ return_fields)
+ LOG.info(_('Infoblox object was updated: %s'), ref)
+ return updated_object
+
+ def _delete_infoblox_object(self, obj_type, payload):
+ ib_object_ref = None
+ warn_msg = _('Infoblox %(obj_type)s will not be deleted because'
+ ' it cannot be found: %(payload)s')
+ try:
+ ib_object_ref = self._get_infoblox_object_or_none(obj_type,
+ payload)
+ if not ib_object_ref:
+ LOG.warning(warn_msg, {'obj_type': obj_type,
+ 'payload': payload})
+ except exc.InfobloxSearchError as e:
+ LOG.warning(warn_msg, {'obj_type': obj_type,
+ 'payload': payload})
+ LOG.info(e)
+
+ if ib_object_ref:
+ self.connector.delete_object(ib_object_ref)
+ LOG.info(_('Infoblox object was deleted: %s'), ib_object_ref)
+
+ def _update_host_record_ips(self, ipaddrs_name, host_record):
+ ipaddrs = {ipaddrs_name: [ip.to_dict(add_host=False)
+ for ip in host_record.ips]}
+ return self._update_infoblox_object_by_ref(
+ host_record.ref, ipaddrs, return_fields=[ipaddrs_name])
+
+ @staticmethod
+ def _get_object_type_from_ref(ref):
+ return ref.split('/', 1)[0]
diff --git a/neutron/ipam/drivers/infoblox/objects.py b/neutron/ipam/drivers/infoblox/objects.py
new file mode 100755
index 0000000..24cdd48
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/objects.py
@@ -0,0 +1,663 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import logging
+import netaddr
+import six
+import random
+from oslo.config import cfg
+import neutron.ipam.drivers.infoblox.exceptions as ib_exc
+
+
# Config options for the Infoblox IPAM driver's IP allocation records.
OPTS = [
    cfg.BoolOpt('use_dhcp_for_ip_allocation_record',
                default=True,
                help=_("Used to set 'configure_for_dhcp' option to enable "
                       " or disable dhcp for host or fixed record"))
]

# Register at import time so the flag is readable via cfg.CONF everywhere.
cfg.CONF.register_opts(OPTS)
+LOG = logging.getLogger(__name__)
+
+
def is_valid_ip(ip):
    """Return True when *ip* parses as an IPv4 or IPv6 address."""
    try:
        netaddr.IPAddress(ip)
        return True
    except netaddr.core.AddrFormatError:
        return False
+
+
def generate_duid(mac):
    """Build a DHCPv6 DUID from *mac*: a fixed 0x00 lead byte, three random
    bytes (the second capped at 0x7f), then the MAC itself."""
    prefix = [0x00,
              random.randint(0x00, 0x7f),
              random.randint(0x00, 0xff),
              random.randint(0x00, 0xff)]
    return ':'.join('%02x' % octet for octet in prefix) + ':' + mac
+
+
class Network(object):
    """In-memory view of an Infoblox WAPI 'network' object.

    Wraps the 'members' and 'options' lists returned by WAPI and exposes
    the DHCP 'domain-name-servers' option through the dns_nameservers
    property.
    """

    DNS_NAMESERVERS_OPTION = 'domain-name-servers'

    def __init__(self):
        self.infoblox_type = 'network'
        self.members = []
        self.options = []
        self.member_ip_addrs = []
        self.infoblox_reference = None
        self.ref = None

    def __repr__(self):
        return "{0}".format(self.to_dict())

    @staticmethod
    def from_dict(network_ib_object):
        """Build a Network from a WAPI response dict; requires 'members',
        'options' and '_ref' keys."""
        net = Network()
        net.members = network_ib_object['members']
        net.options = network_ib_object['options']
        net.member_ip_addrs = [member['ipv4addr'] for member in net.members]
        net.ref = network_ib_object['_ref']
        return net

    @property
    def dns_nameservers(self):
        """Nameserver IPs from the DHCP option, or [] when unset.

        WAPI behaviour: the option may be absent entirely, or present with
        use_option=False after having been cleared — both mean "no
        nameservers configured on NIOS".
        """
        for option in self.options:
            if self._is_dns_option(option):
                if option.get('use_option', True):
                    return option['value'].split(',')
                break
        return []

    @dns_nameservers.setter
    def dns_nameservers(self, value):
        existing = next(
            (opt for opt in self.options if self._is_dns_option(opt)), None)
        if existing is not None:
            if value:
                existing['value'] = ",".join(value)
                existing['use_option'] = True
            else:
                # Clearing: WAPI keeps the option but flags it unused.
                existing['use_option'] = False
        elif value:
            self.options.append({
                'name': self.DNS_NAMESERVERS_OPTION,
                'value': ",".join(value),
                'use_option': True,
            })

    def has_dns_members(self):
        """True when a domain-name-servers option is present at all."""
        return any(self._is_dns_option(opt) for opt in self.options)

    def _is_member_ip(self, ip):
        return ip in self.member_ip_addrs

    def update_member_ip_in_dns_nameservers(self, relay_ip):
        """Prepend *relay_ip* to the DNS option and drop any grid-member
        addresses (and empty entries) from its value."""
        for option in self.options:
            if not self._is_dns_option(option):
                continue
            servers = [relay_ip] + option['value'].split(',')
            kept = [ip for ip in servers
                    if ip and not self._is_member_ip(ip)]
            option['value'] = ",".join(kept)

    def to_dict(self):
        return {
            'members': self.members,
            'options': self.options
        }

    @staticmethod
    def _is_dns_option(option):
        return option['name'] == Network.DNS_NAMESERVERS_OPTION
+
+
class IPAddress(object):
    """Base class for a single Infoblox IP allocation (v4 or v6)."""

    def __init__(self, ip=None, mac=None):
        self.ip = ip
        self.mac = mac
        # Whether NIOS should serve this address over DHCP; taken from the
        # use_dhcp_for_ip_allocation_record config flag.
        self.configure_for_dhcp = cfg.CONF.use_dhcp_for_ip_allocation_record
        self.hostname = None
        self.dns_zone = None
        self.fqdn = None

    def __eq__(self, other):
        """Compare against a plain IP string or another IPAddress."""
        if isinstance(other, six.string_types):
            return self.ip == other
        if isinstance(other, self.__class__):
            return self.ip == other.ip and self.dns_zone == other.dns_zone
        return False
+
+
class IPv4(IPAddress):
    """IPv4 allocation element of an Infoblox host record."""

    def to_dict(self, add_host=False):
        """Serialize to the WAPI 'ipv4addr' structure.

        :param add_host: include the FQDN under 'host' when it is known.
        """
        d = {
            "ipv4addr": self.ip,
            "configure_for_dhcp": self.configure_for_dhcp
        }

        if self.fqdn and add_host:
            d['host'] = self.fqdn

        if self.mac:
            d['mac'] = self.mac

        return d

    def __repr__(self):
        return "IPv4Addr{0}".format(self.to_dict())

    @staticmethod
    def from_dict(d):
        """Build an IPv4 object from a WAPI 'ipv4addr' dict.

        :raises: InfobloxInvalidIp when 'ipv4addr' is missing or invalid.
        """
        ip = d.get('ipv4addr')
        if not is_valid_ip(ip):
            raise ib_exc.InfobloxInvalidIp(ip=ip)

        ipv4obj = IPv4()
        host = d.get('host', 'unknown.unknown')
        hostname, _, dns_zone = host.partition('.')
        ipv4obj.ip = ip
        ipv4obj.mac = d.get('mac')
        ipv4obj.configure_for_dhcp = d.get('configure_for_dhcp')
        ipv4obj.hostname = hostname
        # BUG FIX: also populate dns_zone, which IPAddress.__eq__ compares
        # but which was previously left at its default None; zone_auth is
        # kept for the HostRecord zone_auth accessors.
        ipv4obj.dns_zone = dns_zone
        ipv4obj.zone_auth = dns_zone
        ipv4obj.fqdn = host
        return ipv4obj
+
+
class IPv6(IPAddress):
    """IPv6 allocation element of an Infoblox host record."""

    def to_dict(self, add_host=False):
        """Serialize to the WAPI 'ipv6addr' structure.

        :param add_host: include the FQDN under 'host' when it is known.
        """
        d = {
            "ipv6addr": self.ip,
            "configure_for_dhcp": self.configure_for_dhcp
        }

        if self.fqdn and add_host:
            d['host'] = self.fqdn

        if self.mac:
            # IPv6 leases are keyed by DUID rather than by raw MAC.
            d['duid'] = generate_duid(self.mac)

        return d

    def __repr__(self):
        return "IPv6Addr{0}".format(self.to_dict())

    @staticmethod
    def from_dict(d):
        """Build an IPv6 object from a WAPI 'ipv6addr' dict.

        :raises: InfobloxInvalidIp when 'ipv6addr' is missing or invalid.
        """
        ip = d.get('ipv6addr')
        if not is_valid_ip(ip):
            raise ib_exc.InfobloxInvalidIp(ip=ip)

        ipv6obj = IPv6()
        host = d.get('host', 'unknown.unknown')
        hostname, _, dns_zone = host.partition('.')
        ipv6obj.ip = ip
        # NOTE(review): 'mac' is never populated here (WAPI v6 payloads
        # carry 'duid'), so to_dict() on a parsed object omits the duid —
        # confirm this asymmetry is intended.
        ipv6obj.duid = d.get('duid')
        ipv6obj.configure_for_dhcp = d.get('configure_for_dhcp')
        ipv6obj.hostname = hostname
        # BUG FIX: also populate dns_zone, which IPAddress.__eq__ compares
        # but which was previously left at its default None.
        ipv6obj.dns_zone = dns_zone
        ipv6obj.zone_auth = dns_zone
        ipv6obj.fqdn = host
        return ipv6obj
+
+
class IPAllocationObject(object):
    """Common base for NIOS allocations (host records, fixed addresses).

    Provides builders for WAPI 'func:nextavailableip' expressions, which
    let NIOS pick the next free address server-side.
    """

    @staticmethod
    def next_available_ip_from_cidr(net_view_name, cidr):
        """Next free address anywhere inside *cidr* of the network view."""
        return 'func:nextavailableip:{0},{1}'.format(cidr, net_view_name)

    @staticmethod
    def next_available_ip_from_range(net_view_name, first_ip, last_ip):
        """Next free address within the inclusive [first_ip, last_ip] range."""
        return 'func:nextavailableip:{0}-{1},{2}'.format(
            first_ip, last_ip, net_view_name)
+
+
class HostRecord(IPAllocationObject):
    """Base class for Infoblox 'record:host' objects."""

    def __init__(self):
        self.infoblox_type = 'record:host'
        # List of IPv4/IPv6 allocation objects attached to the record.
        self.ips = []
        # WAPI object reference, set once the record exists on NIOS.
        self.ref = None
        self.name = None
        self.dns_view = None
        self.extattrs = None
        self.configure_for_dhcp = cfg.CONF.use_dhcp_for_ip_allocation_record

    def __repr__(self):
        return "HostRecord{0}".format(self.to_dict())

    def __eq__(self, other):
        same_type = isinstance(other, self.__class__)
        return (same_type and
                self.ips == other.ips and
                self.name == other.name and
                self.dns_view == other.dns_view)

    @property
    def zone_auth(self):
        return self._zone_auth

    @zone_auth.setter
    def zone_auth(self, value):
        # Zone names coming from WAPI may carry a leading dot; strip it.
        if value:
            self._zone_auth = value.lstrip('.')
+
+
class HostRecordIPv4(HostRecord):
    """Sample Infoblox host record object in JSON format:
    {
    u'_ref': u'record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LmNvbS5nbG9iYWwuY22NA
    :test_host_name.testsubnet.cloud.global.com/default',
    u'ipv4addrs': [
        {
        u'configure_for_dhcp': False,
        u'_ref': u'record:host_ipv4addr/lMmQ3ZjkuMmM4ZjhlOTctMGQ5Mi00Y2:22.0.0.2/
        test_host_name.testsubnet.cloud.global.com/default', u'ipv4addr': u'22.0.0.2',
        u'mac': u'fa:16:3e:29:87:70',
        u'host': u'2c8f8e97-0d92-4cac-a350-09a0c53fe664.33c00d42-9715-43fe-862c-6ff2b7e2d7f9.cloud.global.com'
        }
    ],
    u'extattrs': {
        u'Account': {u'value': u'8a21c40495f04f30a1b2dc6fd1d9ed1a'},
        u'Cloud API Owned': {u'value': u'True'},
        u'VM ID': {u'value': u'None'},
        u'IP Type': {u'value': u'Fixed'},
        u'CMP Type': {u'value': u'OpenStack'},
        u'Port ID': {u'value': u'136ef9ad-9c88-41ea-9fa6-bd48d8ec789a'},
        u'Tenant ID': {u'value': u'00fd80791dee4112bb538c872b206d4c'}
    }
    }
    """
    # Fields requested back from WAPI on create/read.
    return_fields = [
        'ipv4addrs',
        'extattrs'
    ]

    # NOTE(review): __repr__ and __eq__ duplicate the HostRecord base
    # implementations verbatim; they could simply be inherited.
    def __repr__(self):
        return "HostRecord{0}".format(self.to_dict())

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.ips == other.ips and
                self.name == other.name and
                self.dns_view == other.dns_view)

    @property
    def ip(self):
        # Convenience accessor for the record's primary (first) address.
        if self.ips:
            return self.ips[0].ip

    @ip.setter
    def ip(self, ip_address):
        # Update the first address in place, or create the first IPv4
        # entry when the record has none yet.
        if self.ips:
            self.ips[0].ip = ip_address
        else:
            ip_obj = IPv4()
            ip_obj.ip = ip_address

            self.ips.append(ip_obj)

    @property
    def mac(self):
        if self.ips:
            return self.ips[0].mac

    @mac.setter
    def mac(self, mac_address):
        if self.ips:
            self.ips[0].mac = mac_address
        else:
            ip_obj = IPv4()
            ip_obj.mac = mac_address
            self.ips.append(ip_obj)

    @property
    def hostname(self):
        if self.ips:
            return self.ips[0].hostname

    @hostname.setter
    def hostname(self, name):
        if self.ips:
            self.ips[0].hostname = name
        else:
            ip_obj = IPv4()
            ip_obj.hostname = name
            self.ips.append(ip_obj)

    def to_dict(self):
        """Serialize to the WAPI 'record:host' creation payload; 'name'
        is the FQDN built from hostname and zone."""
        result = {
            'view': self.dns_view,
            'name': '.'.join([self.hostname, self.zone_auth]),
            'extattrs': self.extattrs,
            'ipv4addrs': [ip.to_dict() for ip in self.ips]
        }
        return result

    @staticmethod
    def from_dict(hr_dict):
        """Build a HostRecordIPv4 from a WAPI 'record:host' response.

        Raises HostRecordNotPresent when 'ipv4addrs' is empty/missing and
        InfobloxInvalidIp when the first address is invalid.
        """
        ipv4addrs = hr_dict.get('ipv4addrs', None)
        if not ipv4addrs:
            raise ib_exc.HostRecordNotPresent()

        ipv4addr = ipv4addrs[0]
        ip = ipv4addr['ipv4addr']
        if not is_valid_ip(ip):
            raise ib_exc.InfobloxInvalidIp(ip=ip)
        host = ipv4addr.get('host', 'unknown.unknown')
        # NOTE(review): 'mac' is parsed here but unused; IPv4.from_dict
        # re-reads it per address below.
        mac = ipv4addr.get('mac')
        hostname, _, dns_zone = host.partition('.')

        host_record = HostRecordIPv4()
        # The hostname setter appends a placeholder IPv4 entry so that the
        # zone_auth setter below has an element to write to; the real
        # address list then replaces it wholesale.
        host_record.hostname = hostname
        host_record.zone_auth = dns_zone
        host_record.ref = hr_dict.get('_ref')
        host_record.ips = [IPv4.from_dict(ip) for ip in ipv4addrs]
        host_record.extattrs = hr_dict.get('extattrs')

        return host_record

    @property
    def zone_auth(self):
        # Delegates to the first address (overrides HostRecord._zone_auth).
        if self.ips:
            return self.ips[0].zone_auth

    @zone_auth.setter
    def zone_auth(self, value):
        # Requires self.ips to be non-empty (guaranteed when hostname is
        # set first, as from_dict does).
        if value:
            self.ips[0].zone_auth = value.lstrip('.')
+
+
class HostRecordIPv6(HostRecord):
    """Sample Infoblox host record object in JSON format:
    {
    u'_ref': u'record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LmNvbS5nbG9iYWwuYMQ
    :test_host_name.testsubnet.cloud.global.com/default',
    u'ipv6addrs': [
        {
        u'configure_for_dhcp': False,
        u'_ref': u'record:host_ipv6addr/ZG5zLmhvc3RfYWRkcmV:2607%3Af0d0%3A1002%3A51%3A%3A2/
        test_host_name.testsubnet/default',
        u'host': u'ea30c45d-6385-44a2-b187-94b0c6f8bad1.9706dd0c-b772-4522-93e3-2e4fea2859de.cloud.global.com',
        u'duid': u'00:6f:6d:ba:fa:16:3e:86:40:e3',
        u'ipv6addr': u'2607:f0d0:1002:51::2'
        }
    ],
    u'extattrs': {
        u'Account': {u'value': u'8a21c40495f04f30a1b2dc6fd1d9ed1a'},
        u'Port ID': {u'value': u'77c2ee08-32bf-4cd6-a24f-586ca91bd533'},
        u'VM ID': {u'value': u'None'},
        u'IP Type': {u'value': u'Fixed'},
        u'CMP Type': {u'value': u'OpenStack'},
        u'Cloud API Owned': {u'value': u'True'},
        u'Tenant ID': {u'value': u'00fd80791dee4112bb538c872b206d4c'}
    }
    }
    """
    # Fields requested back from WAPI on create/read.
    return_fields = [
        'ipv6addrs',
        'extattrs'
    ]

    def to_dict(self):
        """Serialize to the WAPI 'record:host' creation payload.

        Unlike the IPv4 variant, only a single address entry is emitted,
        built from self.ip/self.mac; the DUID is derived from the MAC.
        """
        result = {
            'view': self.dns_view,
            'name': '.'.join([self.hostname, self.zone_auth]),
            'extattrs': self.extattrs
        }

        result['ipv6addrs'] = [{
            'configure_for_dhcp': self.configure_for_dhcp,
            'ipv6addr': self.ip,
            'duid': generate_duid(self.mac)
        }]

        return result

    @staticmethod
    def from_dict(hr_dict):
        """Build a HostRecordIPv6 from a WAPI 'record:host' response.

        Raises HostRecordNotPresent when 'ipv6addrs' is empty/missing and
        InfobloxInvalidIp when the first address is invalid.
        """
        ipv6addrs = hr_dict.get('ipv6addrs', None)
        if not ipv6addrs:
            raise ib_exc.HostRecordNotPresent()

        ipv6addr = ipv6addrs[0]
        ip = ipv6addr['ipv6addr']
        if not is_valid_ip(ip):
            raise ib_exc.InfobloxInvalidIp(ip=ip)
        host = ipv6addr.get('host', 'unknown.unknown')
        # NOTE(review): WAPI v6 payloads carry 'duid', not 'mac' (see the
        # sample above), so mac is typically None here and a subsequent
        # to_dict() would fail in generate_duid — confirm intended use.
        mac = ipv6addr.get('mac')

        hostname, _, dns_zone = host.partition('.')

        host_record = HostRecordIPv6()
        host_record.hostname = hostname
        host_record.zone_auth = dns_zone
        host_record.mac = mac
        host_record.ip = ip
        host_record.ref = hr_dict.get('_ref')

        return host_record

    @property
    def ip(self):
        # Convenience accessor for the record's primary (first) address.
        if self.ips:
            return self.ips[0].ip

    @ip.setter
    def ip(self, ip_address):
        # Update the first address in place, or create the first IPv6
        # entry when the record has none yet.
        if self.ips:
            self.ips[0].ip = ip_address
        else:
            ip_obj = IPv6()
            ip_obj.ip = ip_address

            self.ips.append(ip_obj)

    @property
    def mac(self):
        if self.ips:
            return self.ips[0].mac

    @mac.setter
    def mac(self, mac_address):
        if self.ips:
            self.ips[0].mac = mac_address
        else:
            ip_obj = IPv6()
            ip_obj.mac = mac_address
            self.ips.append(ip_obj)

    @property
    def hostname(self):
        if self.ips:
            return self.ips[0].hostname

    @hostname.setter
    def hostname(self, name):
        if self.ips:
            self.ips[0].hostname = name
        else:
            ip_obj = IPv6()
            ip_obj.hostname = name
            self.ips.append(ip_obj)
+
+
class FixedAddress(IPAllocationObject):
    """Base class for Infoblox fixed-address (static DHCP) objects."""

    def __init__(self):
        self.infoblox_type = 'fixedaddress'
        self.ref = None
        self.ip = None
        self.mac = None
        self.duid = None
        self.net_view = None
        self.extattrs = None

    def __repr__(self):
        # NOTE(review): to_dict() is only defined on the IPv4/IPv6
        # subclasses; repr of a bare FixedAddress raises AttributeError.
        return "FixedAddress({0})".format(self.to_dict())
+
+
class FixedAddressIPv4(FixedAddress):
    """IPv4 fixed address (static DHCP reservation) on NIOS."""

    def __init__(self):
        # BUG FIX: initialize the inherited attributes (ip, mac, net_view,
        # extattrs, ref) via the base class instead of leaving them
        # undefined until assigned externally.
        super(FixedAddressIPv4, self).__init__()
        self.infoblox_type = 'fixedaddress'

        self.return_fields = [
            'ipv4addr',
            'mac',
            'network_view',
            'extattrs'
        ]

    def to_dict(self):
        """Serialize to the WAPI 'fixedaddress' payload."""
        return {
            'mac': self.mac,
            'network_view': self.net_view,
            'ipv4addr': self.ip,
            'extattrs': self.extattrs
        }

    @staticmethod
    def from_dict(fixed_address_dict):
        """Build a FixedAddressIPv4 from a WAPI response dict.

        :raises: InfobloxInvalidIp when 'ipv4addr' is missing or invalid.
        """
        ip = fixed_address_dict.get('ipv4addr')
        if not is_valid_ip(ip):
            raise ib_exc.InfobloxInvalidIp(ip=ip)

        # BUG FIX: instantiate the concrete subclass (previously the
        # FixedAddress base, which lacks return_fields and to_dict()).
        fa = FixedAddressIPv4()
        fa.ip = ip
        fa.mac = fixed_address_dict.get('mac')
        fa.net_view = fixed_address_dict.get('network_view')
        fa.extattrs = fixed_address_dict.get('extattrs')
        fa.ref = fixed_address_dict.get('_ref')
        return fa
+
+
class FixedAddressIPv6(FixedAddress):
    """IPv6 fixed address (static DHCP reservation) on NIOS."""

    def __init__(self):
        # BUG FIX: initialize the inherited attributes (ip, mac, net_view,
        # extattrs, ref) via the base class instead of leaving them
        # undefined until assigned externally.
        super(FixedAddressIPv6, self).__init__()
        self.infoblox_type = 'ipv6fixedaddress'

        self.return_fields = [
            'ipv6addr',
            'duid',
            'network_view',
            'extattrs'
        ]

    def to_dict(self):
        """Serialize to the WAPI 'ipv6fixedaddress' payload; the DUID is
        derived from the MAC address."""
        return {
            'duid': generate_duid(self.mac),
            'network_view': self.net_view,
            'ipv6addr': self.ip,
            'extattrs': self.extattrs
        }

    @staticmethod
    def from_dict(fixed_address_dict):
        """Build a FixedAddressIPv6 from a WAPI response dict.

        :raises: InfobloxInvalidIp when 'ipv6addr' is missing or invalid.
        """
        ip = fixed_address_dict.get('ipv6addr')
        if not is_valid_ip(ip):
            raise ib_exc.InfobloxInvalidIp(ip=ip)

        # BUG FIX: instantiate the concrete subclass; the base class would
        # carry infoblox_type 'fixedaddress' instead of 'ipv6fixedaddress'
        # and lack return_fields/to_dict().
        fa = FixedAddressIPv6()
        fa.ip = ip
        # NOTE(review): WAPI v6 payloads usually carry 'duid', not 'mac' —
        # confirm whether duid should be parsed here as well.
        fa.mac = fixed_address_dict.get('mac')
        fa.net_view = fixed_address_dict.get('network_view')
        fa.extattrs = fixed_address_dict.get('extattrs')
        fa.ref = fixed_address_dict.get('_ref')
        return fa
+
+
class Member(object):
    """A grid member (NIOS appliance) identified by name and/or address.

    :param ip: IPv4 address of the member (may be None).
    :param name: member host name.
    :param ipv6: optional IPv6 address.
    :param map_id: optional mapping identifier used for equality.
    :param delegate: whether authority is delegated to this member.
    """

    def __init__(self, ip, name, ipv6=None, map_id=None, delegate=False):
        self.ip = ip
        self.ipv6 = ipv6
        self.name = name
        self.map_id = map_id
        self.delegate = delegate

    def __eq__(self, other):
        # BUG FIX: guard against non-Member operands, which previously
        # raised AttributeError instead of comparing unequal.
        if not isinstance(other, Member):
            return NotImplemented
        return (self.ip == other.ip and
                self.name == other.name and
                self.map_id == other.map_id)

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; keep them in sync.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        return \
            ('Member(IP={ip}, IPv6={ipv6}, name={name}, map_id={map_id}, ' +
             'delegate={delegate})').\
            format(ip=self.ip,
                   ipv6=self.ipv6,
                   name=self.name,
                   map_id=self.map_id,
                   delegate=self.delegate)

    @property
    def specifier(self):
        """Return _struct dhcpmember that can be used to specify a member"""
        specifier = {'_struct': 'dhcpmember'}
        # Prefer the symbolic name; fall back to IPv4, then IPv6.
        if self.name:
            specifier['name'] = self.name
        elif self.ip:
            specifier['ipv4addr'] = self.ip
        elif self.ipv6:
            specifier['ipv6addr'] = self.ipv6
        return specifier
diff --git a/neutron/ipam/drivers/infoblox/tasks.py b/neutron/ipam/drivers/infoblox/tasks.py
new file mode 100644
index 0000000..56d8d24
--- /dev/null
+++ b/neutron/ipam/drivers/infoblox/tasks.py
@@ -0,0 +1,143 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import operator
+
+from taskflow import task
+
+from neutron.ipam.drivers.infoblox import exceptions
+
+
class CreateNetViewTask(task.Task):
    """Create an Infoblox network view; revert deletes it when empty.

    Parameter names are significant: taskflow injects them by name from
    flow storage.
    """

    def execute(self, obj_manip, net_view_name, nview_extattrs, dhcp_member):
        # dhcp_member is a sequence; the view is created on its first member.
        obj_manip.create_network_view(
            net_view_name, nview_extattrs, dhcp_member[0])

    def revert(self, obj_manip, net_view_name, **kwargs):
        # Only remove the view when no networks remain inside it.
        if not obj_manip.has_networks(net_view_name):
            obj_manip.delete_network_view(net_view_name)
+
+
class CreateNetworkTask(task.Task):
    """Create a NIOS network and restart services on its related members.

    Revert deletes the network and restarts the same members so they drop
    the stale configuration. The ip_version/ipv6_* parameters are accepted
    but unused here — presumably declared so taskflow wires them through;
    confirm before removing.
    """

    def execute(self, obj_manip, net_view_name, cidr, nameservers, dhcp_member,
                gateway_ip, dhcp_trel_ip, network_extattrs, related_members,
                ip_version, ipv6_ra_mode=None, ipv6_address_mode=None):
        obj_manip.create_network(net_view_name, cidr, nameservers, dhcp_member,
                                 gateway_ip, dhcp_trel_ip, network_extattrs)
        # New network settings take effect only after a service restart.
        for member in related_members:
            obj_manip.restart_all_services(member)

    def revert(self, obj_manip, net_view_name, related_members, cidr,
               **kwargs):
        obj_manip.delete_network(net_view_name, cidr)
        for member in related_members:
            obj_manip.restart_all_services(member)
+
+
class ChainInfobloxNetworkTask(task.Task):
    """Adopt a pre-existing NIOS network for a shared/external subnet.

    Only subnets flagged 'Is External' or 'Is Shared' may reuse a network
    that already exists on NIOS; any other collision is an error.
    """

    def execute(self, obj_manip, net_view_name, cidr, network_extattrs):
        ea_names = ['Is External', 'Is Shared']

        eas = operator.itemgetter(*ea_names)(network_extattrs)
        # SECURITY FIX: the EA values are the strings 'True'/'False';
        # compare text instead of eval()-ing data that originates outside
        # this process.
        shared_or_external = any(
            str(ea['value']).strip().lower() == 'true' for ea in eas)

        if shared_or_external:
            ib_network = obj_manip.get_network(net_view_name, cidr)
            obj_manip.update_network_options(ib_network, network_extattrs)
        else:
            raise exceptions.InfobloxInternalPrivateSubnetAlreadyExist()

    def revert(self, obj_manip, net_view_name, cidr, network_extattrs,
               **kwargs):
        # keep NIOS network untouched on rollback — it pre-existed us
        pass
+
+
class CreateNetworkFromTemplateTask(task.Task):
    """Create a NIOS network from a named template; revert deletes it."""

    def execute(self, obj_manip, net_view_name, cidr, template,
                network_extattrs):
        obj_manip.create_network_from_template(
            net_view_name, cidr, template, network_extattrs)

    def revert(self, obj_manip, net_view_name, cidr, **kwargs):
        obj_manip.delete_network(net_view_name, cidr)
+
+
class CreateIPRange(task.Task):
    """Create a DHCP IP range inside a NIOS network; revert deletes it.

    ip_version/ipv6_* are accepted but unused — presumably declared for
    taskflow wiring; confirm before removing.
    """

    def execute(self, obj_manip, net_view_name, start_ip, end_ip, disable,
                cidr, range_extattrs, ip_version, ipv6_ra_mode=None,
                ipv6_address_mode=None):
        obj_manip.create_ip_range(net_view_name, start_ip, end_ip,
                                  cidr, disable, range_extattrs)

    def revert(self, obj_manip, net_view_name, start_ip, end_ip,
               ip_version, ipv6_ra_mode=None, ipv6_address_mode=None,
               **kwargs):
        obj_manip.delete_ip_range(net_view_name, start_ip, end_ip)
+
+
class CreateDNSViewTask(task.Task):
    """Create a DNS view bound to a network view; never rolled back."""

    def execute(self, obj_manip, net_view_name, dns_view_name):
        obj_manip.create_dns_view(net_view_name, dns_view_name)

    def revert(self, **kwargs):
        # never delete DNS view
        pass
+
+
class CreateDNSZonesTask(task.Task):
    """Create a forward DNS zone by FQDN; revert deletes the zone."""

    def execute(self, obj_manip, dnsview_name, fqdn, dns_member,
                secondary_dns_members, zone_extattrs, **kwargs):
        obj_manip.create_dns_zone(dnsview_name, fqdn, dns_member,
                                  secondary_dns_members,
                                  zone_extattrs=zone_extattrs)

    def revert(self, obj_manip, dnsview_name, fqdn, **kwargs):
        obj_manip.delete_dns_zone(dnsview_name, fqdn)
+
+
class CreateDNSZonesTaskCidr(task.Task):
    """Create a reverse DNS zone keyed by CIDR; revert deletes the zone.

    zone_format selects IPV4/IPV6 reverse-zone formatting on NIOS.
    """

    def execute(self, obj_manip, dnsview_name, cidr, dns_member, zone_format,
                secondary_dns_members, prefix, zone_extattrs, **kwargs):
        obj_manip.create_dns_zone(dnsview_name, cidr, dns_member,
                                  secondary_dns_members,
                                  prefix=prefix,
                                  zone_format=zone_format,
                                  zone_extattrs=zone_extattrs)

    def revert(self, obj_manip, dnsview_name, cidr, **kwargs):
        obj_manip.delete_dns_zone(dnsview_name, cidr)
+
+
class CreateDNSZonesFromNSGroupTask(task.Task):
    """Create a forward DNS zone served by a nameserver group."""

    def execute(self, obj_manip, dnsview_name, fqdn, ns_group,
                zone_extattrs, **kwargs):
        obj_manip.create_dns_zone(dnsview_name, fqdn, ns_group=ns_group,
                                  zone_extattrs=zone_extattrs)

    def revert(self, obj_manip, dnsview_name, fqdn, **kwargs):
        obj_manip.delete_dns_zone(dnsview_name, fqdn)
+
+
class CreateDNSZonesCidrFromNSGroupTask(task.Task):
    """Create a reverse DNS zone (by CIDR) served by a nameserver group."""

    def execute(self, obj_manip, dnsview_name, cidr, ns_group, zone_format,
                prefix, zone_extattrs, **kwargs):
        obj_manip.create_dns_zone(dnsview_name, cidr,
                                  ns_group=ns_group,
                                  prefix=prefix,
                                  zone_format=zone_format,
                                  zone_extattrs=zone_extattrs)

    def revert(self, obj_manip, dnsview_name, cidr, **kwargs):
        obj_manip.delete_dns_zone(dnsview_name, cidr)
diff --git a/neutron/ipam/drivers/neutron_db.py b/neutron/ipam/drivers/neutron_db.py
new file mode 100644
index 0000000..096722b
--- /dev/null
+++ b/neutron/ipam/drivers/neutron_db.py
@@ -0,0 +1,217 @@
+# Copyright (c) 2014 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import netaddr
+
+from sqlalchemy import orm
+from sqlalchemy.orm import exc
+
+from neutron.common import exceptions as n_exc
+from neutron.db import common_db_mixin
+from neutron.db import models_v2
+from neutron.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
class NeutronPluginController(common_db_mixin.CommonDbMixin):
    """DB lookup helpers shared by the IPAM drivers."""

    def _get_network(self, context, net_id):
        """Fetch a network row by id or raise NetworkNotFound."""
        try:
            return self._get_by_id(context, models_v2.Network, net_id)
        except exc.NoResultFound:
            raise n_exc.NetworkNotFound(net_id=net_id)

    def _get_subnet(self, context, subnet_id):
        """Fetch a subnet row by id or raise SubnetNotFound."""
        try:
            return self._get_by_id(context, models_v2.Subnet, subnet_id)
        except exc.NoResultFound:
            raise n_exc.SubnetNotFound(subnet_id=subnet_id)
+
+
def get_subnets_by_port_id(context, port_id):
    """Return the subnets of the port's network that hold an IP
    allocation belonging to this port.

    Raises sqlalchemy.orm.exc.NoResultFound when the port does not exist.
    """
    subnets_with_port = []
    # Get Requested port
    port = context.session.query(models_v2.Port).filter_by(id=port_id).one()
    # Collect all subnets from port network
    subnets = get_subnets_by_network(context, port.network_id)
    for sub in subnets:
        # Collect all ports from subnet
        subnet_ports = get_subnet_ports(context, sub.id)
        # Compare them with original port, and if they are the same - save
        # NOTE(review): the subnet is appended once per matching row, so a
        # port with several allocations on one subnet may yield duplicates
        # — confirm whether callers tolerate that.
        subnets_with_port += [sub for sp in subnet_ports if sp.id == port.id]

    return subnets_with_port
+
+
def get_subnet_ports(context, subnet_id):
    """Return a (row-locked) query of ports that hold an IP allocation
    on *subnet_id*."""
    query = (context.session.query(models_v2.Port)
             .join(models_v2.IPAllocation)
             .with_lockmode('update')
             .enable_eagerloads(False))
    return query.filter_by(subnet_id=subnet_id)
+
+
def get_all_subnets(context):
    """Return every subnet row in the database."""
    subnet_query = context.session.query(models_v2.Subnet)
    return subnet_query.all()
+
+
def generate_ip(context, subnet):
    """Allocate the next free IP on *subnet*.

    On exhaustion the availability ranges are rebuilt from the allocation
    pools minus current allocations, and the allocation is retried once.
    """
    try:
        return _try_generate_ip(context, subnet)
    except n_exc.IpAddressGenerationFailure:
        # Ranges may be stale or deleted; rebuild from pools and retry.
        _rebuild_availability_ranges(context, subnet)

    return _try_generate_ip(context, subnet)
+
+
def allocate_specific_ip(context, subnet_id, ip_address):
    """Allocate a specific IP address on the subnet.

    Carves *ip_address* out of the subnet's availability ranges, splitting
    a range in two when the address falls strictly inside it.

    :returns: ip_address on success, or None when no availability range
              contains it (already allocated or outside every pool).
    """
    requested = netaddr.IPAddress(ip_address)
    ip = int(requested)
    range_qry = context.session.query(
        models_v2.IPAvailabilityRange).join(
        models_v2.IPAllocationPool).with_lockmode('update')
    results = range_qry.filter_by(subnet_id=subnet_id)
    # NOTE: 'range' shadowed the builtin in the original; renamed.
    for avail_range in results:
        first = int(netaddr.IPAddress(avail_range['first_ip']))
        last = int(netaddr.IPAddress(avail_range['last_ip']))
        if not first <= ip <= last:
            continue
        if first == last:
            # Range collapses to exactly the requested address: drop it.
            context.session.delete(avail_range)
        elif first == ip:
            avail_range['first_ip'] = str(requested + 1)
        elif last == ip:
            avail_range['last_ip'] = str(requested - 1)
        else:
            # Requested address falls inside the range: split in two.
            new_first = str(requested + 1)
            new_last = avail_range['last_ip']
            avail_range['last_ip'] = str(requested - 1)
            ip_range = models_v2.IPAvailabilityRange(
                allocation_pool_id=avail_range['allocation_pool_id'],
                first_ip=new_first,
                last_ip=new_last)
            context.session.add(ip_range)
        # BUG FIX: the non-split branches previously returned None even on
        # success, while the split branch returned ip_address; report the
        # allocated address consistently.
        return ip_address
    return None
+
+
def get_dns_by_subnet(context, subnet_id):
    """Return all DNS nameserver rows configured for *subnet_id*."""
    query = context.session.query(models_v2.DNSNameServer)
    return query.filter_by(subnet_id=subnet_id).all()
+
+
def get_route_by_subnet(context, subnet_id):
    """Return all host-route rows configured for *subnet_id*."""
    query = context.session.query(models_v2.SubnetRoute)
    return query.filter_by(subnet_id=subnet_id).all()
+
+
def get_subnets_by_network(context, network_id):
    """Return all subnet rows belonging to *network_id*."""
    query = context.session.query(models_v2.Subnet)
    return query.filter_by(network_id=network_id).all()
+
+
def _try_generate_ip(context, subnet):
    """Generate an IP address.

    The IP address will be generated from one of the subnets defined on
    the network.

    :returns: {'ip_address': ..., 'subnet_id': ...}
    :raises: IpAddressGenerationFailure when no availability range remains.
    """
    # Callers may pass a single subnet or a list of candidates; only the
    # first candidate is used.
    if isinstance(subnet, list):  # idiom fix: was `type(subnet) is list`
        subnet = subnet[0]
    range_qry = context.session.query(
        models_v2.IPAvailabilityRange).join(
        models_v2.IPAllocationPool).with_lockmode('update')
    # NOTE: 'range' shadowed the builtin in the original; renamed.
    avail_range = range_qry.filter_by(subnet_id=subnet.id).first()
    if not avail_range:
        LOG.debug(_("All IPs from subnet %(subnet_id)s (%(cidr)s) "
                    "allocated"),
                  {'subnet_id': subnet.id,
                   'cidr': subnet.cidr})
        raise n_exc.IpAddressGenerationFailure(
            net_id=subnet['network_id'])
    ip_address = avail_range['first_ip']
    LOG.debug(_("Allocated IP - %(ip_address)s from %(first_ip)s "
                "to %(last_ip)s"),
              {'ip_address': ip_address,
               'first_ip': avail_range['first_ip'],
               'last_ip': avail_range['last_ip']})
    if avail_range['first_ip'] == avail_range['last_ip']:
        # No more free indices on subnet => delete
        LOG.debug(_("No more free IP's in slice. Deleting allocation "
                    "pool."))
        context.session.delete(avail_range)
    else:
        # increment the first free
        avail_range['first_ip'] = str(netaddr.IPAddress(ip_address) + 1)
    return {'ip_address': ip_address, 'subnet_id': subnet.id}
+
+
def _rebuild_availability_ranges(context, subnets):
    """Rebuild availability ranges.

    This method is called only when there's no more IP available or by
    _update_subnet_allocation_pools. Calling
    _update_subnet_allocation_pools before calling this function deletes
    the IPAllocationPools associated with the subnet that is updating,
    which will result in deleting the IPAvailabilityRange too.
    """
    ip_qry = context.session.query(
        models_v2.IPAllocation).with_lockmode('update')
    # PostgreSQL does not support select...for update with an outer join.
    # No join is needed here.
    pool_qry = context.session.query(
        models_v2.IPAllocationPool).options(
        orm.noload('available_ranges')).with_lockmode('update')
    # NOTE(review): generate_ip() passes its 'subnet' argument straight
    # through, so *subnets* must already be an iterable of subnet rows —
    # confirm callers never pass a single model (sorted() would fail).
    for subnet in sorted(subnets):
        LOG.debug(_("Rebuilding availability ranges for subnet %s")
                  % subnet)

        # Create a set of all currently allocated addresses
        ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id'])
        allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address'])
                                     for i in ip_qry_results])

        for pool in pool_qry.filter_by(subnet_id=subnet['id']):
            # Create a set of all addresses in the pool
            poolset = netaddr.IPSet(netaddr.iter_iprange(pool['first_ip'],
                                                         pool['last_ip']))

            # Use set difference to find free addresses in the pool
            available = poolset - allocations

            # Generator compacts an ip set into contiguous ranges
            def ipset_to_ranges(ipset):
                first, last = None, None
                for cidr in ipset.iter_cidrs():
                    # Flush the accumulated range when the next CIDR is
                    # not contiguous with it.
                    if last and last + 1 != cidr.first:
                        yield netaddr.IPRange(first, last)
                        first = None
                    first, last = first if first else cidr.first, cidr.last
                if first:
                    yield netaddr.IPRange(first, last)

            # Write the ranges to the db
            for ip_range in ipset_to_ranges(available):
                available_range = models_v2.IPAvailabilityRange(
                    allocation_pool_id=pool['id'],
                    first_ip=str(netaddr.IPAddress(ip_range.first)),
                    last_ip=str(netaddr.IPAddress(ip_range.last)))
                context.session.add(available_range)
diff --git a/neutron/ipam/drivers/neutron_ipam.py b/neutron/ipam/drivers/neutron_ipam.py
new file mode 100755
index 0000000..5df01ce
--- /dev/null
+++ b/neutron/ipam/drivers/neutron_ipam.py
@@ -0,0 +1,449 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron.api.v2 import attributes
+from neutron.common import constants
+from neutron.common import exceptions as q_exc
+from neutron.db import models_v2
+from neutron.ipam import base
+from neutron.ipam.drivers import neutron_db
+from neutron.openstack.common import log as logging
+from neutron.openstack.common import uuidutils
+
+LOG = logging.getLogger(__name__)
+
+
+class NeutronIPAMController(base.IPAMController):
+ def _make_allocation_pools(self, context, backend_subnet, subnet):
+ # Store information about allocation pools and ranges
+ for pool in subnet['allocation_pools']:
+ ip_pool = models_v2.IPAllocationPool(subnet=backend_subnet,
+ first_ip=pool['start'],
+ last_ip=pool['end'])
+ context.session.add(ip_pool)
+ ip_range = models_v2.IPAvailabilityRange(
+ ipallocationpool=ip_pool,
+ first_ip=pool['start'],
+ last_ip=pool['end'])
+ context.session.add(ip_range)
+
+ def create_subnet(self, context, subnet):
+ tenant_id = self._get_tenant_id_for_create(context, subnet)
+ network = self._get_network(context, subnet['network_id'])
+
+ # The 'shared' attribute for subnets is for internal plugin
+ # use only. It is not exposed through the API
+ args = {'tenant_id': tenant_id,
+ 'id': subnet.get('id') or uuidutils.generate_uuid(),
+ 'name': subnet['name'],
+ 'network_id': subnet['network_id'],
+ 'ip_version': subnet['ip_version'],
+ 'cidr': subnet['cidr'],
+ 'enable_dhcp': subnet['enable_dhcp'],
+ 'gateway_ip': subnet['gateway_ip'],
+ 'shared': network.shared}
+ if subnet['ip_version'] == 6 and subnet['enable_dhcp']:
+ if attributes.is_attr_set(subnet.get('ipv6_ra_mode')):
+ args['ipv6_ra_mode'] = subnet['ipv6_ra_mode']
+ if attributes.is_attr_set(subnet.get('ipv6_address_mode')):
+ args['ipv6_address_mode'] = subnet['ipv6_address_mode']
+ backend_subnet = models_v2.Subnet(**args)
+
+ self._make_allocation_pools(context, backend_subnet, subnet)
+ context.session.add(backend_subnet)
+
+ return self._make_subnet_dict(backend_subnet)
+
+ def create_network(self, context, network):
+ return network
+
+ def get_subnet_by_id(self, context, subnet_id):
+ return self._get_subnet(context, subnet_id)
+
+ def update_subnet(self, context, subnet_id, subnet):
+ backend_subnet = self.get_subnet_by_id(context, subnet_id)
+ return backend_subnet
+
+ def _make_subnet_dict(self, subnet, fields=None):
+ res = {'id': subnet['id'],
+ 'name': subnet['name'],
+ 'tenant_id': subnet['tenant_id'],
+ 'network_id': subnet['network_id'],
+ 'ip_version': subnet['ip_version'],
+ 'cidr': subnet['cidr'],
+ 'allocation_pools': [{'start': pool['first_ip'],
+ 'end': pool['last_ip']}
+ for pool in subnet['allocation_pools']],
+ 'gateway_ip': subnet['gateway_ip'],
+ 'enable_dhcp': subnet['enable_dhcp'],
+ 'ipv6_ra_mode': subnet['ipv6_ra_mode'],
+ 'ipv6_address_mode': subnet['ipv6_address_mode'],
+ 'dns_nameservers': [dns['address']
+ for dns in subnet['dns_nameservers']],
+ 'host_routes': [{'destination': route['destination'],
+ 'nexthop': route['nexthop']}
+ for route in subnet['routes']],
+ 'shared': subnet['shared']
+ }
+ # Call auxiliary extend functions, if any
+ self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet)
+
+ return self._fields(res, fields)
+
+ def get_subnets(self, context, filters=None, fields=None,
+ sorts=None, limit=None, marker=None,
+ page_reverse=False):
+ marker_obj = self._get_marker_obj(context, 'subnet', limit, marker)
+ return self._get_collection(context, models_v2.Subnet,
+ self._make_subnet_dict,
+ filters=filters, fields=fields,
+ sorts=sorts,
+ limit=limit,
+ marker_obj=marker_obj,
+ page_reverse=page_reverse)
+
+ def get_subnets_count(self, context, filters=None):
+ return self._get_collection_count(context, models_v2.Subnet,
+ filters=filters)
+
+ def delete_subnet(self, context, backend_subnet):
+ pass
+
+ def force_off_ports(self, context, ports):
+ """Force Off ports on subnet delete event."""
+ for port in ports:
+ query = (context.session.query(models_v2.Port).
+ enable_eagerloads(False).filter_by(id=port.id))
+ if not context.is_admin:
+ query = query.filter_by(tenant_id=context.tenant_id)
+
+ query.delete()
+
+ def allocate_ip(self, context, subnet, host, ip=None):
+ if ip is not None and 'ip_address' in ip:
+ subnet_id = subnet['id']
+ ip_address = {'subnet_id': subnet_id,
+ 'ip_address': ip['ip_address']}
+ neutron_db.allocate_specific_ip(
+ context, subnet_id, ip['ip_address'])
+ return ip_address
+ else:
+ subnets = [subnet]
+ return neutron_db.generate_ip(context, subnets)
+
+ def deallocate_ip(self, context, backend_subnet, host, ip_address):
+ # IPAllocations are automatically handled by cascade deletion
+ pass
+
+ def delete_network(self, context, network_id):
+ pass
+
+ def set_dns_nameservers(self, context, port):
+ pass
+
+
+class NeutronDHCPController(base.DHCPController):
+ def __init__(self, db_mgr=None):
+ if db_mgr is None:
+ db_mgr = neutron_db
+
+ self.db_manager = db_mgr
+
+ def configure_dhcp(self, context, backend_subnet, dhcp_params):
+ # Store information about DNS Servers
+ if dhcp_params['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED:
+ for addr in dhcp_params['dns_nameservers']:
+ ns = models_v2.DNSNameServer(address=addr,
+ subnet_id=backend_subnet['id'])
+ context.session.add(ns)
+ backend_subnet['dns_nameservers'].append(addr)
+
+ # Store host routes
+ if dhcp_params['host_routes'] is not attributes.ATTR_NOT_SPECIFIED:
+ for rt in dhcp_params['host_routes']:
+ route = models_v2.SubnetRoute(
+ subnet_id=backend_subnet['id'],
+ destination=rt['destination'],
+ nexthop=rt['nexthop'])
+ context.session.add(route)
+ backend_subnet['host_routes'].append({
+ 'destination': rt['destination'],
+ 'nexthop': rt['nexthop']})
+
+ def reconfigure_dhcp(self, context, backend_subnet, dhcp_params):
+ changed_dns = False
+ new_dns = []
+ if "dns_nameservers" in dhcp_params:
+ changed_dns = True
+ old_dns_list = self.db_manager.get_dns_by_subnet(
+ context, backend_subnet['id'])
+ new_dns_addr_set = set(dhcp_params["dns_nameservers"])
+ old_dns_addr_set = set([dns['address']
+ for dns in old_dns_list])
+
+ new_dns = list(new_dns_addr_set)
+ for dns_addr in old_dns_addr_set - new_dns_addr_set:
+ for dns in old_dns_list:
+ if dns['address'] == dns_addr:
+ context.session.delete(dns)
+ for dns_addr in new_dns_addr_set - old_dns_addr_set:
+ dns = models_v2.DNSNameServer(
+ address=dns_addr,
+ subnet_id=backend_subnet['id'])
+ context.session.add(dns)
+
+ if len(dhcp_params['dns_nameservers']):
+ del dhcp_params['dns_nameservers']
+
+ def _combine(ht):
+ return ht['destination'] + "_" + ht['nexthop']
+
+ changed_host_routes = False
+ new_routes = []
+ if "host_routes" in dhcp_params:
+ changed_host_routes = True
+ old_route_list = self.db_manager.get_route_by_subnet(
+ context, backend_subnet['id'])
+
+ new_route_set = set([_combine(route)
+ for route in dhcp_params['host_routes']])
+
+ old_route_set = set([_combine(route)
+ for route in old_route_list])
+
+ for route_str in old_route_set - new_route_set:
+ for route in old_route_list:
+ if _combine(route) == route_str:
+ context.session.delete(route)
+ for route_str in new_route_set - old_route_set:
+ route = models_v2.SubnetRoute(
+ destination=route_str.partition("_")[0],
+ nexthop=route_str.partition("_")[2],
+ subnet_id=backend_subnet['id'])
+ context.session.add(route)
+
+ # Gather host routes for result
+ for route_str in new_route_set:
+ new_routes.append(
+ {'destination': route_str.partition("_")[0],
+ 'nexthop': route_str.partition("_")[2]})
+ del dhcp_params['host_routes']
+
+ backend_subnet.update(dhcp_params)
+
+ result = {}
+ if changed_dns:
+ result['new_dns'] = new_dns
+ if changed_host_routes:
+ result['new_routes'] = new_routes
+ return result
+
+ def bind_mac(self, context, backend_subnet, ip_address, mac_address):
+ pass
+
+ def unbind_mac(self, context, backend_subnet, ip_address):
+ pass
+
+ def dhcp_is_enabled(self, context, backend_subnet):
+ pass
+
+ def disable_dhcp(self, context, backend_subnet):
+ pass
+
+ def get_dhcp_ranges(self, context, backend_subnet):
+ pass
+
+
+class NeutronDNSController(base.DNSController):
+ """DNS controller for standard neutron behavior is not implemented because
+ neutron does not provide that functionality
+ """
+ def bind_names(self, context, backend_port):
+ pass
+
+ def unbind_names(self, context, backend_port):
+ pass
+
+ def create_dns_zones(self, context, backend_subnet):
+ pass
+
+ def delete_dns_zones(self, context, backend_subnet):
+ pass
+
+ def disassociate_floatingip(self, context, floatingip, port_id):
+ pass
+
+
+class NeutronIPAM(base.IPAMManager):
+ def __init__(self, dhcp_controller=None, dns_controller=None,
+ ipam_controller=None, db_mgr=None):
+ # These should be initialized in derived DDI class
+ if dhcp_controller is None:
+ dhcp_controller = NeutronDHCPController()
+
+ if dns_controller is None:
+ dns_controller = NeutronDNSController()
+
+ if ipam_controller is None:
+ ipam_controller = NeutronIPAMController()
+
+ if db_mgr is None:
+ db_mgr = neutron_db
+
+ self.dns_controller = dns_controller
+ self.ipam_controller = ipam_controller
+ self.dhcp_controller = dhcp_controller
+ self.db_manager = db_mgr
+
+ def create_subnet(self, context, subnet):
+ with context.session.begin(subtransactions=True):
+ # Allocate IP addresses. Create allocation pools only
+ backend_subnet = self.ipam_controller.create_subnet(context,
+ subnet)
+ self.dns_controller.create_dns_zones(context, backend_subnet)
+ # Configure DHCP
+ dhcp_params = subnet
+ self.dhcp_controller.configure_dhcp(context, backend_subnet,
+ dhcp_params)
+
+ self.ipam_controller.get_subnet_by_id(context,
+ backend_subnet['id'])
+ return backend_subnet
+
+ def update_subnet(self, context, subnet_id, subnet):
+ with context.session.begin(subtransactions=True):
+ backend_subnet = self.ipam_controller.update_subnet(
+ context, subnet_id, subnet)
+
+ # Reconfigure DHCP for subnet
+ dhcp_params = subnet
+ dhcp_changes = self.dhcp_controller.reconfigure_dhcp(
+ context, backend_subnet, dhcp_params)
+
+ return backend_subnet, dhcp_changes
+
+ def delete_subnet(self, context, subnet):
+ if isinstance(subnet, models_v2.Subnet):
+ subnet_id = subnet.id
+ else:
+ subnet_id = subnet
+
+ with context.session.begin(subtransactions=True):
+ backend_subnet = self.ipam_controller.get_subnet_by_id(context,
+ subnet_id)
+ subnet_ports = self.db_manager.get_subnet_ports(context, subnet_id)
+
+ ports_to_remove = [port for port in subnet_ports if
+ neutron_db.get_subnets_by_port_id(
+ context, port.id) <= 1]
+
+ has_ports_allocated = not all(
+ p.device_owner == constants.DEVICE_OWNER_DHCP
+ for p in subnet_ports)
+
+ if has_ports_allocated:
+ raise q_exc.SubnetInUse(subnet_id=backend_subnet['id'])
+
+ self.ipam_controller.force_off_ports(context, ports_to_remove)
+ self.dns_controller.delete_dns_zones(context, backend_subnet)
+ self.dhcp_controller.disable_dhcp(context, backend_subnet)
+ self.ipam_controller.delete_subnet(context, backend_subnet)
+
+ return subnet_id
+
+ def delete_subnets_by_network(self, context, network_id):
+ with context.session.begin(subtransactions=True):
+ subnets = neutron_db.get_subnets_by_network(
+ context, network_id)
+ for subnet in subnets:
+ self.delete_subnet(context, subnet['id'])
+
+ def get_subnet_by_id(self, context, subnet_id):
+ with context.session.begin(subtransactions=True):
+ return self.ipam_controller.get_subnet_by_id(context, subnet_id)
+
+ def allocate_ip(self, context, host, ip):
+ with context.session.begin(subtransactions=True):
+ subnet_id = ip.get('subnet_id', None)
+ if not subnet_id:
+ LOG.debug(_("ip object must contain subnet_id: %s") % ip)
+ raise q_exc.BadRequest(resource='ip', msg='subnet_id is required')
+ backend_subnet = self.ipam_controller.get_subnet_by_id(context,
+ subnet_id)
+ ip_address = self.ipam_controller.allocate_ip(
+ context,
+ backend_subnet,
+ host,
+ ip)
+
+ LOG.debug('IPAMManager allocate IP: %s' % ip_address)
+ mac_address = host['mac_address']
+ self.dhcp_controller.bind_mac(
+ context,
+ backend_subnet,
+ ip_address,
+ mac_address)
+ return ip_address
+
+ def deallocate_ip(self, context, host, ip):
+ with context.session.begin(subtransactions=True):
+ subnet_id = ip['subnet_id']
+ ip_address = ip['ip_address']
+ backend_subnet = self.ipam_controller.get_subnet_by_id(
+ context, subnet_id)
+ self.dhcp_controller.unbind_mac(
+ context,
+ backend_subnet,
+ ip_address)
+ self.ipam_controller.deallocate_ip(
+ context,
+ backend_subnet,
+ host,
+ ip_address)
+
+ def get_subnets(self, context, filters=None, fields=None,
+ sorts=None, limit=None, marker=None,
+ page_reverse=False):
+ return self.ipam_controller.get_subnets(context, filters, fields,
+ sorts, limit, marker,
+ page_reverse)
+
+ def create_network(self, context, network):
+ return self.ipam_controller.create_network(context, network)
+
+ def delete_network(self, context, network_id):
+ self.ipam_controller.delete_network(
+ context, network_id)
+
+ def create_port(self, context, port):
+ self.dns_controller.bind_names(context, port)
+ if constants.DEVICE_OWNER_DHCP == port['device_owner']:
+ self.ipam_controller.set_dns_nameservers(context, port)
+
+ def update_port(self, context, port):
+ self.dns_controller.bind_names(context, port)
+
+ def delete_port(self, context, port):
+ self.dns_controller.unbind_names(context, port)
+
+ def associate_floatingip(self, context, floatingip, port):
+ self.create_port(context, port)
+
+ def disassociate_floatingip(self, context, floatingip, port_id):
+ self.dns_controller.disassociate_floatingip(context, floatingip,
+ port_id)
+
+ def get_additional_network_dict_params(self, ctx, network_id):
+ return {}
diff --git a/neutron/manager.py b/neutron/manager.py
index 3a21f61..6986a34 100644
--- a/neutron/manager.py
+++ b/neutron/manager.py
@@ -123,6 +123,11 @@ class NeutronManager(object):
# the rest of service plugins
self.service_plugins = {constants.CORE: self.plugin}
self._load_service_plugins()
+ # Load IPAMManager driver
+ ipam_driver_name = cfg.CONF.ipam_driver
+ LOG.info(_("Loading ipam driver: %s"), ipam_driver_name)
+ self.ipam = self._get_plugin_instance('neutron.ipam_drivers',
+ ipam_driver_name)
def _get_plugin_instance(self, namespace, plugin_provider):
try:
@@ -223,3 +228,7 @@ class NeutronManager(object):
# Return weakrefs to minimize gc-preventing references.
return dict((x, weakref.proxy(y))
for x, y in cls.get_instance().service_plugins.iteritems())
+
+ @classmethod
+ def get_ipam(cls):
+ return cls.get_instance().ipam
diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py
index 9dcf6f8..1fb3160 100644
--- a/neutron/plugins/ml2/plugin.py
+++ b/neutron/plugins/ml2/plugin.py
@@ -650,6 +650,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
mech_context)
self.type_manager.release_network_segments(session, id)
+
+ manager.NeutronManager.get_ipam().delete_network(
+ context, id)
+
record = self._get_network(context, id)
LOG.debug(_("Deleting network record %s"), record)
session.delete(record)
@@ -766,6 +770,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
self.mechanism_manager.delete_subnet_precommit(
mech_context)
+ manager.NeutronManager.get_ipam().delete_subnet(
+ context, id)
+
LOG.debug(_("Deleting subnet record"))
session.delete(record)
diff --git a/neutron/tests/unit/agent/linux/test_dhcp_relay.py b/neutron/tests/unit/agent/linux/test_dhcp_relay.py
new file mode 100644
index 0000000..576ad82
--- /dev/null
+++ b/neutron/tests/unit/agent/linux/test_dhcp_relay.py
@@ -0,0 +1,63 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import mock
+
+from neutron.agent.linux import dhcp_relay
+
+from neutron.agent.common import config
+from neutron.common import exceptions as exc
+from neutron.tests import base
+
+
+class DhcpDnsProxyTestCase(base.BaseTestCase):
+ def setUp(self):
+ super(DhcpDnsProxyTestCase, self).setUp()
+
+ def _get_config(self):
+ conf = mock.MagicMock()
+ conf.interface_driver = "neutron.agent.linux.interface.NullDriver"
+ config.register_interface_driver_opts_helper(conf)
+ return conf
+
+ def test_raises_exception_on_unset_config_options(self):
+ conf = self._get_config()
+ network = mock.Mock()
+ self.assertRaises(exc.InvalidConfigurationOption,
+ dhcp_relay.DhcpDnsProxy, conf, network)
+
+ def test_implements_get_isolated_subnets(self):
+ network = mock.Mock()
+ conf = self._get_config()
+ conf.external_dhcp_servers = ['1.1.1.1']
+ conf.external_dns_servers = ['1.1.1.2']
+
+ proxy = dhcp_relay.DhcpDnsProxy(conf, network)
+ try:
+ self.assertTrue(callable(proxy.get_isolated_subnets))
+ except AttributeError as ex:
+ self.fail(ex)
+
+ def test_implements_should_enable_metadata(self):
+ network = mock.Mock()
+ conf = self._get_config()
+ conf.external_dhcp_servers = ['1.1.1.1']
+ conf.external_dns_servers = ['1.1.1.2']
+
+ proxy = dhcp_relay.DhcpDnsProxy(conf, network)
+
+ try:
+ self.assertTrue(callable(proxy.should_enable_metadata))
+ except AttributeError as ex:
+ self.fail(ex)
diff --git a/neutron/tests/unit/ipam/__init__.py b/neutron/tests/unit/ipam/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/neutron/tests/unit/ipam/drivers/__init__.py b/neutron/tests/unit/ipam/drivers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/neutron/tests/unit/ipam/drivers/infoblox/__init__.py b/neutron/tests/unit/ipam/drivers/infoblox/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/neutron/tests/unit/ipam/drivers/infoblox/test_config.py b/neutron/tests/unit/ipam/drivers/infoblox/test_config.py
new file mode 100755
index 0000000..1959efe
--- /dev/null
+++ b/neutron/tests/unit/ipam/drivers/infoblox/test_config.py
@@ -0,0 +1,714 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+import operator
+
+import mock
+from testtools import matchers
+
+from neutron.db.infoblox import infoblox_db
+from neutron.db.infoblox import models
+from neutron.ipam.drivers.infoblox import config
+from neutron.ipam.drivers.infoblox import exceptions
+from neutron.ipam.drivers.infoblox import objects
+from neutron.openstack.common import jsonutils
+from neutron.tests import base
+
+
+class ConfigFinderTestCase(base.BaseTestCase):
+ def test_config_reads_data_from_json(self):
+ valid_config = """
+ [
+ {
+ "condition": "tenant",
+ "is_external": false,
+ "network_view": "{tenant_id}",
+ "dhcp_members": "next-available-member",
+ "require_dhcp_relay": true,
+ "domain_suffix_pattern": "local.test.com",
+ "hostname_pattern": "%{instance_id}"
+ },
+ {
+ "condition": "global",
+ "is_external": true,
+ "dhcp_members": "member1.infoblox.com",
+ "domain_suffix_pattern": "test.com",
+ "hostname_pattern": "%{instance_id}"
+ }
+ ]
+ """
+
+ subnet = {
+ 'network_id': 'some-net-id',
+ 'cidr': '192.168.1.0/24',
+ 'tenant_id': 'some-tenant-id'
+ }
+ context = mock.MagicMock()
+
+ cfg = config.ConfigFinder(stream=io.BytesIO(valid_config),
+ member_manager=mock.Mock())
+ cfg.configure_members = mock.Mock()
+ subnet_config = cfg.find_config_for_subnet(context, subnet)
+
+ self.assertIsNotNone(subnet_config)
+ self.assertIsInstance(subnet_config, config.Config)
+
+ def test_throws_error_on_invalid_configuration(self):
+ invalid_config = """
+ [
+ {
+ "condition": "tenant",
+ "is_external": false,
+ "network_view": "{tenant_id}",
+ "dhcp_members": "next-available-member",
+ "require_dhcp_relay": True,
+ "domain_suffix_pattern": "local.test.com",
+ "hostname_pattern": "{instance_id}"
+ },
+ {
+ "condition": "global"
+ "is_external": true,
+ "dhcp_members": "member1.infoblox.com",
+ "domain_suffix_pattern": "test.com",
+ "hostname_pattern": "{instance_id}"
+ }
+ ]
+ """
+
+ # configuration is considered invalid if JSON parser has failed
+ self.assertRaises(exceptions.InfobloxConfigException,
+ config.ConfigFinder,
+ stream=io.BytesIO(invalid_config),
+ member_manager=mock.Mock())
+
+ @mock.patch('neutron.db.infoblox.infoblox_db.is_network_external')
+ def test_external_network_matches_first_external_config(self, is_external):
+ expected_condition = 'global'
+ external_config = """
+ [
+ {{
+ "condition": "tenant",
+ "is_external": false,
+ "network_view": "{{tenant_id}}",
+ "dhcp_members": "next-available-member",
+ "require_dhcp_relay": true,
+ "domain_suffix_pattern": "local.test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }},
+ {{
+ "condition": "{expected_condition:s}",
+ "is_external": true,
+ "dhcp_members": "member1.infoblox.com",
+ "domain_suffix_pattern": "test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }},
+ {{
+ "condition": "tenant",
+ "is_external": true,
+ "dhcp_members": "member1.infoblox.com",
+ "domain_suffix_pattern": "test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }}
+ ]
+ """.format(**locals())
+ context = mock.Mock()
+ subnet = {
+ 'network_id': 'some-net-id',
+ 'cidr': '192.168.1.0/24',
+ 'tenant_id': 'some-tenant-id'
+ }
+ is_external.return_value = True
+ cfg = config.ConfigFinder(stream=io.BytesIO(external_config),
+ member_manager=mock.Mock())
+ cfg._is_member_registered = mock.Mock(return_value=True)
+ config_for_subnet = cfg.find_config_for_subnet(context, subnet)
+
+ self.assertThat(config_for_subnet.condition,
+ matchers.Equals(expected_condition))
+
+ @mock.patch('neutron.db.infoblox.infoblox_db.is_network_external')
+ def test_subnet_range_condition_matches(self, is_external):
+ expected_cidr = '10.0.0.0/24'
+ expected_condition = 'subnet_range:{0}'.format(expected_cidr)
+ external_config = """
+ [
+ {{
+ "condition": "{expected_condition:s}",
+ "is_external": false,
+ "dhcp_members": "member1.infoblox.com",
+ "domain_suffix_pattern": "test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }},
+ {{
+ "condition": "tenant",
+ "is_external": false,
+ "network_view": "{{tenant_id}}",
+ "dhcp_members": "next-available-member",
+ "require_dhcp_relay": true,
+ "domain_suffix_pattern": "local.test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }},
+ {{
+ "condition": "tenant",
+ "is_external": true,
+ "dhcp_members": "member1.infoblox.com",
+ "domain_suffix_pattern": "test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }}
+ ]
+ """.format(**locals())
+ context = mock.Mock()
+ subnet = {
+ 'network_id': 'some-net-id',
+ 'cidr': expected_cidr,
+ 'tenant_id': 'some-tenant-id'
+ }
+ is_external.return_value = False
+ cfg = config.ConfigFinder(stream=io.BytesIO(external_config),
+ member_manager=mock.Mock())
+ cfg._is_member_registered = mock.Mock(return_value=True)
+ config_for_subnet = cfg.find_config_for_subnet(context, subnet)
+
+ self.assertThat(config_for_subnet.condition,
+ matchers.Equals(expected_condition))
+
+ @mock.patch('neutron.db.infoblox.infoblox_db.is_network_external')
+ def test_tenant_id_condition_matches(self, is_external_mock):
+ expected_tenant_id = 'some-tenant-id'
+ expected_condition = 'tenant_id:{0}'.format(expected_tenant_id)
+ tenant_id_conf = """
+ [
+ {{
+ "condition": "{expected_condition:s}",
+ "is_external": false,
+ "dhcp_members": "member1.infoblox.com",
+ "domain_suffix_pattern": "test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }},
+ {{
+ "condition": "global",
+ "is_external": false,
+ "network_view": "{{tenant_id}}",
+ "dhcp_members": "next-available-member",
+ "require_dhcp_relay": true,
+ "domain_suffix_pattern": "local.test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }},
+ {{
+ "condition": "tenant",
+ "is_external": true,
+ "dhcp_members": "member1.infoblox.com",
+ "domain_suffix_pattern": "test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }}
+ ]
+ """.format(**locals())
+ context = mock.Mock()
+ subnet = {
+ 'network_id': 'some-net-id',
+ 'cidr': '10.0.0.0/24',
+ 'tenant_id': expected_tenant_id
+ }
+ is_external_mock.return_value = False
+ cfg = config.ConfigFinder(stream=io.BytesIO(tenant_id_conf),
+ member_manager=mock.Mock())
+ cfg._is_member_registered = mock.Mock(return_value=True)
+ config_for_subnet = cfg.find_config_for_subnet(context, subnet)
+
+ self.assertThat(config_for_subnet.condition,
+ matchers.Equals(expected_condition))
+
+ def test_raises_on_invalid_condition(self):
+ config_template = """
+ [
+ {{
+ "condition": "{condition}",
+ "is_external": false,
+ "dhcp_members": "member1.infoblox.com",
+ "domain_suffix_pattern": "test.com",
+ "hostname_pattern": "{{instance_id}}"
+ }}
+ ]
+ """
+
+ member_manager = mock.Mock()
+ for valid in config.ConfigFinder.VALID_CONDITIONS:
+ valid_conf = config_template.format(condition=valid)
+ try:
+ config.ConfigFinder(io.BytesIO(valid_conf), member_manager)
+ except exceptions.InfobloxConfigException as e:
+ msg = 'Unexpected {error_type} for {config}'.format(
+ error_type=type(e), config=valid_conf)
+ self.fail(msg)
+
+ invalid_cof = config_template.format(condition='invalid-condition')
+ self.assertRaises(exceptions.InfobloxConfigException,
+ config.ConfigFinder, io.BytesIO(invalid_cof),
+ member_manager)
+
+ def test_raises_if_no_suitable_config_found(self):
+ cfg = """
+ [
+ {
+ "condition": "tenant_id:wrong-id",
+ "is_external": false
+ }
+ ]
+ """
+ context = mock.MagicMock()
+ subnet = mock.MagicMock()
+
+ cf = config.ConfigFinder(io.BytesIO(cfg), member_manager=mock.Mock())
+ cf._is_member_registered = mock.Mock(return_value=True)
+ self.assertRaises(exceptions.InfobloxConfigException,
+ cf.find_config_for_subnet, context, subnet)
+
+
+class ConfigTestCase(base.BaseTestCase):
+ def test_exception_raised_on_missing_condition(self):
+ context = mock.Mock()
+ subnet = mock.Mock()
+
+ self.assertRaises(exceptions.InfobloxConfigException,
+ config.Config, {}, context, subnet,
+ member_manager=mock.Mock())
+
+ def test_default_values_are_set(self):
+ context = mock.Mock()
+ subnet = mock.Mock()
+
+ cfg = config.Config({'condition': 'global'}, context, subnet,
+ member_manager=mock.Mock())
+
+ self.assertFalse(cfg.is_external)
+ self.assertFalse(cfg.require_dhcp_relay)
+ self.assertEqual(cfg.network_view, 'default')
+ self.assertEqual(cfg.dns_view, 'default')
+ self.assertIsNone(cfg.network_template)
+ self.assertIsNone(cfg.ns_group)
+ self.assertEqual(cfg.hostname_pattern,
+ 'host-{ip_address}.{subnet_name}')
+ self.assertEqual(cfg.domain_suffix_pattern, 'global.com')
+
+ def test_dhcp_member_is_returned_as_is_if_explicitly_set(self):
+ context = mock.Mock()
+ subnet = mock.Mock()
+
+ expected_member_name = 'some-dhcp-member.com'
+ expected_dhcp_member = [objects.Member(name=expected_member_name,
+ ip='some-ip')]
+
+ conf_dict = {
+ 'condition': 'global',
+ 'dhcp_members': expected_member_name
+ }
+
+ member_manager = mock.Mock()
+ member_manager.find_members.return_value = expected_dhcp_member
+ cfg = config.Config(conf_dict, context, subnet, member_manager)
+
+ self.assertEqual(cfg.dhcp_members, expected_dhcp_member)
+ assert member_manager.find_members.called
+ assert not member_manager.next_available.called
+ assert not member_manager.reserve_member.called
+
+ def test_dhcp_member_is_taken_from_member_config_if_next_available(self):
+ context = mock.Mock()
+ subnet = mock.Mock()
+
+ expected_member = objects.Member('some-member-ip', 'some-member-name')
+
+ member_manager = mock.Mock()
+ member_manager.find_members.return_value = None
+ member_manager.next_available.return_value = expected_member
+
+ conf_dict = {
+ 'condition': 'global',
+ 'dhcp_members': config.Config.NEXT_AVAILABLE_MEMBER
+ }
+
+ cfg = config.Config(conf_dict, context, subnet, member_manager)
+ members = cfg.reserve_dhcp_members()
+ self.assertEqual(members[0], expected_member)
+ assert member_manager.find_members.called_once
+ assert member_manager.next_available.called_once
+ assert member_manager.reserve_member.called_once
+
+ def test_dns_view_joins_net_view_with_default_if_not_default(self):
+ context = mock.Mock()
+ subnet = mock.Mock()
+ member_manager = mock.Mock()
+
+ expected_net_view = 'non-default-net-view'
+ conf_dict = {
+ 'condition': 'global',
+ 'network_view': expected_net_view,
+ }
+
+ cfg = config.Config(conf_dict, context, subnet, member_manager)
+
+ self.assertTrue(cfg.dns_view.startswith('default'))
+ self.assertTrue(cfg.dns_view.endswith(expected_net_view))
+
+ def test_dns_view_is_default_if_netview_is_default(self):
+ context = mock.Mock()
+ subnet = mock.Mock()
+ member_manager = mock.Mock()
+
+ conf_dict = {
+ 'condition': 'global',
+ 'network_view': 'default',
+ }
+
+ cfg = config.Config(conf_dict, context, subnet, member_manager)
+
+ self.assertThat(cfg.dns_view, matchers.Equals(cfg.network_view))
+
+ def test_configured_value_is_returned_for_dns_view(self):
+ context = mock.Mock()
+ subnet = mock.Mock()
+ member_manager = mock.Mock()
+
+ expected_dns_view = 'some-dns-view'
+ conf_dict = {
+ 'condition': 'global',
+ 'dns_view': expected_dns_view
+ }
+
+ cfg = config.Config(conf_dict, context, subnet, member_manager)
+ self.assertEqual(cfg.dns_view, expected_dns_view)
+
+ def test_reserve_dns_members_always_returns_list(self):
+ configs = [
+ {
+ 'dns_members': 'member1',
+ 'condition': 'global'
+ },
+ {
+ 'dns_members': ['member1', 'member2'],
+ 'condition': 'global'
+ },
+ {
+ 'condition': 'global'
+ }
+ ]
+ context = mock.Mock()
+ subnet = mock.Mock()
+ member_manager = mock.Mock()
+
+ for conf in configs:
+ cfg = config.Config(conf, context, subnet, member_manager)
+
+ reserved_members = cfg.reserve_dns_members()
+
+ self.assertTrue(isinstance(reserved_members, list))
+
+ def test_reserve_members_list(self):
+ def mock_get_member(member_name):
+ return objects.Member(ip='10.20.30.40', name=member_name)
+
+ conf = {'condition': 'global'}
+ members = ['member40.com', 'member41.com']
+
+ context = mock.Mock()
+ subnet = mock.Mock()
+
+ member_manager = mock.Mock()
+ member_manager.get_member = mock_get_member
+ member_manager.find_members = mock.Mock(return_value=None)
+
+ cfg = config.Config(conf, context, subnet, member_manager)
+ cfg._dhcp_members = members
+ reserved_members = cfg.reserve_dhcp_members()
+
+ self.assertEqual(
+ reserved_members,
+ [
+ objects.Member(ip='10.20.30.40', name='member40.com'),
+ objects.Member(ip='10.20.30.40', name='member41.com')
+ ]
+ )
+
+ def test_subnet_update_not_allowed_if_subnet_name_is_in_pattern(self):
+ context = mock.Mock()
+ subnet = mock.Mock()
+ subnet_new = mock.Mock()
+ member_manager = mock.Mock()
+
+ cfg = {
+ 'condition': 'global',
+ 'hostname_pattern': 'host-{ip_address}',
+ 'domain_suffix_pattern': '{subnet_name}.global.com'
+ }
+
+ conf = config.Config(cfg, context, subnet, member_manager)
+ self.assertRaises(exceptions.OperationNotAllowed,
+ conf.verify_subnet_update_is_allowed, subnet_new)
+
+ def test_subnet_update_is_allowed_if_subnet_name_is_not_in_pattern(self):
+ context = mock.Mock()
+ subnet = mock.Mock()
+ subnet_new = mock.Mock()
+ member_manager = mock.Mock()
+
+ allowed_suffixes = [
+ 'network_id',
+ 'subnet_id',
+ 'user_id',
+ 'tenant_id',
+ 'ip_address',
+ 'network_name',
+ 'instance_id'
+ ]
+
+ for allowed_suffix in allowed_suffixes:
+            domain_pattern = '{{{0}}}.global.com'.format(allowed_suffix)
+
+ cfg = {
+ 'condition': 'global',
+ 'hostname_pattern': 'host-{ip_address}',
+ 'domain_suffix_pattern': domain_pattern
+ }
+
+ conf = config.Config(cfg, context, subnet, member_manager)
+ try:
+ conf.verify_subnet_update_is_allowed(subnet_new)
+ except exceptions.OperationNotAllowed as e:
+ self.fail('Unexpected exception {}'.format(e))
+
+ def test_same_configs_are_equal(self):
+ member_manager = mock.Mock()
+ context = mock.Mock()
+ subnet = mock.Mock()
+
+ cfg = {
+ 'condition': 'global',
+ 'network_view': 'netview',
+ 'dns_view': 'dnsview',
+ 'dhcp_members': ['m1', 'm2', 'm3'],
+ 'dns_members': ['m1', 'm2', 'm3']
+ }
+
+ c1 = config.Config(cfg, context, subnet, member_manager)
+ c2 = config.Config(cfg, context, subnet, member_manager)
+
+ self.assertTrue(c1 == c1)
+ self.assertTrue(c1 == c2)
+ self.assertTrue(c2 == c1)
+ self.assertTrue(c2 == c2)
+ self.assertTrue(c1 != object())
+ self.assertTrue(c2 != object())
+
+ def test_same_configs_are_not_added_to_set(self):
+ member_manager = mock.Mock()
+ context = mock.Mock()
+ subnet = mock.Mock()
+
+ cfg = {
+ 'condition': 'global',
+ 'network_view': 'netview',
+ 'dns_view': 'dnsview',
+ 'dhcp_members': ['m1', 'm2', 'm3'],
+ 'dns_members': ['m1', 'm2', 'm3']
+ }
+
+ s = set()
+
+ for _ in xrange(10):
+ s.add(config.Config(cfg, context, subnet, member_manager))
+
+ self.assertEqual(len(s), 1)
+
+ cfg['condition'] = 'tenant'
+ s.add(config.Config(cfg, context, subnet, member_manager))
+ self.assertEqual(len(s), 2)
+
+ s.add(config.Config(cfg, context, subnet, member_manager))
+ self.assertEqual(len(s), 2)
+
+
+class MemberManagerTestCase(base.BaseTestCase):
+ def test_raises_error_if_no_config_file(self):
+ self.assertRaises(exceptions.InfobloxConfigException,
+ config.MemberManager)
+
+ def test_returns_next_available_member(self):
+ context = mock.MagicMock()
+ member_config = [{"name": "member%d" % i,
+ "member_name": "member%d" % i,
+ "ipv4addr": "192.168.1.%d" % i,
+ "ipv6addr": "2001:DB8::%s" % i}
+ for i in xrange(1, 5)]
+
+ avail_members = []
+ for i in xrange(1, 5):
+ member = type('Member', (object,), {})
+ member.member_name = "member%d" % i
+ member.member_type = models.DHCP_MEMBER_TYPE
+ avail_members.append(member)
+
+ avail_member = avail_members[0]
+
+ mm = config.MemberManager(io.BytesIO(jsonutils.dumps(member_config)))
+
+ with mock.patch.object(infoblox_db, 'get_available_member',
+ mock.Mock(return_value=avail_member)):
+ available_member = mm.next_available(context, models.DHCP_MEMBER_TYPE)
+ self.assertIn(available_member.ip,
+ map(operator.itemgetter('ipv4addr'), member_config))
+
+ def test_raises_no_member_available_if_all_members_used(self):
+ context = mock.MagicMock()
+ member_config = [{"name": "member%d" % i,
+ "ipv4addr": "192.168.1.%d" % i,
+ "ipv6addr": "2001:DB8::%s" % i}
+ for i in xrange(1, 5)]
+
+ used_members = [member_config[i]['name']
+ for i in xrange(len(member_config))]
+
+ mm = config.MemberManager(io.BytesIO(jsonutils.dumps(member_config)))
+
+ with mock.patch.object(infoblox_db, 'get_used_members',
+ mock.Mock(return_value=used_members)):
+ self.assertRaises(exceptions.InfobloxConfigException,
+ mm.next_available,
+ context,
+ models.DHCP_MEMBER_TYPE)
+
+ def test_reserve_member_stores_member_in_db(self):
+ context = mock.Mock()
+ mapping = 'some-mapping-value'
+ member_name = 'member1'
+ member_config = [{"name": member_name,
+ "ipv4addr": "192.168.1.1",
+ "ipv6addr": "2001:DB8::1"}]
+
+ mm = config.MemberManager(io.BytesIO(jsonutils.dumps(member_config)))
+
+ with mock.patch.object(infoblox_db, 'attach_member') as attach_mock:
+ mm.reserve_member(context, mapping, member_name,
+ models.DHCP_MEMBER_TYPE)
+
+ attach_mock.assert_called_once_with(
+ context, mapping, member_name, models.DHCP_MEMBER_TYPE)
+
+ def test_finds_member_for_mapping(self):
+ context = mock.Mock()
+ mapping = 'some-mapping-value'
+ expected_member_name = 'member1'
+ expected_ip = '10.0.0.1'
+ expected_ipv6 = '2001:DB8::3'
+ expected_map_id = None
+
+ expected_member = type('Member', (object,), {})
+ expected_member.member_name = expected_member_name
+ expected_member.map_id = expected_map_id
+ expected_member.member_type = models.DHCP_MEMBER_TYPE
+
+ expected_members = [expected_member]
+
+ mm = config.MemberManager(
+ io.BytesIO(jsonutils.dumps([{'name': expected_member_name,
+ 'ipv4addr': expected_ip,
+ 'ipv6addr': expected_ipv6,
+ 'map_id': expected_map_id}])))
+
+ with mock.patch.object(infoblox_db, 'get_members') as get_mock:
+ get_mock.return_value = expected_members
+ members = mm.find_members(context, mapping,
+ models.DHCP_MEMBER_TYPE)
+
+ self.assertEqual(expected_ip, members[0].ip)
+ self.assertEqual(expected_members[0].member_name,
+ members[0].name)
+
+ def test_builds_member_from_config(self):
+ ip = 'some-ip'
+ name = 'some-name'
+
+ mm = config.MemberManager(
+ io.BytesIO(jsonutils.dumps([{'name': name,
+ 'ipv4addr': ip,
+ 'ipv6addr': ip}])))
+
+ m = mm.get_member(name)
+
+ self.assertEqual(m.name, name)
+ self.assertEqual(m.ip, ip)
+
+ def test_raises_member_not_available_if_member_is_not_in_config(self):
+ ip = 'some-ip'
+ actual_name = 'some-name'
+ search_for_name = 'some-other-name'
+
+ mm = config.MemberManager(
+ io.BytesIO(jsonutils.dumps([{'name': actual_name,
+ 'ipv4addr': ip,
+ 'ipv6addr': ip}])))
+
+ self.assertRaises(exceptions.InfobloxConfigException, mm.get_member,
+ search_for_name)
+
+ def test_member_marked_as_unavailable(self):
+ expected_ip = "192.168.1.2"
+ expected_ipv6 = "2001:DB8::3"
+ expected_name = "available_member"
+ member_config = [{"name": expected_name,
+ "ipv4addr": expected_ip,
+ "ipv6addr": expected_ipv6},
+ {"name": "unavailable_member",
+ "ipv4addr": "192.168.1.3",
+ "ipv6addr": "2001:DB8::3",
+ "is_available": False}]
+
+ expected_member = objects.Member(ip=expected_ip, name=expected_name)
+ mm = config.MemberManager(io.BytesIO(jsonutils.dumps(member_config)))
+
+ self.assertEqual(1, len(mm.configured_members))
+ self.assertEqual(expected_member, mm.configured_members[0])
+
+    def test_config_mismatch_tenant_id_value(self):
+        invalid_config = """
+        [
+            {
+                "condition": "tenant_id"
+            }
+        ]
+        """
+
+        self.assertRaises(
+            exceptions.InfobloxConfigException,
+            config.ConfigFinder,
+            stream=io.BytesIO(invalid_config),
+            member_manager=mock.Mock()
+        )
+
+    def test_config_mismatch_subnet_range_value(self):
+        invalid_config = """
+        [
+            {
+                "condition": "subnet_range"
+            }
+        ]
+        """
+
+        self.assertRaises(
+            exceptions.InfobloxConfigException,
+            config.ConfigFinder,
+            stream=io.BytesIO(invalid_config),
+            member_manager=mock.Mock()
+        )
diff --git a/neutron/tests/unit/ipam/drivers/infoblox/test_connector.py b/neutron/tests/unit/ipam/drivers/infoblox/test_connector.py
new file mode 100755
index 0000000..e6e4dcf
--- /dev/null
+++ b/neutron/tests/unit/ipam/drivers/infoblox/test_connector.py
@@ -0,0 +1,266 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import requests
+from requests import exceptions as req_exc
+
+from neutron.ipam.drivers.infoblox import connector
+from neutron.ipam.drivers.infoblox import exceptions
+from neutron.tests import base
+
+
+valid_config = mock.Mock()
+valid_config.infoblox_wapi = 'http://localhost'
+valid_config.infoblox_username = 'user'
+valid_config.infoblox_password = 'pass'
+valid_config.infoblox_sslverify = False
+valid_config.infoblox_http_max_retries = 3
+
+
+class UrlMatcher(object):
+ def __init__(self, url, obj):
+ self.url = url
+ self.obj = obj
+
+ def __eq__(self, actual_url):
+ return self.url in actual_url and self.obj in actual_url
+
+
+class TestInfobloxConnector(base.BaseTestCase):
+ def setUp(self):
+ super(TestInfobloxConnector, self).setUp()
+ self.config(infoblox_wapi='https://infoblox.example.org/wapi/v1.1/')
+ self.config(infoblox_username='admin')
+ self.config(infoblox_password='password')
+ self.connector = connector.Infoblox()
+
+ def test_throws_error_on_username_not_set(self):
+ fake_conf = mock.Mock()
+ fake_conf.infoblox_wapi = 'http://localhost'
+ fake_conf.infoblox_username = None
+ fake_conf.infoblox_password = 'password'
+
+ with mock.patch.object(connector.cfg, 'CONF', fake_conf):
+ self.assertRaises(exceptions.InfobloxIsMisconfigured,
+ connector.Infoblox)
+
+ def test_throws_error_on_password_not_set(self):
+ fake_conf = mock.Mock()
+ fake_conf.infoblox_wapi = 'http://localhost'
+ fake_conf.infoblox_username = 'user'
+ fake_conf.infoblox_password = None
+
+ with mock.patch.object(connector.cfg, 'CONF', fake_conf):
+ self.assertRaises(exceptions.InfobloxIsMisconfigured,
+ connector.Infoblox)
+
+ def test_throws_error_on_wapi_url_not_set(self):
+ fake_conf = mock.Mock()
+ fake_conf.infoblox_wapi = None
+ fake_conf.infoblox_username = 'user'
+ fake_conf.infoblox_password = 'pass'
+
+ with mock.patch.object(connector.cfg, 'CONF', fake_conf):
+ self.assertRaises(exceptions.InfobloxIsMisconfigured,
+ connector.Infoblox)
+
+ @mock.patch.object(connector.cfg, 'CONF', valid_config)
+ def test_create_object(self):
+ objtype = 'network'
+ payload = {'ip': '0.0.0.0'}
+
+ with mock.patch.object(requests.Session, 'post',
+ return_value=mock.Mock()) as patched_create:
+ patched_create.return_value.status_code = 201
+ patched_create.return_value.content = '{}'
+ self.connector.create_object(objtype, payload)
+ patched_create.assert_called_once_with(
+ 'https://infoblox.example.org/wapi/v1.1/network',
+ data='{"ip": "0.0.0.0"}',
+ headers={'Content-type': 'application/json'},
+ timeout=60,
+ verify=False
+ )
+
+ def test_create_object_with_extattrs(self):
+ objtype = 'network'
+ payload = {'ip': '0.0.0.0',
+ 'extattrs': {'Subnet ID': {'value': 'fake_subnet_id'}}}
+ with mock.patch.object(requests.Session, 'post',
+ return_value=mock.Mock()) as patched_create:
+ patched_create.return_value.status_code = 201
+ patched_create.return_value.content = '{}'
+ self.connector.create_object(objtype, payload)
+ patched_create.assert_called_once_with(
+ 'https://infoblox.example.org/wapi/v1.1/network',
+ data='{"ip": "0.0.0.0", "extattrs": {"Subnet ID":'
+ ' {"value": "fake_subnet_id"}}}',
+ headers={'Content-type': 'application/json'},
+ timeout=60,
+ verify=False
+ )
+
+ @mock.patch.object(connector.cfg, 'CONF', valid_config)
+ def test_get_object(self):
+ objtype = 'network'
+ payload = {'ip': '0.0.0.0'}
+
+ with mock.patch.object(requests.Session, 'get',
+ return_value=mock.Mock()) as patched_get:
+ patched_get.return_value.status_code = 200
+ patched_get.return_value.content = '{}'
+ self.connector.get_object(objtype, payload)
+ patched_get.assert_called_once_with(
+ 'https://infoblox.example.org/wapi/v1.1/network?ip=0.0.0.0',
+ headers={'Content-type': 'application/json'},
+ timeout=60,
+ verify=False
+ )
+
+ def test_get_object_in_cloud(self):
+ self.config(infoblox_wapi='https://infoblox.example.org/wapi/v2.0/')
+ self.connector = connector.Infoblox()
+
+ objtype = 'network'
+ payload = {'ip': '0.0.0.0'}
+
+ with mock.patch.object(requests.Session, 'get',
+ return_value=mock.Mock()) as patched_get:
+ patched_get.return_value.status_code = 200
+ patched_get.return_value.content = '{"network": "my-network"}'
+ self.connector.get_object(objtype, payload)
+ patched_get.assert_called_once_with(
+ 'https://infoblox.example.org/wapi/v2.0/network?'
+ 'ip=0.0.0.0&_proxy_search=GM',
+ headers={'Content-type': 'application/json'},
+ timeout=60,
+ verify=False
+ )
+
+ def test_get_objects_with_extattrs_in_cloud(self):
+ self.config(infoblox_wapi='https://infoblox.example.org/wapi/v2.0/')
+ self.connector = connector.Infoblox()
+
+ objtype = 'network'
+ payload = {'ip': '0.0.0.0'}
+ extattrs = {
+ 'Subnet ID': {'value': 'fake_subnet_id'}
+ }
+ with mock.patch.object(requests.Session, 'get',
+ return_value=mock.Mock()) as patched_get:
+ patched_get.return_value.status_code = 200
+ patched_get.return_value.content = '{"network": "my-network"}'
+ self.connector.get_object(objtype, payload, extattrs=extattrs)
+ patched_get.assert_called_once_with(
+ 'https://infoblox.example.org/wapi/v2.0/network?'
+ '*Subnet ID=fake_subnet_id&ip=0.0.0.0&_proxy_search=GM',
+ headers={'Content-type': 'application/json'},
+ timeout=60,
+ verify=False
+ )
+
+ def test_get_objects_with_extattrs(self):
+ objtype = 'network'
+ payload = {'ip': '0.0.0.0'}
+ extattrs = {
+ 'Subnet ID': {'value': 'fake_subnet_id'}
+ }
+ with mock.patch.object(requests.Session, 'get',
+ return_value=mock.Mock()) as patched_get:
+ patched_get.return_value.status_code = 200
+ patched_get.return_value.content = '{}'
+ self.connector.get_object(objtype, payload, extattrs=extattrs)
+ patched_get.assert_called_once_with(
+ 'https://infoblox.example.org/wapi/'
+ 'v1.1/network?*Subnet ID=fake_subnet_id&ip=0.0.0.0',
+ headers={'Content-type': 'application/json'},
+ timeout=60,
+ verify=False
+ )
+
+ @mock.patch.object(connector.cfg, 'CONF', valid_config)
+ def test_update_object(self):
+ ref = 'network'
+ payload = {'ip': '0.0.0.0'}
+
+ with mock.patch.object(requests.Session, 'put',
+ return_value=mock.Mock()) as patched_update:
+ patched_update.return_value.status_code = 200
+ patched_update.return_value.content = '{}'
+ self.connector.update_object(ref, payload)
+ patched_update.assert_called_once_with(
+ 'https://infoblox.example.org/wapi/v1.1/network',
+ data='{"ip": "0.0.0.0"}',
+ headers={'Content-type': 'application/json'},
+ timeout=60,
+ verify=False
+ )
+
+ @mock.patch.object(connector.cfg, 'CONF', valid_config)
+ def test_delete_object(self):
+ ref = 'network'
+ with mock.patch.object(requests.Session, 'delete',
+ return_value=mock.Mock()) as patched_delete:
+ patched_delete.return_value.status_code = 200
+ patched_delete.return_value.content = '{}'
+ self.connector.delete_object(ref)
+ patched_delete.assert_called_once_with(
+ 'https://infoblox.example.org/wapi/v1.1/network',
+ timeout=60,
+ verify=False
+ )
+
+ def test_neutron_exception_is_raised_on_any_request_error(self):
+ # timeout exception raises InfobloxTimeoutError
+ f = mock.Mock()
+ f.__name__ = 'mock'
+ f.side_effect = req_exc.Timeout
+ self.assertRaises(exceptions.InfobloxTimeoutError,
+ connector.reraise_neutron_exception(f))
+
+ # all other request exception raises InfobloxConnectionError
+ supported_exceptions = [req_exc.HTTPError,
+ req_exc.ConnectionError,
+ req_exc.ProxyError,
+ req_exc.SSLError,
+ req_exc.TooManyRedirects,
+ req_exc.InvalidURL]
+
+ for exc in supported_exceptions:
+ f.side_effect = exc
+ self.assertRaises(exceptions.InfobloxConnectionError,
+ connector.reraise_neutron_exception(f))
+
+ def test_non_cloud_api_detection(self):
+ wapi_not_cloud = ('https://infoblox.example.org/wapi/v1.4.1/',
+ 'https://infoblox.example.org/wapi/v1.9/',
+ 'https://wapi.wapi.wap/wapi/v1.99/')
+
+ for url in wapi_not_cloud:
+
+ self.assertFalse(self.connector.is_cloud_wapi(url))
+
+ def test_cloud_api_detection(self):
+ wapi_cloud = ('https://infoblox.example.org/wapi/v2.1/',
+ 'https://infoblox.example.org/wapi/v2.0/',
+ 'https://wapi.wapi.wap/wapi/v2.0.1/',
+ 'https://wapi.wapi.wap/wapi/v3.0/',
+ 'https://wapi.wapi.wap/wapi/v11.0.1/')
+
+ for url in wapi_cloud:
+ self.assertTrue(self.connector.is_cloud_wapi(url))
diff --git a/neutron/tests/unit/ipam/drivers/infoblox/test_dns_controller.py b/neutron/tests/unit/ipam/drivers/infoblox/test_dns_controller.py
new file mode 100755
index 0000000..2d3bcb6
--- /dev/null
+++ b/neutron/tests/unit/ipam/drivers/infoblox/test_dns_controller.py
@@ -0,0 +1,355 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+import taskflow.engines
+
+from neutron.common import constants as neutron_constants
+from neutron.db.infoblox import infoblox_db
+from neutron.ipam.drivers.infoblox import dns_controller
+from neutron.ipam.drivers.infoblox import infoblox_ipam
+
+from neutron.plugins.common import constants as plugins_constants
+from neutron.tests import base
+
+
+class SubstringMatcher(object):
+ def __init__(self, names):
+ if not isinstance(names, list):
+ names = [names]
+ self.names = names
+
+ def __eq__(self, expected):
+ return all([name in expected for name in self.names])
+
+
+class DnsControllerTestCase(base.BaseTestCase):
+ fip_netview_name = 'my-test-fip-netview-name'
+
+ def setUp(self):
+ super(DnsControllerTestCase, self).setUp()
+ self.manip = mock.Mock()
+ self.context = mock.Mock()
+ self.port = mock.MagicMock()
+ config_finder = mock.MagicMock()
+ expected_value = 'some-expected-value'
+
+ def port_dict(item):
+ if item == 'fixed_ips':
+ return [{'ip_address': 'some-ip',
+ 'subnet_id': 'some-id'}]
+
+ return expected_value
+ self.port.__getitem__.side_effect = port_dict
+
+ subnet = {'network_id': 'some-net-id',
+ 'name': 'some-dns'}
+
+ self.ip_allocator = mock.Mock()
+
+ self.dns_ctrlr = dns_controller.InfobloxDNSController(
+ self.ip_allocator, self.manip, config_finder=config_finder)
+ self.dns_ctrlr.ea_manager = mock.Mock()
+ self.dns_ctrlr.pattern_builder = mock.Mock()
+ self.dns_ctrlr._get_subnet = mock.Mock()
+ self.dns_ctrlr._get_subnet.return_value = subnet
+
+ def test_bind_host_names_binds_fqdn_with_ip_in_dns_view(self):
+ self.dns_ctrlr.bind_names(self.context, self.port)
+        assert self.ip_allocator.bind_names.called
+
+ def test_unbind_host_names_binds_fqdn_with_ip_in_dns_view(self):
+ self.dns_ctrlr.unbind_names(self.context, self.port)
+        assert self.ip_allocator.unbind_names.called
+
+ def test_restarts_services_on_bind(self):
+ self.dns_ctrlr.bind_names(self.context, self.port)
+        assert self.manip.restart_all_services.called
+
+ def test_restarts_services_on_unbind(self):
+ self.dns_ctrlr.unbind_names(self.context, self.port)
+        assert self.manip.restart_all_services.called
+
+ def test_get_hostname_pattern_dhcp_port(self):
+ port = {'device_owner': neutron_constants.DEVICE_OWNER_DHCP}
+ result = self.dns_ctrlr.get_hostname_pattern(port, mock.Mock())
+ self.assertEqual('dhcp-port-{ip_address}', result)
+
+ def test_get_hostname_pattern_router_iface(self):
+ port = {'device_owner': neutron_constants.DEVICE_OWNER_ROUTER_INTF}
+ result = self.dns_ctrlr.get_hostname_pattern(port, mock.Mock())
+ self.assertEqual('router-iface-{ip_address}', result)
+
+ def test_get_hostname_pattern_router_gw(self):
+ port = {'device_owner': neutron_constants.DEVICE_OWNER_ROUTER_GW}
+ result = self.dns_ctrlr.get_hostname_pattern(port, mock.Mock())
+ self.assertEqual('router-gw-{ip_address}', result)
+
+ def test_get_hostname_pattern_lb_vip(self):
+ port = {'device_owner': 'neutron:' + plugins_constants.LOADBALANCER}
+ result = self.dns_ctrlr.get_hostname_pattern(port, mock.Mock())
+ self.assertEqual('lb-vip-{ip_address}', result)
+
+ def test_get_hostname_pattern_instance_port(self):
+ port = {'device_owner': 'nova:compute'}
+ cfg_mock = mock.MagicMock()
+ cfg_mock.hostname_pattern = 'host-{ip_address}'
+
+ result = self.dns_ctrlr.get_hostname_pattern(port, cfg_mock)
+ self.assertEqual('host-{ip_address}', result)
+
+
+class GenericDNSControllerTestCase(base.BaseTestCase):
+ def test_fqdn_is_built_with_prefix_ip_address_and_dns_zone(self):
+ ip_address = '192.168.1.1'
+ prefix = 'host-'
+ zone = 'some.dns.zone'
+
+ fqdn = dns_controller.build_fqdn(prefix, zone, ip_address)
+
+ self.assertTrue(fqdn.startswith(prefix))
+ self.assertTrue(fqdn.endswith(zone))
+ self.assertIn(ip_address.replace('.', '-'), fqdn)
+
+ def test_no_exception_if_subnet_has_no_nameservers_defined(self):
+ subnet = {}
+ nss = dns_controller.get_nameservers(subnet)
+ self.assertTrue(nss == [])
+
+ subnet = {'dns_nameservers': object()}
+ nss = dns_controller.get_nameservers(subnet)
+ self.assertTrue(nss == [])
+
+ subnet = {'dns_nameservers': []}
+ nss = dns_controller.get_nameservers(subnet)
+ self.assertTrue(nss == [])
+
+
+class DomainZoneTestCase(base.BaseTestCase):
+ def test_two_dns_zones_created_on_create_dns_zone(self):
+ manip = mock.Mock()
+ context = infoblox_ipam.FlowContext(mock.Mock(), 'create-dns')
+ subnet = {'network_id': 'some-id',
+ 'name': 'some-name',
+ 'ip_version': 4,
+ 'cidr': '10.100.0.0/24'}
+ expected_member = 'member-name'
+
+ ip_allocator = mock.Mock()
+ config_finder = mock.Mock()
+
+ cfg = mock.Mock()
+ cfg.ns_group = None
+ cfg.reserve_dns_members.return_value = [expected_member]
+
+ config_finder.find_config_for_subnet.return_value = cfg
+
+ dns_ctrlr = dns_controller.InfobloxDNSController(
+ ip_allocator, manip, config_finder)
+ dns_ctrlr.pattern_builder = mock.Mock()
+ dns_ctrlr.create_dns_zones(context, subnet)
+
+ taskflow.engines.run(context.parent_flow, store=context.store)
+
+ assert (manip.method_calls ==
+ [mock.call.create_dns_zone(mock.ANY,
+ mock.ANY,
+ expected_member,
+ mock.ANY,
+ zone_extattrs=mock.ANY),
+ mock.call.create_dns_zone(mock.ANY,
+ mock.ANY,
+ expected_member,
+ mock.ANY,
+ prefix=mock.ANY,
+ zone_extattrs=mock.ANY,
+ zone_format=mock.ANY)])
+
+ def test_secondary_dns_members(self):
+ manip = mock.Mock()
+ context = infoblox_ipam.FlowContext(mock.Mock(), 'create-dns')
+ subnet = {'network_id': 'some-id',
+ 'name': 'some-name',
+ 'ip_version': 4,
+ 'cidr': '10.100.0.0/24'}
+ primary_dns_member = 'member-primary'
+ secondary_dns_members = ['member-secondary']
+
+ ip_allocator = mock.Mock()
+ config_finder = mock.Mock()
+
+ cfg = mock.Mock()
+ cfg.ns_group = None
+ cfg.reserve_dns_members.return_value = ([primary_dns_member]
+ + secondary_dns_members)
+
+ config_finder.find_config_for_subnet.return_value = cfg
+
+ dns_ctrlr = dns_controller.InfobloxDNSController(
+ ip_allocator, manip, config_finder)
+ dns_ctrlr.pattern_builder = mock.Mock()
+ dns_ctrlr.create_dns_zones(context, subnet)
+
+ taskflow.engines.run(context.parent_flow, store=context.store)
+
+ assert (manip.method_calls ==
+ [mock.call.create_dns_zone(mock.ANY,
+ mock.ANY,
+ primary_dns_member,
+ secondary_dns_members,
+ zone_extattrs=mock.ANY),
+ mock.call.create_dns_zone(mock.ANY,
+ mock.ANY,
+ primary_dns_member,
+ secondary_dns_members,
+ prefix=mock.ANY,
+ zone_extattrs=mock.ANY,
+ zone_format=mock.ANY)])
+
+ def test_prefix_for_classless_networks(self):
+ manip = mock.Mock()
+ context = infoblox_ipam.FlowContext(mock.Mock(), 'create-dns')
+ subnet = {'network_id': 'some-id',
+ 'name': 'some-name',
+ 'ip_version': 4,
+ 'cidr': '192.168.0.128/27'}
+
+ ip_allocator = mock.Mock()
+ config_finder = mock.Mock()
+
+ cfg = mock.Mock()
+ cfg.ns_group = None
+ cfg.reserve_dns_members.return_value = (['some-member'])
+
+ config_finder.find_config_for_subnet.return_value = cfg
+
+ dns_ctrlr = dns_controller.InfobloxDNSController(
+ ip_allocator, manip, config_finder)
+ dns_ctrlr.pattern_builder = mock.Mock()
+ dns_ctrlr.create_dns_zones(context, subnet)
+
+ taskflow.engines.run(context.parent_flow, store=context.store)
+
+ assert (manip.method_calls ==
+ [mock.call.create_dns_zone(mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ zone_extattrs=mock.ANY),
+ mock.call.create_dns_zone(mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ prefix=subnet['name'],
+ zone_extattrs=mock.ANY,
+ zone_format=mock.ANY)])
+
+ def test_prefix_for_classfull_networks(self):
+ manip = mock.Mock()
+ context = infoblox_ipam.FlowContext(mock.Mock(), 'create-dns')
+ subnet = {'network_id': 'some-id',
+ 'name': 'some-name',
+ 'ip_version': 4,
+ 'cidr': '192.168.0.0/24'}
+
+ ip_allocator = mock.Mock()
+ config_finder = mock.Mock()
+
+ cfg = mock.Mock()
+ cfg.ns_group = None
+ cfg.reserve_dns_members.return_value = (['some-member'])
+
+ config_finder.find_config_for_subnet.return_value = cfg
+
+ dns_ctrlr = dns_controller.InfobloxDNSController(
+ ip_allocator, manip, config_finder)
+ dns_ctrlr.pattern_builder = mock.Mock()
+ dns_ctrlr.create_dns_zones(context, subnet)
+
+ taskflow.engines.run(context.parent_flow, store=context.store)
+
+ assert (manip.method_calls ==
+ [mock.call.create_dns_zone(mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ zone_extattrs=mock.ANY),
+ mock.call.create_dns_zone(mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ prefix=None,
+ zone_extattrs=mock.ANY,
+ zone_format=mock.ANY)
+ ])
+
+ @mock.patch.object(infoblox_db, 'is_network_external',
+ mock.Mock())
+ def test_two_dns_zones_deleted_when_not_using_global_dns_zone(self):
+ manip = mock.Mock()
+ context = mock.Mock()
+ subnet = {'network_id': 'some-id',
+ 'cidr': 'some-cidr',
+ 'name': 'some-name'}
+ expected_dns_view = 'some-expected-dns-view'
+
+ cfg = mock.Mock()
+ cfg.is_global_config = False
+ cfg.domain_suffix_pattern = '{subnet_name}.cloud.com'
+ cfg.dns_view = expected_dns_view
+
+ ip_allocator = mock.Mock()
+ config_finder = mock.Mock()
+ config_finder.find_config_for_subnet.return_value = cfg
+ dns_ctrlr = dns_controller.InfobloxDNSController(
+ ip_allocator, manip, config_finder)
+ dns_ctrlr.pattern_builder = mock.Mock()
+
+ network = {'id': 'some-net-id',
+ 'shared': False}
+ dns_ctrlr._get_network = mock.Mock()
+ dns_ctrlr._get_network.return_value = network
+ infoblox_db.is_network_external.return_value = False
+
+ dns_ctrlr.delete_dns_zones(context, subnet)
+
+ assert manip.method_calls == [
+ mock.call.delete_dns_zone(expected_dns_view, mock.ANY),
+ mock.call.delete_dns_zone(expected_dns_view, subnet['cidr'])
+ ]
+
+ def test_no_dns_zone_is_deleted_when_global_dns_zone_used(self):
+ manip = mock.Mock()
+ context = mock.Mock()
+ subnet = {'network_id': 'some-id',
+ 'cidr': 'some-cidr',
+ 'name': 'some-name'}
+ expected_dns_view = 'some-expected-dns-view'
+
+ cfg = mock.Mock()
+ cfg.is_global_config = True
+ cfg.domain_suffix_pattern = '{subnet_name}.cloud.com'
+ cfg.dns_view = expected_dns_view
+
+ ip_allocator = mock.Mock()
+ config_finder = mock.Mock()
+ config_finder.find_config_for_subnet.return_value = cfg
+ dns_ctrlr = dns_controller.InfobloxDNSController(
+ ip_allocator, manip, config_finder)
+ dns_ctrlr.pattern_builder = mock.Mock()
+ dns_ctrlr.delete_dns_zones(context, subnet)
+
+ assert not manip.delete_dns_zone.called
diff --git a/neutron/tests/unit/ipam/drivers/infoblox/test_ip_allocator.py b/neutron/tests/unit/ipam/drivers/infoblox/test_ip_allocator.py
new file mode 100755
index 0000000..2748c0b
--- /dev/null
+++ b/neutron/tests/unit/ipam/drivers/infoblox/test_ip_allocator.py
@@ -0,0 +1,115 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from neutron.ipam.drivers.infoblox import ip_allocator
+from neutron.tests import base
+
+
+class FixedAddressAllocatorTestCase(base.BaseTestCase):
+ def setUp(self):
+ super(FixedAddressAllocatorTestCase, self).setUp()
+ self.ib_mock = mock.Mock()
+
+ self.extattrs = 'test-extattrs'
+ self.netview = 'some-test-net-view'
+ self.mac = 'de:ad:be:ef:00:00'
+ self.ip = '192.168.1.1'
+ self.dnsview = 'some-dns-view'
+ self.zone_auth = 'zone-auth'
+ self.hostname = 'host1'
+ self.dhcp_enabled = True
+
+ self.allocator = ip_allocator.FixedAddressIPAllocator(self.ib_mock)
+
+ def test_creates_fixed_address_on_allocate_ip(self):
+ self.allocator.allocate_given_ip(
+ self.netview, self.dnsview, self.zone_auth,
+ self.hostname, self.mac, self.ip, self.extattrs)
+
+ self.ib_mock.create_fixed_address_for_given_ip.assert_called_once_with(
+ self.netview, self.mac, self.ip, self.extattrs)
+
+ def test_creates_fixed_address_range_on_range_allocation(self):
+ first_ip = '192.168.1.1'
+ last_ip = '192.168.1.123'
+
+ self.allocator.allocate_ip_from_range(
+ self.dnsview, self.netview, self.zone_auth, self.hostname,
+ self.mac, first_ip, last_ip, self.extattrs)
+
+ self.ib_mock.create_fixed_address_from_range.assert_called_once_with(
+ self.netview, self.mac, first_ip, last_ip, self.extattrs)
+
+ def test_deletes_fixed_address(self):
+ self.allocator.deallocate_ip(self.netview, self.dnsview, self.ip)
+
+ self.ib_mock.delete_fixed_address.assert_called_once_with(self.netview,
+ self.ip)
+
+
+class HostRecordAllocatorTestCase(base.BaseTestCase):
+ def test_creates_host_record_on_allocate_ip(self):
+ ib_mock = mock.MagicMock()
+
+ netview = 'some-test-net-view'
+ dnsview = 'some-dns-view'
+ zone_auth = 'zone-auth'
+ hostname = 'host1'
+ mac = 'de:ad:be:ef:00:00'
+ ip = '192.168.1.1'
+
+ ib_mock.find_hostname.return_value = None
+
+ allocator = ip_allocator.HostRecordIPAllocator(ib_mock)
+ allocator.allocate_given_ip(netview, dnsview, zone_auth, hostname,
+ mac, ip)
+
+ ib_mock.create_host_record_for_given_ip.assert_called_once_with(
+ dnsview, zone_auth, hostname, mac, ip, mock.ANY)
+
+ def test_creates_host_record_range_on_range_allocation(self):
+ ib_mock = mock.MagicMock()
+
+ netview = 'some-test-net-view'
+ dnsview = 'some-dns-view'
+ zone_auth = 'zone-auth'
+ hostname = 'host1'
+ mac = 'de:ad:be:ef:00:00'
+ first_ip = '192.168.1.2'
+ last_ip = '192.168.1.254'
+
+ ib_mock.find_hostname.return_value = None
+
+ allocator = ip_allocator.HostRecordIPAllocator(ib_mock)
+ allocator.allocate_ip_from_range(
+ dnsview, netview, zone_auth, hostname, mac, first_ip, last_ip)
+
+ ib_mock.create_host_record_from_range.assert_called_once_with(
+ dnsview, netview, zone_auth, hostname,
+ mac, first_ip, last_ip, mock.ANY)
+
+ def test_deletes_host_record(self):
+ ib_mock = mock.MagicMock()
+
+ netview = 'some-test-net-view'
+ dnsview = 'some-dns-view'
+ ip = '192.168.1.2'
+
+ allocator = ip_allocator.HostRecordIPAllocator(ib_mock)
+ allocator.deallocate_ip(netview, dnsview, ip)
+
+ ib_mock.delete_host_record.assert_called_once_with(dnsview, ip)
diff --git a/neutron/tests/unit/ipam/drivers/infoblox/test_ipam_controller.py b/neutron/tests/unit/ipam/drivers/infoblox/test_ipam_controller.py
new file mode 100755
index 0000000..2695722
--- /dev/null
+++ b/neutron/tests/unit/ipam/drivers/infoblox/test_ipam_controller.py
@@ -0,0 +1,837 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+import taskflow.engines
+
+from neutron.common import exceptions as neutron_exc
+from neutron.db.infoblox import infoblox_db as infoblox_db
+from neutron.ipam.drivers.infoblox import ea_manager
+from neutron.ipam.drivers.infoblox import exceptions as ib_exceptions
+from neutron.ipam.drivers.infoblox import infoblox_ipam
+from neutron.ipam.drivers.infoblox import ipam_controller
+from neutron.ipam.drivers.infoblox import objects
+from neutron.tests import base
+
+
+class SubstringMatcher(object):
+ def __init__(self, expected):
+ self.expected = expected
+
+ def __eq__(self, actual):
+ return self.expected in actual
+
+ def __repr__(self):
+ return "Expected substring: '{}'".format(self.expected)
+
+
+class CreateSubnetTestCases(base.BaseTestCase):
+ def setUp(self):
+ super(CreateSubnetTestCases, self).setUp()
+
+ self.expected_net_view_name = 'some-tenant-id'
+ self.cidr = 'some-cidr'
+ self.first_ip = '192.168.0.1'
+ self.last_ip = '192.168.0.254'
+ self.subnet = mock.MagicMock()
+ self.subnet.__getitem__.side_effect = mock.MagicMock()
+ self.object_manipulator = mock.Mock()
+ ip_allocator = mock.Mock()
+ self.object_manipulator.network_exists.return_value = False
+ self.object_manipulator.set_network_view = mock.MagicMock()
+
+ cfg = mock.Mock()
+ cfg.reserve_dhcp_members = mock.Mock(return_value=[])
+ cfg.reserve_dns_members = mock.Mock(return_value=[])
+ cfg.dhcp_members = ['member1.com']
+ cfg.dns_members = ['member1.com']
+
+ config_finder = mock.Mock()
+ config_finder.find_config_for_subnet = mock.Mock(return_value=cfg)
+
+ context = infoblox_ipam.FlowContext(mock.MagicMock(), 'create-subnet')
+
+ b = ipam_controller.InfobloxIPAMController(self.object_manipulator,
+ config_finder,
+ ip_allocator)
+ b.ea_manager = mock.Mock()
+ b.create_subnet(context, self.subnet)
+ taskflow.engines.run(context.parent_flow, store=context.store)
+
+    def test_network_view_is_created_on_subnet_creation(self):
+        assert self.object_manipulator.create_network_view.called
+
+    def test_dns_view_is_created_on_subnet_creation(self):
+        assert self.object_manipulator.create_dns_view.called
+
+    def test_infoblox_network_is_created_on_subnet_create(self):
+        assert self.object_manipulator.create_network.called
+
+    def test_ip_range_is_created_on_subnet_create(self):
+        assert self.object_manipulator.create_ip_range.called
+
+
+class UpdateSubnetTestCase(base.BaseTestCase):
+ def setUp(self):
+ super(UpdateSubnetTestCase, self).setUp()
+ self.object_manipulator = mock.Mock()
+ self.context = mock.Mock()
+ ip_allocator = mock.Mock()
+ config_finder = mock.Mock()
+
+ cfg = mock.Mock()
+ cfg.dhcp_members = ['member1.com']
+ cfg.dns_members = ['member1.com']
+ config_finder.find_config_for_subnet.return_value = cfg
+
+ self.ipam = ipam_controller.InfobloxIPAMController(
+ self.object_manipulator, config_finder, ip_allocator)
+ self.ipam.ea_manager = mock.Mock()
+
+ self.sub_id = 'fake-id'
+ self.new_nameservers = ['new_serv1', 'new_serv2']
+ self.sub = dict(
+ id=self.sub_id,
+ cidr='test-cidr',
+ dns_nameservers=self.new_nameservers,
+ network_id='some-net-id'
+ )
+ self.ib_net = objects.Network()
+ self.object_manipulator.get_network.return_value = self.ib_net
+
+ @mock.patch.object(infoblox_db, 'get_subnet_dhcp_port_address',
+ mock.Mock(return_value=None))
+ def test_update_subnet_dns_no_primary_ip(self):
+ self.ipam.update_subnet(self.context, self.sub_id, self.sub)
+
+ self.assertEqual(self.new_nameservers, self.ib_net.dns_nameservers)
+ self.object_manipulator.update_network_options.assert_called_once_with(
+ self.ib_net, mock.ANY
+ )
+
+ @mock.patch.object(infoblox_db, 'get_subnet_dhcp_port_address',
+ mock.Mock(return_value=None))
+ def test_update_subnet_dns_primary_is_member_ip(self):
+ self.ib_net.member_ip_addrs = ['member-ip']
+ self.ib_net.dns_nameservers = ['member-ip', 'old_serv1', 'old_serv']
+
+ self.ipam.update_subnet(self.context, self.sub_id, self.sub)
+
+ self.assertEqual(['member-ip'] + self.new_nameservers,
+ self.ib_net.dns_nameservers)
+ self.object_manipulator.update_network_options.assert_called_once_with(
+ self.ib_net, mock.ANY
+ )
+
+ @mock.patch.object(infoblox_db, 'get_subnet_dhcp_port_address',
+ mock.Mock())
+ def test_update_subnet_dns_primary_is_relay_ip(self):
+ self.ib_net.member_ip_addr = 'fake_ip'
+ self.ib_net.dns_nameservers = ['relay_ip', '1.1.1.1', '2.2.2.2']
+
+ infoblox_db.get_subnet_dhcp_port_address.return_value = 'relay-ip'
+
+ self.ipam.update_subnet(self.context, self.sub_id, self.sub)
+
+ self.assertEqual(['relay-ip'] + self.new_nameservers,
+ self.ib_net.dns_nameservers)
+ self.object_manipulator.update_network_options.assert_called_once_with(
+ self.ib_net, mock.ANY
+ )
+
+ def test_extensible_attributes_get_updated(self):
+ ea_manager = mock.Mock()
+ manip = mock.MagicMock()
+ config_finder = mock.Mock()
+ config = mock.Mock()
+ config.dhcp_members = ['member1.com']
+ config.dns_members = ['member1.com']
+ config_finder.find_config_for_subnet = mock.Mock(return_value=config)
+ context = mock.Mock()
+ subnet_id = 'some-id'
+ subnet = mock.MagicMock()
+ subnet.name = None
+
+ ctrlr = ipam_controller.InfobloxIPAMController(
+ manip, config_finder, extattr_manager=ea_manager)
+
+ ctrlr.update_subnet(context, subnet_id, subnet)
+
+        assert manip.update_network_options.called
+
+
+class AllocateIPTestCase(base.BaseTestCase):
+ def test_host_record_created_on_allocate_ip(self):
+ infoblox = mock.Mock()
+ member_config = mock.MagicMock()
+ ip_allocator = mock.Mock()
+ context = mock.Mock()
+
+ subnet = {'tenant_id': 'some-id', 'id': 'some-id'}
+ mac = 'aa:bb:cc:dd:ee:ff'
+ port = {'id': 'port_id',
+ 'mac_address': mac}
+ ip_dict = {'ip_address': '192.168.1.1',
+ 'subnet_id': 'fake-id'}
+ ip = {'ip_address': '192.168.1.1'}
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_config,
+ ip_allocator)
+ b.pattern_builder = mock.Mock()
+ b.ea_manager = mock.Mock()
+
+ b.allocate_ip(context, subnet, port, ip_dict)
+
+ ip_allocator.allocate_given_ip.assert_called_once_with(
+ mock.ANY, mock.ANY, mock.ANY, mock.ANY, mac, ip['ip_address'],
+ mock.ANY)
+
+ def test_host_record_from_range_created_on_allocate_ip(self):
+ infoblox = mock.Mock()
+ member_config = mock.MagicMock()
+ ip_allocator = mock.Mock()
+ context = mock.Mock()
+
+ first_ip = '192.168.1.1'
+ last_ip = '192.168.1.132'
+
+ subnet = {'allocation_pools': [{'first_ip': first_ip,
+ 'last_ip': last_ip}],
+ 'tenant_id': 'some-id',
+ 'id': 'some-id'}
+
+ mac = 'aa:bb:cc:dd:ee:ff'
+ port = {'mac_address': mac,
+ 'device_owner': 'owner'}
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_config,
+ ip_allocator)
+ b.pattern_builder = mock.Mock()
+ b.ea_manager = mock.Mock()
+ b.allocate_ip(context, subnet, port)
+
+ assert not ip_allocator.allocate_given_ip.called
+ ip_allocator.allocate_ip_from_range.assert_called_once_with(
+ mock.ANY, mock.ANY, mock.ANY, mock.ANY, mac, first_ip, last_ip,
+ mock.ANY)
+
+ def test_cannot_allocate_ip_raised_if_empty_range(self):
+ infoblox = mock.Mock()
+ member_config = mock.Mock()
+ context = mock.Mock()
+ ip_allocator = mock.Mock()
+
+ hostname = 'hostname'
+ subnet = {'allocation_pools': [],
+ 'tenant_id': 'some-id',
+ 'cidr': '192.168.0.0/24'}
+ mac = 'aa:bb:cc:dd:ee:ff'
+ host = {'name': hostname,
+ 'mac_address': mac,
+ 'device_owner': 'owner'}
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_config,
+ ip_allocator)
+ b.pattern_builder = mock.Mock()
+ b.ea_manager = mock.Mock()
+
+        ip = b.allocate_ip(context, subnet, host)
+        self.assertIsNone(ip)
+
+        assert not infoblox.create_host_record_range.called
+        assert not infoblox.create_host_record_ip.called
+
+
+class DeallocateIPTestCase(base.BaseTestCase):
+ def setUp(self):
+ super(DeallocateIPTestCase, self).setUp()
+
+ self.infoblox = mock.Mock()
+
+ cfg = mock.Mock()
+ cfg.dhcp_members = ['172.25.1.1']
+
+ config_finder = mock.MagicMock()
+ config_finder.find_config_for_subnet = mock.Mock(return_value=cfg)
+
+ context = mock.MagicMock()
+ self.ip_allocator = mock.Mock()
+
+ hostname = 'hostname'
+ self.ip = '192.168.0.1'
+ subnet = {'tenant_id': 'some-id',
+ 'network_id': 'some-id',
+ 'id': 'some-id'}
+ mac = 'aa:bb:cc:dd:ee:ff'
+ host = {'name': hostname,
+ 'mac_address': mac}
+
+ b = ipam_controller.InfobloxIPAMController(self.infoblox,
+ config_finder,
+ self.ip_allocator)
+ b.deallocate_ip(context, subnet, host, self.ip)
+
+ def test_ip_is_deallocated(self):
+ self.ip_allocator.deallocate_ip.assert_called_once_with(
+ mock.ANY, mock.ANY, self.ip)
+
+ def test_dns_and_dhcp_services_restarted(self):
+        assert self.infoblox.restart_all_services.called
+
+
+class NetOptionsMatcher(object):
+ def __init__(self, expected_ip):
+ self.expected_ip = expected_ip
+
+ def __eq__(self, actual_net):
+ return self.expected_ip in actual_net.dns_nameservers
+
+ def __repr__(self):
+ return "{}".format(self.expected_ip)
+
+
+class DnsNameserversTestCase(base.BaseTestCase):
+ def test_network_is_updated_with_new_ip(self):
+ infoblox = mock.Mock()
+ ip_allocator = mock.Mock()
+ member_config = mock.Mock()
+ context = mock.MagicMock()
+
+ expected_ip = '192.168.1.1'
+ cidr = '192.168.1.0/24'
+ port = {'fixed_ips': [{'subnet_id': 'some-id',
+ 'ip_address': expected_ip}]}
+ subnet = {'cidr': cidr,
+ 'tenant_id': 'some-id'}
+
+ network = objects.Network()
+ network.members = ['member1']
+ network.member_ip_addrs = ['192.168.1.2']
+ network.dns_nameservers = [expected_ip]
+
+ infoblox.get_network.return_value = network
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_config,
+ ip_allocator)
+ b._get_subnet = mock.Mock()
+ b._get_subnet.return_value = subnet
+
+ b.set_dns_nameservers(context, port)
+
+ matcher = NetOptionsMatcher(expected_ip)
+ infoblox.update_network_options.assert_called_once_with(matcher)
+
+ def test_network_is_not_updated_if_network_has_no_members(self):
+ infoblox = mock.Mock()
+ member_config = mock.Mock()
+ ip_allocator = mock.Mock()
+ context = mock.MagicMock()
+
+ expected_ip = '192.168.1.1'
+ cidr = '192.168.1.0/24'
+ port = {'fixed_ips': [{'subnet_id': 'some-id',
+ 'ip_address': expected_ip}]}
+ subnet = {'cidr': cidr,
+ 'tenant_id': 'some-id'}
+
+ infoblox.get_network.return_value = objects.Network()
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_config,
+ ip_allocator)
+ b._get_subnet = mock.Mock()
+ b._get_subnet.return_value = subnet
+
+ b.set_dns_nameservers(context, port)
+
+ assert not infoblox.update_network_options.called
+
+ def test_network_is_not_updated_if_network_has_no_dns_members(self):
+ infoblox = mock.Mock()
+ member_config = mock.Mock()
+ ip_allocator = mock.Mock()
+ context = mock.MagicMock()
+
+ expected_ip = '192.168.1.1'
+ cidr = '192.168.1.0/24'
+ port = {'fixed_ips': [{'subnet_id': 'some-id',
+ 'ip_address': expected_ip}]}
+ subnet = {'cidr': cidr,
+ 'tenant_id': 'some-id'}
+ network = objects.Network()
+ network.members = ['member1']
+
+ infoblox.get_network.return_value = network
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_config,
+ ip_allocator)
+ b._get_subnet = mock.Mock()
+ b._get_subnet.return_value = subnet
+
+ b.set_dns_nameservers(context, port)
+
+ assert not infoblox.update_network_options.called
+
+
+class DeleteSubnetTestCase(base.BaseTestCase):
+ @mock.patch.object(infoblox_db, 'is_network_external',
+ mock.Mock())
+ def test_ib_network_deleted(self):
+ infoblox = mock.Mock()
+ member_conf = mock.Mock()
+ config = mock.Mock()
+ config.dhcp_members = ['member1.com']
+ config.dns_members = ['member1.com']
+ config.is_global_config = False
+ member_conf.find_config_for_subnet = mock.Mock(return_value=config)
+ ip_allocator = mock.Mock()
+ context = mock.MagicMock()
+
+ cidr = '192.168.0.0/24'
+ subnet = mock.MagicMock()
+ subnet.__getitem__ = mock.Mock(return_value=cidr)
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_conf,
+ ip_allocator)
+ b._get_network = mock.Mock()
+ b._get_network.return_value = {
+ 'network_id': 'some-net-id',
+ 'shared': False
+ }
+ infoblox_db.is_network_external.return_value = False
+
+ b.delete_subnet(context, subnet)
+
+ infoblox.delete_network.assert_called_once_with(mock.ANY, cidr=cidr)
+
+ def test_member_released(self):
+ infoblox = mock.Mock()
+ member_finder = mock.Mock()
+ config = mock.Mock()
+ config.dhcp_members = ['member1.com']
+ config.dns_members = ['member1.com']
+ member_finder.find_config_for_subnet = mock.Mock(return_value=config)
+ ip_allocator = mock.Mock()
+ context = mock.MagicMock()
+
+ subnet = mock.MagicMock()
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_finder,
+ ip_allocator)
+ b.delete_subnet(context, subnet)
+
+ assert member_finder.member_manager.release_member.called_once
+
+ def test_preconfigured_dns_view_gets_deleted(self):
+ dns_view = "fake dns view"
+ infoblox = mock.Mock()
+ infoblox.has_dns_zones = mock.Mock(return_value=False)
+ ip_allocator = mock.Mock()
+ config = mock.Mock()
+ config.dhcp_members = ['member1.com']
+ config.dns_members = ['member1.com']
+ config._dns_view = dns_view
+ config_finder = mock.Mock()
+ config_finder.find_config_for_subnet = mock.Mock(return_value=config)
+ context = mock.Mock()
+ subnet = mock.MagicMock()
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ config_finder,
+ ip_allocator)
+
+ b.get_subnets_by_network = mock.MagicMock()
+ b.delete_subnet(context, subnet)
+
+ infoblox.delete_dns_view.assert_called_once_with(dns_view)
+
+ def test_network_view_deleted(self):
+ infoblox = mock.Mock()
+ ip_allocator = mock.Mock()
+ member_conf = mock.Mock()
+ config = mock.Mock()
+ config.dhcp_members = ['member1.com']
+ config.dns_members = ['member1.com']
+ member_conf.find_config_for_subnet = mock.Mock(return_value=config)
+ context = mock.Mock()
+ network = mock.MagicMock()
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_conf,
+ ip_allocator)
+
+ b.get_subnets_by_network = mock.MagicMock()
+ b.delete_subnet(context, network)
+
+ assert infoblox.delete_network_view.called_once
+
+
+class CreateSubnetFlowTestCase(base.BaseTestCase):
+ def setUp(self):
+ super(CreateSubnetFlowTestCase, self).setUp()
+
+ self.infoblox = mock.Mock()
+ member_conf = mock.MagicMock()
+ ip_allocator = mock.Mock()
+ self.expected_exception = Exception
+ self.context = infoblox_ipam.FlowContext(mock.MagicMock(),
+ 'create-subnet')
+ self.subnet = {'cidr': '192.168.0.0/24',
+ 'tenant_id': 'some-id',
+ 'network_id': 'some-id',
+ 'gateway_ip': '192.168.1.1',
+ 'allocation_pools': [{'start': 'start',
+ 'end': 'end'}],
+ 'ip_version': 'ipv4',
+ 'name': 'some-name',
+ 'enable_dhcp': True}
+
+ self.infoblox.create_ip_range.side_effect = Exception()
+ self.infoblox.network_exists.return_value = False
+
+ self.b = ipam_controller.InfobloxIPAMController(self.infoblox,
+ member_conf,
+ ip_allocator)
+ self.b.pattern_builder = mock.Mock()
+ self.b.ea_manager = mock.Mock()
+
+ def test_flow_is_reverted_in_case_of_error(self):
+ self.infoblox.has_networks.return_value = False
+ self.b.create_subnet(self.context, self.subnet)
+ self.assertRaises(self.expected_exception, taskflow.engines.run,
+ self.context.parent_flow, store=self.context.store)
+
+ assert self.infoblox.delete_network.called
+ assert not self.infoblox.delete_dns_view.called
+ assert self.infoblox.delete_network_view.called
+
+ def test_network_view_is_not_deleted_if_has_networks(self):
+ self.infoblox.has_networks.return_value = True
+ self.b.create_subnet(self.context, self.subnet)
+
+ self.assertRaises(self.expected_exception, taskflow.engines.run,
+ self.context.parent_flow, store=self.context.store)
+
+ assert self.infoblox.delete_network.called
+ assert not self.infoblox.delete_dns_view.called
+ assert not self.infoblox.delete_network_view.called
+
+
+class CreateSubnetFlowNiosNetExistsTestCase(base.BaseTestCase):
+ def setUp(self):
+ super(CreateSubnetFlowNiosNetExistsTestCase, self).setUp()
+
+ self.infoblox = mock.Mock()
+ member_conf = mock.MagicMock()
+ ip_allocator = mock.Mock()
+ self.context = infoblox_ipam.FlowContext(mock.MagicMock(),
+ 'create-subnet')
+ self.subnet = mock.MagicMock()
+ self.subnet.__getitem__.side_effect = mock.MagicMock()
+
+ self.infoblox.network_exists.return_value = True
+
+ self.b = ipam_controller.InfobloxIPAMController(self.infoblox,
+ member_conf,
+ ip_allocator)
+
+ def test_nios_network_is_updated_for_shared_os_network(self):
+ self.b.ea_manager.get_extattrs_for_network = mock.Mock(
+ return_value={
+ 'Is External': {
+ 'value': 'False'},
+ 'Is Shared': {
+ 'value': 'True'}
+ })
+
+ self.b.create_subnet(self.context, self.subnet)
+
+ taskflow.engines.run(self.context.parent_flow,
+ store=self.context.store)
+        assert self.infoblox.update_network_options.called
+
+ def test_nios_network_is_updated_for_shared_external_os_network(self):
+ self.b.ea_manager.get_extattrs_for_network = mock.Mock(
+ return_value={
+ 'Is External': {
+ 'value': 'True'},
+ 'Is Shared': {
+ 'value': 'True'}
+ })
+
+ self.b.create_subnet(self.context, self.subnet)
+
+ taskflow.engines.run(self.context.parent_flow,
+ store=self.context.store)
+        assert self.infoblox.update_network_options.called
+
+ def test_nios_network_is_updated_for_external_os_network(self):
+ self.b.ea_manager.get_extattrs_for_network = mock.Mock(
+ return_value={
+ 'Is External': {
+ 'value': 'True'},
+ 'Is Shared': {
+ 'value': 'False'}
+ })
+
+ self.b.create_subnet(self.context, self.subnet)
+
+ taskflow.engines.run(self.context.parent_flow,
+ store=self.context.store)
+        assert self.infoblox.update_network_options.called
+
+ def test_exception_is_raised_if_network_is_private(self):
+ self.b.ea_manager.get_extattrs_for_network = mock.Mock(
+ return_value={
+ 'Is External': {
+ 'value': 'False'},
+ 'Is Shared': {
+ 'value': 'False'}
+ })
+
+ self.b.create_subnet(self.context, self.subnet)
+
+ self.assertRaises(
+ ib_exceptions.InfobloxInternalPrivateSubnetAlreadyExist,
+ taskflow.engines.run,
+ self.context.parent_flow,
+ store=self.context.store
+ )
+
+
+class DeleteNetworkTestCase(base.BaseTestCase):
+ def test_deletes_all_subnets(self):
+ infoblox = mock.Mock()
+ ip_allocator = mock.Mock()
+ member_conf = mock.Mock()
+ context = mock.Mock()
+ db_manager = mock.Mock()
+ network = {'id': 'some-id'}
+ num_subnets = 5
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_conf,
+ ip_allocator,
+ db_mgr=db_manager)
+
+ b.delete_subnet = mock.Mock()
+ db_manager.get_subnets_by_network = mock.Mock()
+ db_manager.get_subnets_by_network.return_value = [
+ mock.Mock() for _ in xrange(num_subnets)]
+
+ b.delete_network(context, network)
+
+ assert b.delete_subnet.called
+ assert b.delete_subnet.call_count == num_subnets
+
+ def test_deletes_network_view(self):
+ infoblox = mock.Mock()
+ ip_allocator = mock.Mock()
+ member_conf = mock.Mock()
+ context = mock.MagicMock()
+ network_id = 'some-id'
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_conf,
+ ip_allocator)
+
+ b.delete_network(context, network_id)
+
+        assert infoblox.delete_network_view.called
+
+ def test_deletes_management_ip(self):
+ infoblox = mock.Mock()
+ ip_allocator = mock.Mock()
+ member_conf = mock.Mock()
+ context = mock.Mock()
+ network = mock.MagicMock()
+ ib_db = mock.Mock()
+ ea_manager = mock.Mock()
+ db_manager = mock.Mock()
+
+ ib_db.is_network_external.return_value = False
+
+ net_view_name = 'expected_network_view'
+ cidr = 'expected_cidr'
+
+ self.config(dhcp_relay_management_network_view=net_view_name,
+ dhcp_relay_management_network=cidr)
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_conf,
+ ip_allocator,
+ ea_manager,
+ ib_db,
+ db_mgr=db_manager)
+
+ b.delete_subnet = mock.Mock()
+ db_manager.get_subnets_by_network = mock.MagicMock()
+
+ b.delete_network(context, network)
+ infoblox.delete_object_by_ref.assert_called_once_with(mock.ANY)
+
+ def test_deletes_management_ip_from_db(self):
+ infoblox = mock.Mock()
+ ip_allocator = mock.Mock()
+ member_conf = mock.Mock()
+ context = mock.Mock()
+ expected_net_id = 'some-net-id'
+ ib_db = mock.Mock()
+ db_manager = mock.Mock()
+
+ ib_db.is_network_external.return_value = False
+ net_view_name = 'expected_network_view'
+ cidr = 'expected_cidr'
+
+ self.config(dhcp_relay_management_network_view=net_view_name,
+ dhcp_relay_management_network=cidr)
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_conf,
+ ip_allocator,
+ ib_db=ib_db,
+ db_mgr=db_manager)
+
+ b.delete_subnet = mock.Mock()
+ db_manager.get_subnets_by_network = mock.MagicMock()
+
+ b.delete_network(context, expected_net_id)
+ ib_db.delete_management_ip.assert_called_once_with(
+ context, expected_net_id)
+
+ def test_does_not_delete_management_network_ip_for_external_net(self):
+ infoblox = mock.Mock()
+ ip_allocator = mock.Mock()
+ member_conf = mock.Mock()
+ context = mock.MagicMock()
+ network_id = mock.MagicMock()
+ ib_db = mock.Mock()
+ ea_manager = mock.Mock()
+
+ ib_db.is_network_external.return_value = True
+
+ net_view_name = 'expected_network_view'
+ cidr = 'expected_cidr'
+
+ self.config(dhcp_relay_management_network_view=net_view_name,
+ dhcp_relay_management_network=cidr)
+
+ b = ipam_controller.InfobloxIPAMController(infoblox,
+ member_conf,
+ ip_allocator,
+ ea_manager,
+ ib_db)
+ b.delete_network(context, network_id)
+
+ assert infoblox.delete_object_by_ref.called
+ assert ib_db.delete_management_ip.called
+
+
+class CreateNetworkTestCase(base.BaseTestCase):
+ def test_creates_fixed_address_object_in_management_network(self):
+ infoblox = mock.Mock()
+ config_finder = mock.Mock()
+ ip_allocator = mock.Mock()
+ ea_manager = mock.Mock()
+ context = mock.Mock()
+ network = mock.MagicMock()
+ network.get.return_value = False
+ network.__getitem__.side_effect = mock.Mock()
+
+ expected_net_view = 'expected_net_view_name'
+ cidr = '1.0.0.0/24'
+ expected_mac = '00:00:00:00:00:00'
+ self.config(dhcp_relay_management_network_view=expected_net_view,
+ dhcp_relay_management_network=cidr)
+
+ c = ipam_controller.InfobloxIPAMController(infoblox, config_finder,
+ ip_allocator, ea_manager)
+
+ c.create_network(context, network)
+ infoblox.create_fixed_address_from_cidr.assert_called_once_with(
+ expected_net_view, expected_mac, cidr, mock.ANY)
+
+ def test_stores_fixed_address_object_in_db(self):
+ infoblox = mock.Mock()
+ config_finder = mock.Mock()
+ ip_allocator = mock.Mock()
+ ea_manager = mock.Mock()
+ context = mock.Mock()
+ ib_db = mock.Mock()
+ expected_net_id = 'some-net-id'
+
+ network = mock.MagicMock()
+ network.get.return_value = False
+ network.__getitem__.side_effect = \
+ lambda key: expected_net_id if key == 'id' else None
+
+ expected_net_view = 'expected_net_view_name'
+ cidr = '1.0.0.0/24'
+ self.config(dhcp_relay_management_network_view=expected_net_view,
+ dhcp_relay_management_network=cidr)
+
+ c = ipam_controller.InfobloxIPAMController(
+ infoblox, config_finder, ip_allocator, ea_manager, ib_db)
+
+ c.create_network(context, network)
+
+ ib_db.add_management_ip.assert_called_once_with(
+ context, expected_net_id, mock.ANY)
+
+ def test_does_nothing_if_mgmt_net_is_not_set_in_config(self):
+ infoblox = mock.Mock()
+ config_finder = mock.Mock()
+ ip_allocator = mock.Mock()
+ ea_manager = mock.Mock()
+ context = mock.Mock()
+ ib_db = mock.Mock()
+ network = mock.Mock()
+ network.get.return_value = False
+
+ c = ipam_controller.InfobloxIPAMController(
+ infoblox, config_finder, ip_allocator, ea_manager, ib_db)
+
+ c.create_network(context, network)
+
+ assert not infoblox.create_fixed_address_from_cidr.called
+ assert not ib_db.add_management_ip.called
+
+ def test_does_nothing_for_external_net(self):
+ infoblox = mock.Mock()
+ config_finder = mock.Mock()
+ ip_allocator = mock.Mock()
+ ea_manager = mock.Mock()
+ context = mock.Mock()
+ ib_db = mock.Mock()
+ network = mock.Mock()
+
+ c = ipam_controller.InfobloxIPAMController(
+ infoblox, config_finder, ip_allocator, ea_manager, ib_db)
+
+ try:
+ c.create_network(context, network)
+ except neutron_exc.InvalidConfigurationOption as e:
+ self.fail('Unexpected exception: {}'.format(e))
+
+ assert not infoblox.create_fixed_address_from_cidr.called
+ assert not ib_db.add_management_ip.called
diff --git a/neutron/tests/unit/ipam/drivers/infoblox/test_object_manipulator.py b/neutron/tests/unit/ipam/drivers/infoblox/test_object_manipulator.py
new file mode 100755
index 0000000..844e8fc
--- /dev/null
+++ b/neutron/tests/unit/ipam/drivers/infoblox/test_object_manipulator.py
@@ -0,0 +1,514 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from neutron.ipam.drivers.infoblox import exceptions
+from neutron.ipam.drivers.infoblox import object_manipulator as om
+from neutron.ipam.drivers.infoblox import objects
+from neutron.tests import base
+
+
+class PayloadMatcher(object):
+ ANYKEY = 'MATCH_ANY_KEY'
+
+ def __init__(self, expected_values):
+ self.args = expected_values
+
+ def __eq__(self, actual):
+ expected = []
+
+ for key, expected_value in self.args.iteritems():
+ expected.append(self._verify_value_is_expected(actual, key,
+ expected_value))
+
+ return all(expected)
+
+ def __repr__(self):
+ return "Expected args: %s" % self.args
+
+ def _verify_value_is_expected(self, d, key, expected_value):
+ found = False
+ if not isinstance(d, dict):
+ return False
+
+ for k in d:
+ if isinstance(d[k], dict):
+ found = self._verify_value_is_expected(d[k], key,
+ expected_value)
+ if isinstance(d[k], list):
+ if k == key and d[k] == expected_value:
+ return True
+ for el in d[k]:
+ found = self._verify_value_is_expected(el, key,
+ expected_value)
+
+ if found:
+ return True
+ if (key == k or key == self.ANYKEY) and d[k] == expected_value:
+ return True
+ return found
+
+
+class ObjectManipulatorTestCase(base.BaseTestCase):
+ def test_create_net_view_creates_network_view_object(self):
+ connector = mock.Mock()
+ connector.get_object.return_value = None
+ connector.create_object.return_value = None
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ net_view_name = 'test_net_view_name'
+ ibom.create_network_view(net_view_name, mock.ANY)
+
+ matcher = PayloadMatcher({'name': net_view_name})
+ connector.get_object.assert_called_once_with(
+ 'networkview', matcher, None, proxy=False)
+ connector.create_object.assert_called_once_with(
+ 'networkview', matcher, mock.ANY)
+
+ def test_create_host_record_creates_host_record_object(self):
+ dns_view_name = 'test_dns_view_name'
+ zone_auth = 'test.dns.zone.com'
+ hostname = 'test_hostname'
+ ip = '192.168.0.1'
+ mac = 'aa:bb:cc:dd:ee:ff'
+
+ sample_host_record = objects.HostRecordIPv4()
+ sample_host_record.hostname = hostname
+ sample_host_record.zone_auth = zone_auth
+ sample_host_record.ip = ip
+
+ connector = mock.Mock()
+ connector.create_object.return_value = sample_host_record.to_dict()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.create_host_record_for_given_ip(dns_view_name, zone_auth,
+ hostname, mac, ip, mock.ANY)
+
+ exp_payload = {
+ 'name': 'test_hostname.test.dns.zone.com',
+ 'view': dns_view_name,
+ 'ipv4addrs': [
+ {'mac': mac, 'configure_for_dhcp': True, 'ipv4addr': ip}
+ ],
+ 'extattrs': {}
+ }
+
+ connector.create_object.assert_called_once_with(
+ 'record:host', exp_payload, ['ipv4addrs', 'extattrs'])
+
+ def test_create_host_record_range_create_host_record_object(self):
+ dns_view_name = 'test_dns_view_name'
+ zone_auth = 'test.dns.zone.com'
+ hostname = 'test_hostname'
+ mac = 'aa:bb:cc:dd:ee:ff'
+ net_view_name = 'test_net_view_name'
+ first_ip = '192.168.0.1'
+ last_ip = '192.168.0.254'
+
+ sample_host_record = objects.HostRecordIPv4()
+ sample_host_record.hostname = hostname
+ sample_host_record.zone_auth = zone_auth
+ sample_host_record.ip = first_ip
+ sample_host_record.view = dns_view_name
+
+ connector = mock.Mock()
+ connector.create_object.return_value = sample_host_record.to_dict()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.create_host_record_from_range(
+ dns_view_name, net_view_name, zone_auth, hostname, mac, first_ip,
+ last_ip, mock.ANY)
+
+ next_ip = \
+ 'func:nextavailableip:192.168.0.1-192.168.0.254,test_net_view_name'
+ exp_payload = {
+ 'name': 'test_hostname.test.dns.zone.com',
+ 'view': dns_view_name,
+ 'ipv4addrs': [
+ {'mac': mac, 'configure_for_dhcp': True, 'ipv4addr': next_ip}
+ ],
+ 'extattrs': mock.ANY
+ }
+
+ connector.create_object.assert_called_once_with(
+ 'record:host', exp_payload, ['ipv4addrs', 'extattrs'])
+
+ def test_delete_host_record_deletes_host_record_object(self):
+ connector = mock.Mock()
+ connector.get_object.return_value = mock.MagicMock()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ dns_view_name = 'test_dns_view_name'
+ ip_address = '192.168.0.254'
+
+ ibom.delete_host_record(dns_view_name, ip_address)
+
+ matcher = PayloadMatcher({'view': dns_view_name,
+ PayloadMatcher.ANYKEY: ip_address})
+ connector.get_object.assert_called_once_with(
+ 'record:host', matcher, None, proxy=False)
+ connector.delete_object.assert_called_once_with(mock.ANY)
+
+ def test_get_network_gets_network_object(self):
+ connector = mock.Mock()
+ connector.get_object.return_value = mock.MagicMock()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ net_view_name = 'test_dns_view_name'
+ cidr = '192.168.0.0/24'
+
+ ibom.get_network(net_view_name, cidr)
+
+ matcher = PayloadMatcher({'network_view': net_view_name,
+ 'network': cidr})
+ connector.get_object.assert_called_once_with('network',
+ matcher,
+ mock.ANY,
+ proxy=False)
+
+ def test_throws_network_not_available_on_get_network(self):
+ connector = mock.Mock()
+ connector.get_object.return_value = None
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ net_view_name = 'test_dns_view_name'
+ cidr = '192.168.0.0/24'
+
+ self.assertRaises(exceptions.InfobloxNetworkNotAvailable,
+ ibom.get_network, net_view_name, cidr)
+
+ matcher = PayloadMatcher({'network_view': net_view_name,
+ 'network': cidr})
+ connector.get_object.assert_called_once_with('network',
+ matcher,
+ mock.ANY,
+ proxy=False)
+
+ def test_object_is_not_created_if_already_exists(self):
+ connector = mock.Mock()
+ connector.get_object.return_value = mock.MagicMock()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ net_view_name = 'test_dns_view_name'
+
+ ibom.create_network_view(net_view_name, mock.ANY)
+
+ matcher = PayloadMatcher({'name': net_view_name})
+ connector.get_object.assert_called_once_with(
+ 'networkview', matcher, None, proxy=False)
+ assert not connector.create_object.called
+
+ def test_get_member_gets_member_object(self):
+ connector = mock.Mock()
+ connector.get_object.return_value = None
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ member = objects.Member(name='member1', ip='some-ip')
+
+ ibom.get_member(member)
+
+ matcher = PayloadMatcher({'host_name': member.name})
+ connector.get_object.assert_called_once_with('member', matcher)
+
+ def test_restart_services_calls_infoblox_function(self):
+ connector = mock.Mock()
+ connector.get_object.return_value = mock.MagicMock()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ member = objects.Member(name='member1', ip='some-ip')
+
+ ibom.restart_all_services(member)
+
+ connector.call_func.assert_called_once_with(
+ 'restartservices', mock.ANY, mock.ANY)
+
+ def test_update_network_updates_object(self):
+ ref = 'infoblox_object_id'
+ opts = 'infoblox_options'
+
+ connector = mock.Mock()
+ ib_network = mock.Mock()
+ ib_network.ref = ref
+ ib_network.options = opts
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.update_network_options(ib_network)
+
+ connector.update_object.assert_called_once_with(ref, {'options': opts},
+ mock.ANY)
+
+ def test_update_network_updates_eas_if_not_null(self):
+ ref = 'infoblox_object_id'
+ opts = 'infoblox_options'
+ eas = 'some-eas'
+
+ connector = mock.Mock()
+ ib_network = mock.Mock()
+ ib_network.ref = ref
+ ib_network.options = opts
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.update_network_options(ib_network, eas)
+
+ connector.update_object.assert_called_once_with(
+ ref, {'options': opts, 'extattrs': eas}, mock.ANY)
+
+ def test_member_is_assigned_as_list_on_network_create(self):
+ net_view = 'net-view-name'
+ cidr = '192.168.1.0/24'
+ nameservers = []
+ members = [
+ objects.Member(name='just-a-single-member-ip', ip='some-ip')
+ ]
+ gateway_ip = '192.168.1.1'
+ expected_members = members[0].ip
+ extattrs = mock.Mock()
+
+ connector = mock.Mock()
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.create_network(net_view, cidr, nameservers, members, gateway_ip,
+ extattrs)
+
+ assert not connector.get_object.called
+ matcher = PayloadMatcher({'ipv4addr': expected_members})
+ connector.create_object.assert_called_once_with('network', matcher,
+ None)
+
+ def test_create_ip_range_creates_range_object(self):
+ net_view = 'net-view-name'
+ start_ip = '192.168.1.1'
+ end_ip = '192.168.1.123'
+ disable = False
+
+ connector = mock.Mock()
+ connector.get_object.return_value = None
+
+ ibom = om.InfobloxObjectManipulator(connector)
+ ibom.create_ip_range(net_view, start_ip, end_ip, None,
+ disable, mock.ANY)
+
+ assert connector.get_object.called
+ matcher = PayloadMatcher({'start_addr': start_ip,
+ 'end_addr': end_ip,
+ 'network_view': net_view,
+ 'disable': disable})
+ connector.create_object.assert_called_once_with('range', matcher,
+ mock.ANY)
+
+ def test_delete_ip_range_deletes_infoblox_object(self):
+ net_view = 'net-view-name'
+ start_ip = '192.168.1.1'
+ end_ip = '192.168.1.123'
+
+ connector = mock.Mock()
+ connector.get_object.return_value = mock.MagicMock()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.delete_ip_range(net_view, start_ip, end_ip)
+
+ matcher = PayloadMatcher({'start_addr': start_ip,
+ 'end_addr': end_ip,
+ 'network_view': net_view})
+ connector.get_object.assert_called_once_with('range', matcher,
+ None, proxy=False)
+ connector.delete_object.assert_called_once_with(mock.ANY)
+
+ def test_delete_network_deletes_infoblox_network(self):
+ net_view = 'net-view-name'
+ cidr = '192.168.1.0/24'
+
+ connector = mock.Mock()
+ connector.get_object.return_value = mock.MagicMock()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.delete_network(net_view, cidr)
+
+ matcher = PayloadMatcher({'network_view': net_view,
+ 'network': cidr})
+ connector.get_object.assert_called_once_with('network', matcher,
+ None, proxy=False)
+ connector.delete_object.assert_called_once_with(mock.ANY)
+
+ def test_delete_network_view_deletes_infoblox_object(self):
+ net_view = 'net-view-name'
+
+ connector = mock.Mock()
+ connector.get_object.return_value = mock.MagicMock()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.delete_network_view(net_view)
+
+ matcher = PayloadMatcher({'name': net_view})
+ connector.get_object.assert_called_once_with(
+ 'networkview', matcher, None, proxy=False)
+ connector.delete_object.assert_called_once_with(mock.ANY)
+
+ def test_bind_names_updates_host_record(self):
+ dns_view_name = 'dns-view-name'
+ fqdn = 'host.global.com'
+ ip = '192.168.1.1'
+
+ connector = mock.Mock()
+ connector.get_object.return_value = mock.MagicMock()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.bind_name_with_host_record(dns_view_name, ip,
+ fqdn, mock.ANY)
+
+ matcher = PayloadMatcher({'view': dns_view_name,
+ PayloadMatcher.ANYKEY: ip})
+ connector.get_object.assert_called_once_with('record:host',
+ matcher,
+ None,
+ proxy=False)
+
+ matcher = PayloadMatcher({'name': fqdn})
+ connector.update_object.assert_called_once_with(mock.ANY,
+ matcher,
+ mock.ANY)
+
+ def test_create_dns_zone_creates_zone_auth_object(self):
+ dns_view_name = 'dns-view-name'
+ fqdn = 'host.global.com'
+ member = objects.Member(name='member_name', ip='some-ip')
+ zone_format = 'IPV4'
+
+ connector = mock.Mock()
+ connector.get_object.return_value = None
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.create_dns_zone(dns_view_name, fqdn, member,
+ zone_format=zone_format)
+
+ matcher = PayloadMatcher({'view': dns_view_name,
+ 'fqdn': fqdn})
+ connector.get_object.assert_called_once_with('zone_auth',
+ matcher,
+ None,
+ proxy=False)
+
+ matcher = PayloadMatcher({'view': dns_view_name,
+ 'fqdn': fqdn,
+ 'zone_format': zone_format,
+ 'name': member.name})
+ connector.create_object.assert_called_once_with('zone_auth',
+ matcher,
+ None)
+
+ def test_create_dns_zone_with_grid_secondaries(self):
+ dns_view_name = 'dns-view-name'
+ fqdn = 'host.global.com'
+ primary_dns_member = objects.Member(name='member_primary',
+ ip='some-ip')
+ secondary_dns_members = [objects.Member(name='member_secondary',
+ ip='some-ip')]
+ zone_format = 'IPV4'
+
+ connector = mock.Mock()
+ connector.get_object.return_value = None
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.create_dns_zone(dns_view_name, fqdn, primary_dns_member,
+ secondary_dns_members,
+ zone_format=zone_format)
+
+ matcher = PayloadMatcher({'view': dns_view_name,
+ 'fqdn': fqdn})
+ connector.get_object.assert_called_once_with('zone_auth',
+ matcher,
+ None,
+ proxy=False)
+
+ payload = {'view': dns_view_name,
+ 'fqdn': fqdn,
+ 'zone_format': zone_format,
+ 'grid_primary': [{'name': primary_dns_member.name,
+ '_struct': 'memberserver'}],
+ 'grid_secondaries': [{'name': member.name,
+ '_struct': 'memberserver'}
+ for member in secondary_dns_members],
+ 'extattrs': {}
+ }
+ connector.create_object.assert_called_once_with('zone_auth',
+ payload,
+ None)
+
+ def test_create_host_record_throws_exception_on_error(self):
+ dns_view_name = 'dns-view-name'
+ hostname = 'host.global.com'
+ mac = 'aa:bb:cc:dd:ee:ff'
+ ip = '192.168.1.1'
+ zone_auth = 'my.auth.zone.com'
+
+ connector = mock.Mock()
+ response = {'text': "Cannot find 1 available IP"}
+
+ connector.create_object.side_effect = \
+ exceptions.InfobloxCannotCreateObject(response=response,
+ objtype='host:record',
+ content='adsfasd',
+ code='1234')
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ self.assertRaises(exceptions.InfobloxCannotAllocateIp,
+ ibom.create_host_record_for_given_ip,
+ dns_view_name, zone_auth, hostname, mac, ip,
+ mock.ANY)
+
+ def test_create_dns_view_creates_view_object(self):
+ net_view_name = 'net-view-name'
+ dns_view_name = 'dns-view-name'
+
+ connector = mock.Mock()
+ connector.get_object.return_value = None
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.create_dns_view(net_view_name, dns_view_name)
+
+ matcher = PayloadMatcher({'name': dns_view_name,
+ 'network_view': net_view_name})
+ connector.get_object.assert_called_once_with('view', matcher,
+ None, proxy=False)
+ connector.create_object.assert_called_once_with('view', matcher,
+ None)
+
+ def test_default_net_view_is_never_deleted(self):
+ connector = mock.Mock()
+
+ ibom = om.InfobloxObjectManipulator(connector)
+
+ ibom.delete_network_view('default')
+
+ assert not connector.delete_object.called
diff --git a/neutron/tests/unit/ipam/drivers/infoblox/test_objects.py b/neutron/tests/unit/ipam/drivers/infoblox/test_objects.py
new file mode 100755
index 0000000..c8531ad
--- /dev/null
+++ b/neutron/tests/unit/ipam/drivers/infoblox/test_objects.py
@@ -0,0 +1,353 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from testtools import matchers
+
+from neutron.ipam.drivers.infoblox import objects
+from neutron.openstack.common import jsonutils
+from neutron.tests import base
+
+
+class InfobloxNetworkObjectTestCase(base.BaseTestCase):
+ def setUp(self):
+ super(InfobloxNetworkObjectTestCase, self).setUp()
+ self.network_object_dict = jsonutils.loads("""[
+ {
+ "_ref": "network/ZG5zLldHdvcmskMTAuMzkuMTE:10.39.11.0/24/default",
+ "members": [
+ {
+ "_struct": "dhcpmember",
+ "ipv4addr": "10.39.11.123",
+ "name": "infoblox.localdomain"
+ }
+ ],
+ "options": [
+ {
+ "name": "dhcp-lease-time",
+ "num": 51,
+ "use_option": false,
+ "value": "43200",
+ "vendor_class": "DHCP"
+ },
+ {
+ "name": "domain-name-servers",
+ "num": 6,
+ "use_option": true,
+ "value": "10.39.11.123,10.39.11.124,10.39.11.125",
+ "vendor_class": "DHCP"
+ },
+ {
+ "name": "routers",
+ "num": 3,
+ "use_option": false,
+ "value": "10.39.11.1",
+ "vendor_class": "DHCP"
+ }
+ ]
+ }
+ ]""")[0]
+
+ def _get_nameservers_opt(self, network):
+ nameservers_opts = filter(lambda opt:
+ opt['name'] ==
+ objects.Network.DNS_NAMESERVERS_OPTION,
+ network['options'])
+ if nameservers_opts:
+ return nameservers_opts[0]
+ return None
+
+ def test_has_members(self):
+ net = objects.Network.from_dict(self.network_object_dict)
+ self.assertTrue(net.members)
+
+ def test_update_member_ip_modifies_member_ip(self):
+ net = objects.Network.from_dict(self.network_object_dict)
+ new_ip = '!!!NEW_IP!!!'
+ net.update_member_ip_in_dns_nameservers(new_ip)
+ self.assertIn(new_ip, net.dns_nameservers)
+
+ def test_get_dns_nameservers(self):
+ net = objects.Network.from_dict(self.network_object_dict)
+ servers = "10.39.11.123,10.39.11.124,10.39.11.125".split(',')
+ self.assertEqual(servers, net.dns_nameservers)
+
+ def test_get_dns_nameservers_no_option(self):
+ nameservers_opt = self._get_nameservers_opt(self.network_object_dict)
+ self.network_object_dict['options'].remove(nameservers_opt)
+ net = objects.Network.from_dict(self.network_object_dict)
+ self.assertEqual([], net.dns_nameservers)
+
+ def test_get_dns_nameservers_use_option_false(self):
+ nameservers_opt = self._get_nameservers_opt(self.network_object_dict)
+ nameservers_opt['use_option'] = False
+ net = objects.Network.from_dict(self.network_object_dict)
+ self.assertEqual([], net.dns_nameservers)
+
+ def test_set_dns_nameservers(self):
+ net = objects.Network.from_dict(self.network_object_dict)
+ net.dns_nameservers = ['1.1.1.1', '3.3.3.3']
+ net_dict = net.to_dict()
+ nameservers_opt = self._get_nameservers_opt(net_dict)
+ self.assertEqual('1.1.1.1,3.3.3.3', nameservers_opt['value'])
+ self.assertTrue(nameservers_opt['use_option'])
+
+ def test_set_dns_nameservers_empty_val(self):
+ net = objects.Network.from_dict(self.network_object_dict)
+ net.dns_nameservers = []
+ net_dict = net.to_dict()
+ nameservers_opt = self._get_nameservers_opt(net_dict)
+ self.assertFalse(nameservers_opt['use_option'])
+
+ def test_set_dns_nameservers_no_previous_option(self):
+ nameservers_opt = self._get_nameservers_opt(self.network_object_dict)
+ self.network_object_dict['options'].remove(nameservers_opt)
+ net = objects.Network.from_dict(self.network_object_dict)
+ net.dns_nameservers = ['7.7.7.7', '8.8.8.8']
+ net_dict = net.to_dict()
+ nameservers_opt = self._get_nameservers_opt(net_dict)
+ self.assertEqual('7.7.7.7,8.8.8.8', nameservers_opt['value'])
+ self.assertTrue(nameservers_opt['use_option'])
+
+ def test_set_dns_nameservers_no_previous_option_and_empty_val(self):
+ nameservers_opt = self._get_nameservers_opt(self.network_object_dict)
+ self.network_object_dict['options'].remove(nameservers_opt)
+ net = objects.Network.from_dict(self.network_object_dict)
+ net.dns_nameservers = []
+ net_dict = net.to_dict()
+ nameservers_opt = self._get_nameservers_opt(net_dict)
+ self.assertIsNone(nameservers_opt)
+
+
+class InfobloxIPv4ObjectTestCase(base.BaseTestCase):
+ def test_removes_correct_object_from_list_of_ips(self):
+ removed_ip = '192.168.1.2'
+ ips = [
+ objects.IPv4(ip='192.168.1.1'),
+ objects.IPv4(ip=removed_ip),
+ objects.IPv4(ip='192.168.1.3')
+ ]
+
+ ips.remove(removed_ip)
+
+ self.assertEqual(len(ips), 2)
+ for ip in ips:
+ self.assertTrue(ip.ip != removed_ip)
+
+
+class InfobloxIPv4HostRecordObjectTestCase(base.BaseTestCase):
+ def setUp(self):
+ super(InfobloxIPv4HostRecordObjectTestCase, self).setUp()
+ host_record_ref = ("record:host/ZG5zLmhvc3QkLjY3OC5jb20uZ2xvYmFsLmNs"
+ "b3VkLnRlc3RzdWJuZXQudGVzdF9ob3N0X25hbWU:"
+ "test_host_name.testsubnet.cloud.global.com/"
+ "default.687401e9f7a7471abbf301febf99854e")
+ ipv4addrs_ref = ("record:host_ipv4addr/ZG5zLmhvc3RfYWRkcmVzcyQuNjc"
+ "4LmNvbS5nbG9iYWwuY2xvdWQudGVzdHN1Ym5ldC50ZXN0X2h"
+ "vc3RfbmFtZS4xOTIuMTY4LjAuNS4:192.168.0.5/"
+ "test_host_name.testsubnet.cloud.global.com/"
+ "default.687401e9f7a7471abbf301febf99854e")
+ self.host_record = jsonutils.loads("""{
+ "_ref": "%s",
+ "ipv4addrs": [
+ {
+ "_ref": "%s",
+ "configure_for_dhcp": false,
+ "host": "test_host_name.testsubnet.cloud.global.com",
+ "ipv4addr": "192.168.0.5",
+ "mac": "aa:bb:cc:dd:ee:ff"
+ }
+ ]
+ }
+ """ % (host_record_ref, ipv4addrs_ref))
+
+ def test_constructs_object_from_dict(self):
+ host_record = objects.HostRecordIPv4.from_dict(self.host_record)
+ self.assertIsNotNone(host_record)
+
+ def test_hostname_is_set_from_dict(self):
+ expected_hostname = 'expected_hostname'
+ expected_dns_zone = 'expected.dns.zone.com'
+ self.host_record['ipv4addrs'][0]['host'] = '.'.join(
+ [expected_hostname, expected_dns_zone])
+ host_record = objects.HostRecordIPv4.from_dict(self.host_record)
+
+ self.assertEqual(expected_hostname, host_record.hostname)
+ self.assertEqual(expected_dns_zone, host_record.zone_auth)
+
+ def test_all_attributes_are_set_from_dict(self):
+ expected_attributes = ['hostname', 'dns_view', 'mac', 'ip']
+ hr = objects.HostRecordIPv4.from_dict(self.host_record)
+ self.assertTrue(all([expected in dir(hr)
+ for expected in expected_attributes]))
+
+
+class InfobloxIPv6HostRecordObjectTestCase(base.BaseTestCase):
+ def setUp(self):
+ super(InfobloxIPv6HostRecordObjectTestCase, self).setUp()
+ host_record_ref = ("record:host/ZG5zLmhvc3QkLjY3OC5jb20uZ2xvYmFsLmNs"
+ "b3VkLnRlc3RzdWJuZXQudGVzdF9ob3N0X25hbWU:"
+ "test_host_name.testsubnet.cloud.global.com/"
+ "default.687401e9f7a7471abbf301febf99854e")
+        ipv6addrs_ref = ("record:host_ipv6addr/ZG5zLmhvc3RfYWRkcmVzcyQuNjc"
+ "4LmNvbS5nbG9iYWwuY2xvdWQudGVzdHN1Ym5ldC50ZXN0X2h"
+ "vc3RfbmFtZS4xOTIuMTY4LjAuNS4:2001:DB8::3/"
+ "test_host_name.testsubnet.cloud.global.com/"
+ "default.687401e9f7a7471abbf301febf99854e")
+ self.host_record = jsonutils.loads("""{
+ "_ref": "%s",
+ "ipv6addrs": [
+ {
+ "_ref": "%s",
+ "configure_for_dhcp": false,
+ "host": "test_host_name.testsubnet.cloud.global.com",
+ "ipv6addr": "2001:DB8::3",
+ "mac": "aa:bb:cc:dd:ee:ff"
+ }
+ ]
+ }
+ """ % (host_record_ref, ipv6addrs_ref))
+
+ def test_constructs_object_from_dict(self):
+ host_record = objects.HostRecordIPv6.from_dict(self.host_record)
+ self.assertIsNotNone(host_record)
+
+ def test_hostname_is_set_from_dict(self):
+ expected_hostname = 'expected_hostname'
+ expected_dns_zone = 'expected.dns.zone.com'
+ self.host_record['ipv6addrs'][0]['host'] = '.'.join(
+ [expected_hostname, expected_dns_zone])
+ host_record = objects.HostRecordIPv6.from_dict(self.host_record)
+
+ self.assertEqual(expected_hostname, host_record.hostname)
+ self.assertEqual(expected_dns_zone, host_record.zone_auth)
+
+ def test_all_attributes_are_set_from_dict(self):
+ expected_attributes = ['hostname', 'dns_view', 'mac', 'ip']
+ hr = objects.HostRecordIPv6.from_dict(self.host_record)
+ self.assertTrue(all([expected in dir(hr)
+ for expected in expected_attributes]))
+
+
+class FixedAddressIPv4TestCase(base.BaseTestCase):
+ def test_builds_valid_fa_from_infoblox_returned_json(self):
+ fixed_address_ref = ("fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLj"
+ "EwMC42ODAuLg:10.0.0.100/rv-test-netview")
+ ip = "10.0.0.100"
+ fixed_address = jsonutils.loads("""{
+ "_ref": "%s",
+ "ipv4addr": "%s"
+ }""" % (fixed_address_ref, ip))
+
+ fa = objects.FixedAddressIPv4.from_dict(fixed_address)
+ self.assertEqual(fa.ip, ip)
+
+ def test_dict_contains_mac_ip_and_net_view(self):
+ expected_ip = "1.2.3.4"
+ expected_mac = "aa:bb:cc:dd:ee:ff"
+ expected_net_view = "test-net-view-name"
+ expected_extattrs = "test-extattrs"
+
+ expected_dict = {
+ 'mac': expected_mac,
+ 'ipv4addr': expected_ip,
+ 'network_view': expected_net_view,
+ 'extattrs': expected_extattrs
+ }
+
+ fa = objects.FixedAddressIPv4()
+ fa.ip = expected_ip
+ fa.net_view = expected_net_view
+ fa.mac = expected_mac
+ fa.extattrs = expected_extattrs
+
+ self.assertThat(fa.to_dict(), matchers.KeysEqual(expected_dict))
+ self.assertThat(fa.to_dict(), matchers.Equals(expected_dict))
+
+
+class FixedAddressIPv6TestCase(base.BaseTestCase):
+ def test_builds_valid_fa_from_infoblox_returned_json(self):
+ fixed_address_ref = ("fixedaddress/ZG5zLmZpeGVkX2FkZHJlc3MkMTAuMC4wLj"
+ "EwMC42ODAuLg:10.0.0.100/rv-test-netview")
+ ip = "2001:DB8::3"
+ fixed_address = jsonutils.loads("""{
+ "_ref": "%s",
+ "ipv6addr": "%s"
+ }""" % (fixed_address_ref, ip))
+
+ fa = objects.FixedAddressIPv6.from_dict(fixed_address)
+ self.assertEqual(fa.ip, ip)
+
+ @mock.patch.object(objects, 'generate_duid',
+ mock.Mock(return_value=None))
+ def test_dict_contains_mac_ip_and_net_view(self):
+ expected_ip = "2001:DB8::3"
+ duid = "aa:bb:cc:dd:ee:ff"
+ expected_duid = "00:03:00:01:aa:bb:cc:dd:ee:ff"
+ expected_net_view = "test-net-view-name"
+ expected_extattrs = "test-extattrs"
+
+ objects.generate_duid.return_value = expected_duid
+
+ expected_dict = {
+ 'duid': expected_duid,
+ 'ipv6addr': expected_ip,
+ 'network_view': expected_net_view,
+ 'extattrs': expected_extattrs
+ }
+
+ fa = objects.FixedAddressIPv6()
+ fa.ip = expected_ip
+ fa.net_view = expected_net_view
+ fa.mac = duid
+ fa.extattrs = expected_extattrs
+
+ self.assertThat(fa.to_dict(), matchers.KeysEqual(expected_dict))
+ self.assertThat(fa.to_dict(), matchers.Equals(expected_dict))
+
+
+class MemberTestCase(base.BaseTestCase):
+ def test_two_identical_members_are_equal(self):
+ ip = 'some-ip'
+ name = 'some-name'
+
+ m1 = objects.Member(ip, name)
+ m2 = objects.Member(ip, name)
+
+ self.assertEqual(m1, m2)
+
+
+class IPAllocationObjectTestCase(base.BaseTestCase):
+ def test_next_available_ip_returns_properly_formatted_string(self):
+ net_view = 'expected_net_view_name'
+ first_ip = '1.2.3.4'
+ last_ip = '1.2.3.14'
+ cidr = '1.2.3.0/24'
+
+ naip = objects.IPAllocationObject.next_available_ip_from_range(
+ net_view, first_ip, last_ip)
+
+ self.assertTrue(isinstance(naip, basestring))
+ self.assertTrue(naip.startswith('func:nextavailableip:'))
+ self.assertTrue(naip.endswith(
+ '{first_ip}-{last_ip},{net_view}'.format(**locals())))
+
+ naip = objects.IPAllocationObject.next_available_ip_from_cidr(
+ net_view, cidr)
+
+ self.assertTrue(isinstance(naip, basestring))
+ self.assertTrue(naip.startswith('func:nextavailableip:'))
+ self.assertTrue(naip.endswith(
+ '{cidr},{net_view}'.format(**locals())))
diff --git a/neutron/tests/unit/ipam/drivers/infoblox/test_pattern_builder.py b/neutron/tests/unit/ipam/drivers/infoblox/test_pattern_builder.py
new file mode 100644
index 0000000..acae9ac
--- /dev/null
+++ b/neutron/tests/unit/ipam/drivers/infoblox/test_pattern_builder.py
@@ -0,0 +1,114 @@
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from neutron.ipam.drivers.infoblox import config
+from neutron.ipam.drivers.infoblox import exceptions
+from neutron.tests import base
+
+
+class PatternBuilderTestCase(base.BaseTestCase):
+ def test_dots_in_ip_address_replaced_with_dashes(self):
+ context = mock.MagicMock()
+ subnet = mock.MagicMock()
+ ip = '192.168.1.1'
+
+ res = config.PatternBuilder("{ip_address}").build(
+ context, subnet, ip_addr=ip)
+
+ self.assertEqual(res, ip.replace('.', '-'))
+
+ def test_raises_error_if_pattern_is_invalid(self):
+ context = mock.MagicMock()
+ subnet = mock.MagicMock()
+
+ pb = config.PatternBuilder("{}")
+ self.assertRaises(exceptions.InfobloxConfigException,
+ pb.build, context, subnet)
+
+ pb = config.PatternBuilder("start..end")
+ self.assertRaises(exceptions.InfobloxConfigException,
+ pb.build, context, subnet)
+
+ pb = config.PatternBuilder("{non-existing-variable}")
+ self.assertRaises(exceptions.InfobloxConfigException,
+ pb.build, context, subnet)
+
+ def test_subnet_id_used_if_subnet_has_no_name(self):
+ context = mock.MagicMock()
+ subnet = mock.MagicMock()
+ subnet_id = 'some-id'
+
+ def get_id(item):
+ if item == 'id':
+ return subnet_id
+ return None
+ subnet.__getitem__.side_effect = get_id
+
+ pb = config.PatternBuilder("{subnet_name}")
+ built = pb.build(context, subnet)
+
+ self.assertEqual(built, subnet_id)
+
+ def test_value_is_built_using_pattern(self):
+ context = mock.MagicMock()
+ subnet = mock.MagicMock()
+ subnet_name = 'subnet-name'
+ ip_address = 'ip_address'
+
+ def get_id(item):
+ if item == 'name':
+ return subnet_name
+ else:
+ return None
+ subnet.__getitem__.side_effect = get_id
+
+ pattern = "host-{ip_address}.{subnet_name}.custom_stuff"
+ pb = config.PatternBuilder(pattern)
+ built = pb.build(context, subnet, ip_addr=ip_address)
+
+ self.assertEqual(built, pattern.format(subnet_name=subnet_name,
+ ip_address=ip_address))
+
+ def test_all_required_pattern_variables_are_supported(self):
+ required_variables = [
+ 'tenant_id', 'instance_id', 'ip_address',
+ 'ip_address_octet1', 'ip_address_octet2', 'ip_address_octet3',
+ 'ip_address_octet4', 'subnet_id', 'subnet_name', 'user_id',
+ 'network_id', 'network_name'
+ ]
+
+ pattern = '.'.join(['{%s}' % v for v in required_variables])
+ context = mock.Mock()
+ context.user_id = 'user-id'
+ subnet = {
+ 'network_id': 'some-net-id',
+ 'tenant_id': 'some-tenant-id',
+ 'id': 'some-subnet-id',
+ 'name': 'some-subnet-name'
+ }
+ port = {
+ 'id': 'some-port-id',
+ 'device_id': 'some-device-id'
+ }
+ ip_addr = '10.0.0.3'
+
+ pb = config.PatternBuilder(pattern)
+
+ try:
+ pb.build(context, subnet, port, ip_addr)
+ except exceptions.InvalidPattern as e:
+ self.fail('Unexpected exception: {}'.format(e))
diff --git a/neutron/tests/unit/ipam/test_base.py b/neutron/tests/unit/ipam/test_base.py
new file mode 100644
index 0000000..24a7a8a
--- /dev/null
+++ b/neutron/tests/unit/ipam/test_base.py
@@ -0,0 +1,181 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2014 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from neutron.common import exceptions as q_exc
+from neutron.ipam.drivers import neutron_ipam
+from neutron.tests import base
+
+
+class SubnetCreateTestCase(base.BaseTestCase):
+ def test_subnet_is_created(self):
+ dns_controller = mock.Mock()
+ dhcp_controller = mock.Mock()
+ ipam_controller = mock.MagicMock()
+ db_manager = mock.Mock()
+ context = mock.MagicMock()
+ subnet = mock.Mock()
+
+ ipam_driver = neutron_ipam.NeutronIPAM(
+ dhcp_controller=dhcp_controller,
+ dns_controller=dns_controller,
+ ipam_controller=ipam_controller,
+ db_mgr=db_manager)
+
+ ipam_driver.create_subnet(context, subnet)
+
+ assert ipam_controller.create_subnet.called_once
+
+ def test_dhcp_is_configured(self):
+ dns_controller = mock.Mock()
+ dhcp_controller = mock.Mock()
+ ipam_controller = mock.MagicMock()
+ db_manager = mock.Mock()
+ context = mock.MagicMock()
+ subnet = mock.Mock()
+
+ ipam_driver = neutron_ipam.NeutronIPAM(
+ dhcp_controller=dhcp_controller,
+ dns_controller=dns_controller,
+ ipam_controller=ipam_controller,
+ db_mgr=db_manager)
+
+ ipam_driver.create_subnet(context, subnet)
+
+ assert dhcp_controller.configure_dhcp.called_once
+
+
+class SubnetDeleteTestCase(base.BaseTestCase):
+ def test_dns_zones_are_deleted(self):
+ dns_controller = mock.Mock()
+ dhcp_controller = mock.Mock()
+ ipam_controller = mock.Mock()
+ db_manager = mock.Mock()
+ context = mock.MagicMock()
+ subnet_id = mock.Mock()
+
+ ipam_driver = neutron_ipam.NeutronIPAM(
+ dhcp_controller=dhcp_controller,
+ dns_controller=dns_controller,
+ ipam_controller=ipam_controller,
+ db_mgr=db_manager)
+
+ db_manager.subnet_has_ports_allocated.return_value = False
+ db_manager.get_subnet_ports.return_value = []
+ ipam_driver.delete_subnet(context, subnet_id)
+
+ assert dns_controller.delete_dns_zones.called_once
+
+ def test_dhcp_gets_disabled(self):
+ dns_controller = mock.Mock()
+ dhcp_controller = mock.Mock()
+ ipam_controller = mock.Mock()
+ db_manager = mock.Mock()
+ context = mock.MagicMock()
+ subnet_id = mock.Mock()
+
+ ipam_driver = neutron_ipam.NeutronIPAM(
+ dhcp_controller=dhcp_controller,
+ dns_controller=dns_controller,
+ ipam_controller=ipam_controller,
+ db_mgr=db_manager)
+
+ db_manager.subnet_has_ports_allocated.return_value = False
+ db_manager.get_subnet_ports.return_value = []
+ ipam_driver.delete_subnet(context, subnet_id)
+
+ assert dhcp_controller.disable_dhcp.called_once
+
+ def test_subnet_is_deleted_from_ipam(self):
+ dns_controller = mock.Mock()
+ dhcp_controller = mock.Mock()
+ ipam_controller = mock.Mock()
+ db_manager = mock.Mock()
+ context = mock.MagicMock()
+ subnet_id = mock.Mock()
+
+ ipam_driver = neutron_ipam.NeutronIPAM(
+ dhcp_controller=dhcp_controller,
+ dns_controller=dns_controller,
+ ipam_controller=ipam_controller,
+ db_mgr=db_manager)
+
+ db_manager.subnet_has_ports_allocated.return_value = False
+ db_manager.get_subnet_ports.return_value = []
+ ipam_driver.delete_subnet(context, subnet_id)
+
+ assert ipam_controller.delete_subnet.called_once
+
+ def test_raises_error_if_subnet_has_active_ports(self):
+ dns_controller = mock.Mock()
+ dhcp_controller = mock.Mock()
+ ipam_controller = mock.MagicMock()
+ db_manager = mock.Mock()
+ context = mock.MagicMock()
+ subnet_id = mock.Mock()
+
+ ipam_driver = neutron_ipam.NeutronIPAM(
+ dhcp_controller=dhcp_controller,
+ dns_controller=dns_controller,
+ ipam_controller=ipam_controller,
+ db_mgr=db_manager)
+
+ db_manager.subnet_has_ports_allocated.return_value = True
+ db_manager.get_subnet_ports.return_value = [mock.MagicMock()]
+ self.assertRaises(q_exc.SubnetInUse, ipam_driver.delete_subnet,
+ context, subnet_id)
+
+
+class AllocateIPTestCase(base.BaseTestCase):
+ def test_allocates_ip(self):
+ dns_controller = mock.Mock()
+ dhcp_controller = mock.Mock()
+ ipam_controller = mock.Mock()
+ db_manager = mock.Mock()
+ context = mock.MagicMock()
+ host = mock.MagicMock()
+ ip = mock.Mock()
+
+ ipam_driver = neutron_ipam.NeutronIPAM(
+ dhcp_controller=dhcp_controller,
+ dns_controller=dns_controller,
+ ipam_controller=ipam_controller,
+ db_mgr=db_manager)
+
+ ipam_driver.allocate_ip(context, host, ip)
+
+ assert ipam_controller.allocate_ip.called_once
+
+ def test_binds_mac(self):
+ dns_controller = mock.Mock()
+ dhcp_controller = mock.Mock()
+ ipam_controller = mock.Mock()
+ db_manager = mock.Mock()
+ context = mock.MagicMock()
+ host = mock.MagicMock()
+ ip = mock.Mock()
+
+ ipam_driver = neutron_ipam.NeutronIPAM(
+ dhcp_controller=dhcp_controller,
+ dns_controller=dns_controller,
+ ipam_controller=ipam_controller,
+ db_mgr=db_manager)
+
+ ipam_driver.allocate_ip(context, host, ip)
+
+ assert dhcp_controller.bind_mac.called_once
diff --git a/neutron/tests/unit/test_db_plugin.py b/neutron/tests/unit/test_db_plugin.py
index 1f91e74..812fb2a 100644
--- a/neutron/tests/unit/test_db_plugin.py
+++ b/neutron/tests/unit/test_db_plugin.py
@@ -20,6 +20,7 @@ import itertools
import mock
from oslo.config import cfg
from testtools import matchers
+from testtools import testcase
import webob.exc
import neutron
@@ -33,8 +34,10 @@ from neutron.common import ipv6_utils
from neutron.common import test_lib
from neutron.common import utils
from neutron import context
+from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import models_v2
+from neutron.ipam.drivers import neutron_ipam
from neutron import manager
from neutron.openstack.common import importutils
from neutron.tests import base
@@ -90,6 +93,12 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase,
if not plugin:
plugin = DB_PLUGIN_KLASS
+ # Create the default configurations
+ args = ['--config-file', etcdir('neutron.conf.test')]
+ # If test_config specifies some config-file, use it, as well
+ for config_file in test_config.get('config_files', []):
+ args.extend(['--config-file', config_file])
+ config.parse(args=args)
# Update the plugin
self.setup_coreplugin(plugin)
cfg.CONF.set_override(
@@ -145,6 +154,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase,
self._skip_native_pagination = None
self._skip_native_sortin = None
self.ext_api = None
+ # NOTE(jkoelker) for a 'pluggable' framework, Neutron sure
+ # doesn't like when the plugin changes ;)
+ db.clear_db()
# Restore the original attribute map
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk
super(NeutronDbPluginV2TestCase, self).tearDown()
@@ -521,10 +533,17 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase,
def network(self, name='net1',
admin_state_up=True,
fmt=None,
+ do_delete=True,
**kwargs):
network = self._make_network(fmt or self.fmt, name,
admin_state_up, **kwargs)
yield network
+ if do_delete:
+ # The do_delete parameter allows you to control whether the
+ # created network is immediately deleted again. Therefore, this
+ # function is also usable in tests, which require the creation
+ # of many networks.
+ self._delete('networks', network['network']['id'])
@contextlib.contextmanager
def subnet(self, network=None,
@@ -537,6 +556,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase,
dns_nameservers=None,
host_routes=None,
shared=None,
+ do_delete=True,
ipv6_ra_mode=None,
ipv6_address_mode=None):
with optional_ctx(network, self.network) as network_to_use:
@@ -553,13 +573,18 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase,
ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_address_mode)
yield subnet
+ if do_delete:
+ self._delete('subnets', subnet['subnet']['id'])
@contextlib.contextmanager
- def port(self, subnet=None, fmt=None, **kwargs):
+ def port(self, subnet=None, fmt=None, no_delete=False,
+ **kwargs):
with optional_ctx(subnet, self.subnet) as subnet_to_use:
net_id = subnet_to_use['subnet']['network_id']
port = self._make_port(fmt or self.fmt, net_id, **kwargs)
yield port
+ if not no_delete:
+ self._delete('ports', port['port']['id'])
def _test_list_with_sort(self, resource,
items, sorts, resources=None, query_params=''):
@@ -576,7 +601,8 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase,
resource = resource.replace('-', '_')
resources = resources.replace('-', '_')
expected_res = [item[resource]['id'] for item in items]
- self.assertEqual(expected_res, [n['id'] for n in res[resources]])
+ self.assertEqual(sorted([n['id'] for n in res[resources]]),
+ sorted(expected_res))
def _test_list_with_pagination(self, resource, items, sort,
limit, expected_page_num,
@@ -610,8 +636,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase,
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(expected_page_num, page_num)
- self.assertEqual([item[resource][verify_key] for item in items],
- [n[verify_key] for n in items_res])
+ self.assertEqual(sorted([item[resource][verify_key]
+ for item in items]),
+ sorted([n[verify_key] for n in items_res]))
def _test_list_with_pagination_reverse(self, resource, items, sort,
limit, expected_page_num,
@@ -650,7 +677,8 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase,
self.assertEqual(expected_page_num, page_num)
expected_res = [item[resource]['id'] for item in items]
expected_res.reverse()
- self.assertEqual(expected_res, [n['id'] for n in item_res])
+ self.assertEqual(sorted(expected_res),
+ sorted([n['id'] for n in item_res]))
class TestBasicGet(NeutronDbPluginV2TestCase):
@@ -777,7 +805,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
self.assertEqual('myname', port['port']['name'])
def test_create_port_as_admin(self):
- with self.network() as network:
+ with self.network(do_delete=False) as network:
self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
@@ -989,17 +1017,20 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
- cfg.CONF.set_default('allow_overlapping_ips', True)
- with contextlib.nested(self.port(admin_state_up='True',
- mac_address='00:00:00:00:00:01'),
- self.port(admin_state_up='False',
- mac_address='00:00:00:00:00:02'),
- self.port(admin_state_up='False',
- mac_address='00:00:00:00:00:03')
- ) as (port1, port2, port3):
- self._test_list_with_sort('port', (port3, port2, port1),
- [('admin_state_up', 'asc'),
- ('mac_address', 'desc')])
+ try:
+ cfg.CONF.set_default('allow_overlapping_ips', True)
+ with contextlib.nested(self.port(admin_state_up='True',
+ mac_address='00:00:00:00:00:01'),
+ self.port(admin_state_up='False',
+ mac_address='00:00:00:00:00:02'),
+ self.port(admin_state_up='False',
+ mac_address='00:00:00:00:00:03')
+ ) as (port1, port2, port3):
+ self._test_list_with_sort('port', (port3, port2, port1),
+ [('admin_state_up', 'asc'),
+ ('mac_address', 'desc')])
+ finally:
+ helper_patcher.stop()
def test_list_ports_with_pagination_native(self):
if self._skip_native_pagination:
@@ -1018,14 +1049,17 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
- cfg.CONF.set_default('allow_overlapping_ips', True)
- with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'),
- self.port(mac_address='00:00:00:00:00:02'),
- self.port(mac_address='00:00:00:00:00:03')
- ) as (port1, port2, port3):
- self._test_list_with_pagination('port',
- (port1, port2, port3),
- ('mac_address', 'asc'), 2, 2)
+ try:
+ cfg.CONF.set_default('allow_overlapping_ips', True)
+ with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'),
+ self.port(mac_address='00:00:00:00:00:02'),
+ self.port(mac_address='00:00:00:00:00:03')
+ ) as (port1, port2, port3):
+ self._test_list_with_pagination('port',
+ (port1, port2, port3),
+ ('mac_address', 'asc'), 2, 2)
+ finally:
+ helper_patcher.stop()
def test_list_ports_with_pagination_reverse_native(self):
if self._skip_native_pagination:
@@ -1045,15 +1079,18 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
- cfg.CONF.set_default('allow_overlapping_ips', True)
- with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'),
- self.port(mac_address='00:00:00:00:00:02'),
- self.port(mac_address='00:00:00:00:00:03')
- ) as (port1, port2, port3):
- self._test_list_with_pagination_reverse('port',
- (port1, port2, port3),
- ('mac_address', 'asc'),
- 2, 2)
+ try:
+ cfg.CONF.set_default('allow_overlapping_ips', True)
+ with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'),
+ self.port(mac_address='00:00:00:00:00:02'),
+ self.port(mac_address='00:00:00:00:00:03')
+ ) as (port1, port2, port3):
+ self._test_list_with_pagination_reverse('port',
+ (port1, port2, port3),
+ ('mac_address', 'asc'),
+ 2, 2)
+ finally:
+ helper_patcher.stop()
def test_show_port(self):
with self.port() as port:
@@ -1062,7 +1099,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
self.assertEqual(port['port']['id'], sport['port']['id'])
def test_delete_port(self):
- with self.port() as port:
+ with self.port(no_delete=True) as port:
self._delete('ports', port['port']['id'])
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
@@ -1309,8 +1346,9 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
def fake_gen_mac(context, net_id):
raise n_exc.MacAddressGenerationFailure(net_id=net_id)
- with mock.patch.object(neutron.db.db_base_plugin_v2.NeutronDbPluginV2,
- '_generate_mac', new=fake_gen_mac):
+ with mock.patch.object(
+ neutron.db.db_base_plugin_v2.NeutronCorePluginV2,
+ '_generate_mac', new=fake_gen_mac):
res = self._create_network(fmt=self.fmt, name='net1',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
@@ -1333,6 +1371,32 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
+ def test_requested_subnet_delete(self):
+ with self.subnet() as subnet:
+ with self.port(subnet=subnet) as port:
+ ips = port['port']['fixed_ips']
+ self.assertEqual(len(ips), 1)
+ self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+ self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+ req = self.new_delete_request('subnet',
+ subnet['subnet']['id'])
+ res = req.get_response(self.api)
+ self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
+
+ def test_generated_duplicate_ip_ipv6(self):
+ with self.subnet(ip_version=6,
+ cidr="2014::/64",
+ ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
+ with self.port(subnet=subnet,
+ fixed_ips=[{'subnet_id': subnet['subnet']['id'],
+ 'ip_address':
+ "2014::1322:33ff:fe44:5566"}]) as port:
+                # Attempt to configure a duplicate IP; expect a conflict.
+ kwargs = {"mac_address": "11:22:33:44:55:66"}
+ net_id = port['port']['network_id']
+ res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+ self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
+
def test_requested_subnet_id(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
@@ -1785,8 +1849,8 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
ctx = context.get_admin_context()
with self.subnet() as subnet:
with contextlib.nested(
- self.port(subnet=subnet, device_id='owner1'),
- self.port(subnet=subnet, device_id='owner1'),
+ self.port(subnet=subnet, device_id='owner1', no_delete=True),
+ self.port(subnet=subnet, device_id='owner1', no_delete=True),
self.port(subnet=subnet, device_id='owner2'),
) as (p1, p2, p3):
network_id = subnet['subnet']['network_id']
@@ -1803,7 +1867,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
ctx = context.get_admin_context()
with self.subnet() as subnet:
with contextlib.nested(
- self.port(subnet=subnet, device_id='owner1'),
+ self.port(subnet=subnet, device_id='owner1', no_delete=True),
self.port(subnet=subnet, device_id='owner1'),
self.port(subnet=subnet, device_id='owner2'),
) as (p1, p2, p3):
@@ -2199,16 +2263,19 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
- with contextlib.nested(self.network(admin_status_up=True,
- name='net1'),
- self.network(admin_status_up=False,
- name='net2'),
- self.network(admin_status_up=False,
- name='net3')
- ) as (net1, net2, net3):
- self._test_list_with_sort('network', (net3, net2, net1),
- [('admin_state_up', 'asc'),
- ('name', 'desc')])
+ try:
+ with contextlib.nested(self.network(admin_status_up=True,
+ name='net1'),
+ self.network(admin_status_up=False,
+ name='net2'),
+ self.network(admin_status_up=False,
+ name='net3')
+ ) as (net1, net2, net3):
+ self._test_list_with_sort('network', (net3, net2, net1),
+ [('admin_state_up', 'asc'),
+ ('name', 'desc')])
+ finally:
+ helper_patcher.stop()
def test_list_networks_with_pagination_native(self):
if self._skip_native_pagination:
@@ -2226,31 +2293,37 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
- with contextlib.nested(self.network(name='net1'),
- self.network(name='net2'),
- self.network(name='net3')
- ) as (net1, net2, net3):
- self._test_list_with_pagination('network',
- (net1, net2, net3),
- ('name', 'asc'), 2, 2)
+ try:
+ with contextlib.nested(self.network(name='net1'),
+ self.network(name='net2'),
+ self.network(name='net3')
+ ) as (net1, net2, net3):
+ self._test_list_with_pagination('network',
+ (net1, net2, net3),
+ ('name', 'asc'), 2, 2)
+ finally:
+ helper_patcher.stop()
def test_list_networks_without_pk_in_fields_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
- with contextlib.nested(self.network(name='net1',
- shared=True),
- self.network(name='net2',
- shared=False),
- self.network(name='net3',
- shared=True)
- ) as (net1, net2, net3):
- self._test_list_with_pagination('network',
- (net1, net2, net3),
- ('name', 'asc'), 2, 2,
- query_params="fields=name",
- verify_key='name')
+ try:
+ with contextlib.nested(self.network(name='net1',
+ shared=True),
+ self.network(name='net2',
+ shared=False),
+ self.network(name='net3',
+ shared=True)
+ ) as (net1, net2, net3):
+ self._test_list_with_pagination('network',
+ (net1, net2, net3),
+ ('name', 'asc'), 2, 2,
+ query_params="fields=name",
+ verify_key='name')
+ finally:
+ helper_patcher.stop()
def test_list_networks_without_pk_in_fields_pagination_native(self):
if self._skip_native_pagination:
@@ -2281,13 +2354,16 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
- with contextlib.nested(self.network(name='net1'),
- self.network(name='net2'),
- self.network(name='net3')
- ) as (net1, net2, net3):
- self._test_list_with_pagination_reverse('network',
- (net1, net2, net3),
- ('name', 'asc'), 2, 2)
+ try:
+ with contextlib.nested(self.network(name='net1'),
+ self.network(name='net2'),
+ self.network(name='net3')
+ ) as (net1, net2, net3):
+ self._test_list_with_pagination_reverse('network',
+ (net1, net2, net3),
+ ('name', 'asc'), 2, 2)
+ finally:
+ helper_patcher.stop()
def test_list_networks_with_parameters(self):
with contextlib.nested(self.network(name='net1',
@@ -2667,8 +2743,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
with self.network() as network:
with contextlib.nested(
self.subnet(network=network),
- self.subnet(network=network, cidr='10.0.1.0/24'),
- ) as (subnet1, subnet2):
+ self.subnet(network=network, cidr='10.0.1.0/24',
+ do_delete=False)) as (subnet1, subnet2):
subnet1_id = subnet1['subnet']['id']
subnet2_id = subnet2['subnet']['id']
with self.port(
@@ -2734,7 +2810,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
set_context=True)
def test_create_subnet_as_admin(self):
- with self.network() as network:
+ with self.network(do_delete=False) as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.2.0/24',
@@ -3220,6 +3296,16 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
+ def test_create_subnet_ipv6_attributes(self):
+ gateway_ip = 'fe80::1'
+ cidr = 'fe80::/80'
+
+ for mode in constants.IPV6_MODES:
+ self._test_create_subnet(gateway_ip=gateway_ip,
+ cidr=cidr, ip_version=6,
+ ipv6_ra_mode=mode,
+ ipv6_address_mode=mode)
+
def _test_validate_subnet_ipv6_modes(self, cur_subnet=None,
expect_success=True, **modes):
plugin = manager.NeutronManager.get_plugin()
@@ -3325,6 +3411,31 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
+ def test_create_subnet_invalid_ipv6_combination(self):
+ gateway_ip = 'fe80::1'
+ cidr = 'fe80::/80'
+ with testlib_api.ExpectedException(
+ webob.exc.HTTPClientError) as ctx_manager:
+ self._test_create_subnet(gateway_ip=gateway_ip,
+ cidr=cidr, ip_version=6,
+ ipv6_ra_mode='stateful',
+ ipv6_address_mode='stateless')
+ self.assertEqual(ctx_manager.exception.code,
+ webob.exc.HTTPClientError.code)
+
+ def test_create_subnet_ipv6_single_attribute_set(self):
+ gateway_ip = 'fe80::1'
+ cidr = 'fe80::/80'
+ for mode in constants.IPV6_MODES:
+ self._test_create_subnet(gateway_ip=gateway_ip,
+ cidr=cidr, ip_version=6,
+ ipv6_ra_mode=None,
+ ipv6_address_mode=mode)
+ self._test_create_subnet(gateway_ip=gateway_ip,
+ cidr=cidr, ip_version=6,
+ ipv6_ra_mode=mode,
+ ipv6_address_mode=None)
+
def test_create_subnet_ipv6_ra_mode_ip_version_4(self):
cidr = '10.0.2.0/24'
with testlib_api.ExpectedException(
@@ -3700,18 +3811,21 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
- with contextlib.nested(self.subnet(enable_dhcp=True,
- cidr='10.0.0.0/24'),
- self.subnet(enable_dhcp=False,
- cidr='11.0.0.0/24'),
- self.subnet(enable_dhcp=False,
- cidr='12.0.0.0/24')
- ) as (subnet1, subnet2, subnet3):
- self._test_list_with_sort('subnet', (subnet3,
- subnet2,
- subnet1),
- [('enable_dhcp', 'asc'),
- ('cidr', 'desc')])
+ try:
+ with contextlib.nested(self.subnet(enable_dhcp=True,
+ cidr='10.0.0.0/24'),
+ self.subnet(enable_dhcp=False,
+ cidr='11.0.0.0/24'),
+ self.subnet(enable_dhcp=False,
+ cidr='12.0.0.0/24')
+ ) as (subnet1, subnet2, subnet3):
+ self._test_list_with_sort('subnet', (subnet3,
+ subnet2,
+ subnet1),
+ [('enable_dhcp', 'asc'),
+ ('cidr', 'desc')])
+ finally:
+ helper_patcher.stop()
def test_list_subnets_with_pagination_native(self):
if self._skip_native_pagination:
@@ -3729,13 +3843,16 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
- with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
- self.subnet(cidr='11.0.0.0/24'),
- self.subnet(cidr='12.0.0.0/24')
- ) as (subnet1, subnet2, subnet3):
- self._test_list_with_pagination('subnet',
- (subnet1, subnet2, subnet3),
- ('cidr', 'asc'), 2, 2)
+ try:
+ with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
+ self.subnet(cidr='11.0.0.0/24'),
+ self.subnet(cidr='12.0.0.0/24')
+ ) as (subnet1, subnet2, subnet3):
+ self._test_list_with_pagination('subnet',
+ (subnet1, subnet2, subnet3),
+ ('cidr', 'asc'), 2, 2)
+ finally:
+ helper_patcher.stop()
def test_list_subnets_with_pagination_reverse_native(self):
if self._skip_native_sorting:
@@ -3754,14 +3871,17 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
- with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
- self.subnet(cidr='11.0.0.0/24'),
- self.subnet(cidr='12.0.0.0/24')
- ) as (subnet1, subnet2, subnet3):
- self._test_list_with_pagination_reverse('subnet',
- (subnet1, subnet2,
- subnet3),
- ('cidr', 'asc'), 2, 2)
+ try:
+ with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
+ self.subnet(cidr='11.0.0.0/24'),
+ self.subnet(cidr='12.0.0.0/24')
+ ) as (subnet1, subnet2, subnet3):
+ self._test_list_with_pagination_reverse('subnet',
+ (subnet1, subnet2,
+ subnet3),
+ ('cidr', 'asc'), 2, 2)
+ finally:
+ helper_patcher.stop()
def test_invalid_ip_version(self):
with self.network() as network:
@@ -4074,9 +4194,9 @@ class TestNeutronDbPluginV2(base.BaseTestCase):
"""Unit Tests for NeutronDbPluginV2 IPAM Logic."""
def test_generate_ip(self):
- with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
+ with mock.patch.object(db_base_plugin_v2.NeutronCorePluginV2,
'_try_generate_ip') as generate:
- with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
+ with mock.patch.object(db_base_plugin_v2.NeutronCorePluginV2,
'_rebuild_availability_ranges') as rebuild:
db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's')
@@ -4085,15 +4205,22 @@ class TestNeutronDbPluginV2(base.BaseTestCase):
self.assertEqual(0, rebuild.call_count)
def test_generate_ip_exhausted_pool(self):
- with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
+ with mock.patch.object(db_base_plugin_v2.NeutronCorePluginV2,
'_try_generate_ip') as generate:
- with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
+ with mock.patch.object(db_base_plugin_v2.NeutronCorePluginV2,
'_rebuild_availability_ranges') as rebuild:
exception = n_exc.IpAddressGenerationFailure(net_id='n')
- # fail first call but not second
- generate.side_effect = [exception, None]
- db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's')
+ generate.side_effect = exception
+
+            # Ideally side_effect would raise only on the first call, but
+            # there is no clean way to express that here. Instead, let
+            # every call raise and swallow the second exception; the
+            # assertion below verifies _try_generate_ip was called twice.
+ try:
+ db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's')
+ except n_exc.IpAddressGenerationFailure:
+ pass
self.assertEqual(2, generate.call_count)
rebuild.assert_called_once_with('c', 's')
@@ -4236,6 +4363,12 @@ class NeutronDbPluginV2AsMixinTestCase(testlib_api.SqlTestCase):
def setUp(self):
super(NeutronDbPluginV2AsMixinTestCase, self).setUp()
+
+ mock_nm_get_ipam = mock.patch(
+ 'neutron.manager.NeutronManager.get_ipam')
+ self.mock_nm_get_ipam = mock_nm_get_ipam.start()
+ self.mock_nm_get_ipam.return_value = neutron_ipam.NeutronIPAM()
+
self.plugin = importutils.import_object(DB_PLUGIN_KLASS)
self.context = context.get_admin_context()
self.net_data = {'network': {'id': 'fake-id',
@@ -4244,6 +4377,9 @@ class NeutronDbPluginV2AsMixinTestCase(testlib_api.SqlTestCase):
'tenant_id': 'test-tenant',
'shared': False}}
+ self.addCleanup(mock_nm_get_ipam.stop)
+ self.addCleanup(db.clear_db)
+
def test_create_network_with_default_status(self):
net = self.plugin.create_network(self.context, self.net_data)
default_net_create_status = 'ACTIVE'
diff --git a/neutron/tests/unit/test_linux_dhcp_relay.py b/neutron/tests/unit/test_linux_dhcp_relay.py
new file mode 100644
index 0000000..8e24ae5
--- /dev/null
+++ b/neutron/tests/unit/test_linux_dhcp_relay.py
@@ -0,0 +1,537 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import __builtin__
+import os
+
+import mock
+
+from neutron.agent.common import config
+from neutron.agent.linux import dhcp
+from neutron.agent.linux import dhcp_relay
+from neutron.agent.linux import interface
+from neutron.agent.linux import utils
+from neutron.common import config as base_config
+from neutron.common import exceptions as exc
+from neutron.tests import base
+
+
+DHCP_RELAY_IP = '192.168.122.32'
+DNS_RELAY_IP = '192.168.122.32'
+
+
+class FakeDeviceManager():
+ def __init__(self, conf, root_helper, plugin):
+ pass
+
+ def get_interface_name(self, network, port):
+ pass
+
+ def get_device_id(self, network):
+ pass
+
+ def setup_dhcp_port(self, network):
+ pass
+
+ def setup(self, network, reuse_existing=False):
+ pass
+
+ def update(self, network):
+ pass
+
+ def destroy(self, network, device_name):
+ pass
+
+ def setup_relay(self, network, iface_name, mac_address, relay_bridge):
+ pass
+
+ def destroy_relay(self, network, device_name, relay_bridge):
+ pass
+
+
+class FakeIPAllocation:
+ def __init__(self, address):
+ self.ip_address = address
+
+
+class FakePort1:
+ id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ admin_state_up = True
+ fixed_ips = [FakeIPAllocation('192.168.0.2')]
+ mac_address = '00:00:80:aa:bb:cc'
+
+ def __init__(self):
+ self.extra_dhcp_opts = []
+
+
+class FakeV4HostRoute:
+ destination = '20.0.0.1/24'
+ nexthop = '20.0.0.1'
+
+
+class FakeV4Subnet:
+ id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
+ ip_version = 4
+ cidr = '192.168.0.0/24'
+ gateway_ip = '192.168.0.1'
+ enable_dhcp = True
+ host_routes = [FakeV4HostRoute]
+ dns_nameservers = ['8.8.8.8']
+
+
+class FakeV4Network:
+ id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ tenant_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ subnets = [FakeV4Subnet()]
+ ports = [FakePort1()]
+ namespace = 'qdhcp-ns'
+ dhcp_relay_ip = DHCP_RELAY_IP
+ dns_relay_ip = DNS_RELAY_IP
+
+
+class FakeOpen():
+ def __init__(self, *args):
+ pass
+
+ def read(self):
+ return 'dhcrelay -a -i tap77777777-77 %s' % DHCP_RELAY_IP
+
+
+class TestBase(base.BaseTestCase):
+ def setUp(self):
+ super(TestBase, self).setUp()
+ self.network = FakeV4Network()
+ root = os.path.dirname(os.path.dirname(__file__))
+ args = ['--config-file',
+ os.path.join(root, 'etc', 'neutron.conf.test')]
+ self.conf = config.setup_conf()
+ self.conf.register_opts(base_config.core_opts)
+ self.conf.register_opts(dhcp.OPTS)
+ self.conf.register_opts(dhcp_relay.OPTS)
+ self.conf.register_opts(interface.OPTS)
+
+ def dhcp_dns_proxy_init_mock(self, conf, network, root_helper='sudo',
+ version=None, plugin=None):
+ super(dhcp_relay.DhcpDnsProxy, self).__init__(
+ conf, network,
+ root_helper,
+ version, plugin)
+
+ external_dhcp_servers = self._get_relay_ips(
+ 'external_dhcp_servers')
+ external_dns_servers = self._get_relay_ips('external_dns_servers')
+ required_options = {
+ 'dhcp_relay_bridge': self.conf.dhcp_relay_bridge,
+ 'external_dhcp_servers': external_dhcp_servers,
+ 'external_dns_servers': external_dns_servers
+ }
+
+ for option_name, option in required_options.iteritems():
+ if not option:
+ raise exc.InvalidConfigurationOption(
+ opt_name=option_name,
+ opt_value=option
+ )
+
+ self.dev_name_len = self._calc_dev_name_len()
+ self.device_manager = mock.Mock()
+
+ dhcp_dns_proxy_mock = mock.patch(
+ "neutron.agent.linux.dhcp_relay.DhcpDnsProxy.__init__",
+ dhcp_dns_proxy_init_mock
+ )
+ dhcp_dns_proxy_mock.start()
+
+ device_manager_init = mock.patch(
+ "neutron.agent.linux.dhcp.DeviceManager.__init__",
+ lambda *args, **kwargs: None)
+ device_manager_init.start()
+
+ self.conf(args=args)
+ self.conf.set_override('state_path', '')
+ self.conf.dhcp_relay_bridge = 'br-dhcp'
+
+ self.replace_p = mock.patch('neutron.agent.linux.utils.replace_file')
+ self.execute_p = mock.patch('neutron.agent.linux.utils.execute')
+ self.addCleanup(self.replace_p.stop)
+ self.addCleanup(self.execute_p.stop)
+ self.safe = self.replace_p.start()
+ self.execute = self.execute_p.start()
+
+ def get_fixed_ddi_proxy(self):
+ def _get_relay_ips(self, data):
+ return ['192.168.122.32']
+
+ attrs_to_mock = {
+ '_get_relay_ips': _get_relay_ips
+ }
+
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock):
+ return dhcp_relay.DhcpDnsProxy
+
+
+class TestDnsDhcpProxy(TestBase):
+
+ def test_create_instance_no_dhcp_relay_server(self):
+ self.conf.dhcp_relay_ip = None
+ self.network.dhcp_relay_ip = None
+ self.assertRaises(exc.InvalidConfigurationOption,
+ dhcp_relay.DhcpDnsProxy,
+ self.conf,
+ self.network)
+
+ def test_create_instance_bad_dhcp_relay_server(self):
+ self.conf.dhcp_relay_ip = '192.168.122.322'
+ self.assertRaises(exc.InvalidConfigurationOption,
+ dhcp_relay.DhcpDnsProxy,
+ self.conf,
+ self.network)
+
+ def test_create_instance_no_dns_relay_server(self):
+ self.conf.dns_relay_ip = None
+ self.network.dhcp_relay_ip = None
+ self.assertRaises(exc.InvalidConfigurationOption,
+ dhcp_relay.DhcpDnsProxy,
+ self.conf,
+ self.network)
+
+ def test_create_instance_bad_dns_relay_server(self):
+ self.conf.dns_relay_ip = '192.168.122.322'
+ self.assertRaises(exc.InvalidConfigurationOption,
+ dhcp_relay.DhcpDnsProxy,
+ self.conf,
+ self.network)
+
+ def test_create_instance_no_relay_bridge(self):
+ self.conf.dhcp_relay_bridge = None
+ self.assertRaises(exc.InvalidConfigurationOption,
+ dhcp_relay.DhcpDnsProxy,
+ self.conf,
+ self.network)
+
+ @mock.patch.object(dhcp_relay.DhcpDnsProxy, "_spawn_dhcp_proxy")
+ @mock.patch.object(dhcp_relay.DhcpDnsProxy, "_spawn_dns_proxy")
+ def test_spawn(self, mock_spawn_dns, mock_spawn_dhcp):
+ attrs_to_mock = {
+ '_get_relay_ips': mock.Mock(return_value=['192.168.122.32'])
+ }
+
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock):
+ dm = dhcp_relay.DhcpDnsProxy(self.conf, self.network)
+ dm.spawn_process()
+
+ mock_spawn_dns.assert_called_once_with()
+ mock_spawn_dhcp.assert_called_once_with()
+
+ @mock.patch.object(dhcp.DhcpLocalProcess, 'get_conf_file_name')
+ def test__spawn_dhcp_proxy(self, get_conf_file_name_mock):
+ test_dns_conf_path = 'test_dir/dhcp/test_dns_pid'
+ get_conf_file_name_mock.return_value = test_dns_conf_path
+
+ expected = [
+ 'ip',
+ 'netns',
+ 'exec',
+ 'qdhcp-ns',
+ 'dhcrelay',
+ '-4',
+ '-a',
+ '-pf',
+ test_dns_conf_path,
+ '-i',
+ 'tap0',
+ '-o',
+ 'trelaaaaaaaa-a',
+ self.network.dhcp_relay_ip
+ ]
+
+ self.execute.return_value = ('', '')
+
+ attrs_to_mock = dict(
+ [(a, mock.DEFAULT) for a in
+ ['interface_name', '_get_relay_ips']]
+ )
+
+ attrs_to_mock.update({
+ '_get_relay_ips': mock.Mock(return_value=['192.168.122.32'])
+ })
+
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock) as mocks:
+ mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
+
+ dm = dhcp_relay.DhcpDnsProxy(self.conf, self.network)
+ dm._spawn_dhcp_proxy()
+ self.execute.assert_called_once_with(expected,
+ root_helper='sudo',
+ check_exit_code=True)
+
+ @mock.patch.object(dhcp.DhcpLocalProcess, 'get_conf_file_name')
+ def test__spawn_dhcp_proxy_no_namespace(self, get_conf_file_name_mock):
+ test_dns_conf_path = 'test_dir/dhcp/test_dns_pid'
+ get_conf_file_name_mock.return_value = test_dns_conf_path
+ self.network.namespace = None
+
+ expected = [
+ 'dhcrelay',
+ '-4',
+ '-a',
+ '-pf',
+ test_dns_conf_path,
+ '-i',
+ 'tap0',
+ '-o',
+ 'trelaaaaaaaa-a',
+ self.network.dhcp_relay_ip
+ ]
+
+ self.execute.return_value = ('', '')
+
+ attrs_to_mock = dict(
+ [(a, mock.DEFAULT) for a in
+ ['interface_name', '_get_relay_ips']]
+ )
+
+ attrs_to_mock.update({
+ '_get_relay_ips': mock.Mock(return_value=['192.168.122.32'])
+ })
+
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock) as mocks:
+ mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
+
+ dm = dhcp_relay.DhcpDnsProxy(self.conf, self.network)
+ dm._spawn_dhcp_proxy()
+ self.execute.assert_called_once_with(expected, 'sudo')
+
+ @mock.patch.object(dhcp_relay.DhcpDnsProxy, 'interface_name')
+ @mock.patch.object(dhcp.DhcpLocalProcess, 'get_conf_file_name')
+ def test__spawn_dns_proxy(self, get_conf_file_name_mock, iface_name_mock):
+ test_dns_conf_path = 'test_dir/dhcp/test_dns_pid'
+ get_conf_file_name_mock.return_value = test_dns_conf_path
+ iface_name_mock.__get__ = mock.Mock(return_value='test_tap0')
+ expected = [
+ 'ip',
+ 'netns',
+ 'exec',
+ 'qdhcp-ns',
+ 'dnsmasq',
+ '--no-hosts',
+ '--no-resolv',
+ '--strict-order',
+ '--bind-interfaces',
+ '--interface=test_tap0',
+ '--except-interface=lo',
+ '--all-servers',
+ '--server=%s' % self.network.dns_relay_ip,
+ '--pid-file=%s' % test_dns_conf_path
+ ]
+ self.execute.return_value = ('', '')
+
+ attrs_to_mock = {
+ '_get_relay_ips': mock.Mock(return_value=['192.168.122.32'])
+ }
+
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock):
+
+ dm = dhcp_relay.DhcpDnsProxy(self.conf, self.network)
+ dm._spawn_dns_proxy()
+
+ self.execute.assert_called_once_with(expected,
+ root_helper='sudo',
+ check_exit_code=True)
+
+ @mock.patch.object(dhcp_relay.DhcpDnsProxy, 'interface_name')
+ @mock.patch.object(dhcp.DhcpLocalProcess, 'get_conf_file_name')
+ def test__spawn_dns_proxy_no_namespace(self, get_conf_file_name_mock,
+ iface_name_mock):
+ self.network.namespace = None
+ test_dns_conf_path = 'test_dir/dhcp/test_dns_pid'
+ get_conf_file_name_mock.return_value = test_dns_conf_path
+ iface_name_mock.__get__ = mock.Mock(return_value='test_tap0')
+ expected = [
+ 'dnsmasq',
+ '--no-hosts',
+ '--no-resolv',
+ '--strict-order',
+ '--bind-interfaces',
+ '--interface=test_tap0',
+ '--except-interface=lo',
+ '--all-servers',
+ '--server=%s' % self.network.dns_relay_ip,
+ '--pid-file=%s' % test_dns_conf_path
+ ]
+ self.execute.return_value = ('', '')
+
+ attrs_to_mock = {
+ '_get_relay_ips': mock.Mock(return_value=['192.168.122.32'])}
+
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock):
+ dm = dhcp_relay.DhcpDnsProxy(self.conf, self.network)
+ dm._spawn_dns_proxy()
+
+ self.execute.assert_called_once_with(expected, 'sudo')
+
+ @mock.patch.object(dhcp_relay, '_generate_mac_address',
+ mock.Mock(return_value='77:77:77:77:77:77'))
+ @mock.patch.object(dhcp_relay.DhcpDnsProxy, '_get_relay_device_name',
+ mock.Mock(return_value='tap-relay77777'))
+ @mock.patch.object(FakeDeviceManager, 'setup',
+ mock.Mock(return_value='tap-77777777'))
+ @mock.patch.object(FakeDeviceManager, 'setup_relay', mock.Mock())
+ def test_enable_dhcp_dns_inactive(self):
+ attrs_to_mock = dict(
+ [(a, mock.DEFAULT) for a in
+ ['interface_name', '_get_relay_ips', 'restart',
+ 'is_dhcp_active', 'is_dns_active', 'spawn_process']]
+ )
+
+ attrs_to_mock.update(
+ {'_get_relay_ips': mock.Mock(return_value=['192.168.122.32']),
+ 'is_dhcp_active': mock.Mock(return_value=True),
+ 'is_dns_active': mock.Mock(return_value=True),
+ 'spawn_process': mock.Mock(return_value=None)}
+ )
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock) as mocks:
+ mocks['interface_name'].__set__ = mock.Mock()
+
+ dr = dhcp_relay.DhcpDnsProxy(self.conf, self.network)
+ dr.enable()
+
+ dr.device_manager.setup_relay.assert_called_once_with(
+ self.network,
+ 'tap-relay77777',
+ '77:77:77:77:77:77',
+ self.conf.dhcp_relay_bridge)
+ dr.device_manager.setup.assert_called_once_with(
+ self.network,
+ reuse_existing=True)
+
+ @mock.patch.object(dhcp_relay, '_generate_mac_address',
+ mock.Mock(return_value='77:77:77:77:77:77'))
+ @mock.patch.object(dhcp_relay.DhcpDnsProxy, '_get_relay_device_name',
+ mock.Mock(return_value='tap-relay77777'))
+ @mock.patch.object(FakeDeviceManager, 'setup',
+ mock.Mock(return_value='tap-77777777'))
+ @mock.patch.object(FakeDeviceManager, 'setup_relay', mock.Mock())
+ def test_enable_dhcp_dns_active(self):
+ attrs_to_mock = dict(
+ [(a, mock.DEFAULT) for a in
+ ['interface_name', 'is_dhcp_active', 'is_dns_active', 'restart',
+ '_get_relay_ips']]
+ )
+
+ attrs_to_mock.update(
+ {'_get_relay_ips': mock.Mock(return_value=['192.168.122.32']),
+ 'is_dhcp_active': mock.Mock(return_value=True),
+ 'is_dns_active': mock.Mock(return_value=True)}
+ )
+
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock) as mocks:
+ mocks['interface_name'].__set__ = mock.Mock()
+
+ dr = dhcp_relay.DhcpDnsProxy(self.conf, self.network)
+ dr.enable()
+
+ dr.device_manager.setup_relay.assert_called_once_with(
+ self.network,
+ 'tap-relay77777',
+ '77:77:77:77:77:77',
+ self.conf.dhcp_relay_bridge)
+ dr.device_manager.setup.assert_called_once_with(
+ self.network,
+ reuse_existing=True)
+ mocks['restart'].assert_any_call()
+
+ def test_disable_retain_port(self):
+ attrs_to_mock = dict(
+ [(a, mock.DEFAULT) for a in
+ ['is_dhcp_active', 'is_dns_active', 'get_dhcp_pid', 'get_dns_pid',
+ 'restart', '_remove_config_files']]
+ )
+
+ attrs_to_mock.update(
+ {'_get_relay_ips': mock.Mock(return_value=['192.168.122.32']),
+ 'is_dhcp_active': mock.Mock(return_value=True),
+ 'is_dns_active': mock.Mock(return_value=True),
+ 'get_dhcp_pid': mock.Mock(return_value='1111'),
+ 'get_dns_pid': mock.Mock(return_value='2222')}
+ )
+
+ kill_proc_calls = [
+ mock.call(['kill', '-9', '1111'], 'sudo'),
+ mock.call(['kill', '-9', '2222'], 'sudo')]
+
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock) as mocks:
+ dr = dhcp_relay.DhcpDnsProxy(self.conf, self.network)
+ dr.disable(retain_port=True)
+
+ self.execute.assert_has_calls(kill_proc_calls)
+ mocks['_remove_config_files'].assert_any_call()
+
+ @mock.patch.object(dhcp_relay.DhcpDnsProxy, '_get_relay_device_name',
+ mock.Mock(return_value='tap-relay77777'))
+ @mock.patch.object(FakeDeviceManager, 'destroy', mock.Mock())
+ @mock.patch.object(FakeDeviceManager, 'destroy_relay', mock.Mock())
+ def test_disable_no_retain_port(self):
+ attrs_to_mock = dict(
+ [(a, mock.DEFAULT) for a in
+ ['is_dhcp_active', 'is_dns_active', 'get_dhcp_pid',
+ 'get_dhcp_pid', 'interface_name', '_remove_config_files']]
+ )
+
+ attrs_to_mock.update(
+ {'_get_relay_ips': mock.Mock(return_value=['192.168.122.32']),
+ 'is_dhcp_active': mock.Mock(return_value=True),
+ 'is_dns_active': mock.Mock(return_value=True),
+ 'get_dhcp_pid': mock.Mock(return_value='1111'),
+ 'get_dns_pid': mock.Mock(return_value='2222')}
+ )
+
+ kill_proc_calls = [
+ mock.call(['kill', '-9', '1111'], 'sudo'),
+ mock.call(['kill', '-9', '2222'], 'sudo')]
+
+ with mock.patch.multiple(dhcp_relay.DhcpDnsProxy,
+ **attrs_to_mock) as mocks:
+ mocks['interface_name'].__get__ = mock.Mock(
+ return_value='tap-77777777')
+
+ dr = dhcp_relay.DhcpDnsProxy(self.conf, self.network)
+ dr.disable(retain_port=False)
+
+ self.execute.assert_has_calls(kill_proc_calls)
+ dr.device_manager.destroy.assert_called_once_with(self.network,
+ 'tap-77777777')
+ dr.device_manager.destroy_relay.assert_called_once_with(
+ self.network,
+ 'tap-relay77777',
+ self.conf.dhcp_relay_bridge)
+
+ mocks['_remove_config_files'].assert_any_call()
+
+ def test_generate_mac(self):
+ mac = dhcp_relay._generate_mac_address()
+ mac_array = mac.split(':')
+ self.assertEqual(6, len(mac_array))
+ for item in mac_array:
+ self.assertEqual(2, len(item))
diff --git a/neutron/tests/unit/vmware/test_nsx_sync.py b/neutron/tests/unit/vmware/test_nsx_sync.py
index cacfc24..ff8e6bf 100644
--- a/neutron/tests/unit/vmware/test_nsx_sync.py
+++ b/neutron/tests/unit/vmware/test_nsx_sync.py
@@ -304,6 +304,22 @@ class SyncTestCase(testlib_api.SqlTestCase):
super(SyncTestCase, self).setUp()
self.addCleanup(self.fc.reset_all)
+ mock_nm_get_ipam = mock.patch('neutron.manager.NeutronManager.'
+ 'get_ipam')
+ self.mock_nm_get_ipam = mock_nm_get_ipam.start()
+ ipam = mock.Mock()
+ ipam.create_network = lambda ctx, net: net
+
+ self.allocate_counter = 1
+ ipam.allocate_ip = self._allocate_ip_mock
+
+ self.mock_nm_get_ipam.return_value = ipam
+ self.addCleanup(mock_nm_get_ipam.stop)
+
+ def _allocate_ip_mock(self, *args):
+ self.allocate_counter += 1
+ return '10.20.30.%s' % self.allocate_counter
+
@contextlib.contextmanager
def _populate_data(self, ctx, net_size=2, port_size=2, router_size=2):
diff --git a/requirements.txt b/requirements.txt
index 9ab21f6..49ab3b2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -31,3 +31,5 @@ oslo.messaging<1.5.0,>=1.4.0
oslo.rootwrap<=1.5.0,>=1.3.0
python-novaclient<=2.20.0,>=2.18.0
+
+taskflow>=0.1.3,<=0.3.21
diff --git a/test-requirements.txt b/test-requirements.txt
index 04d0e59..9536425 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -16,3 +16,4 @@ oslosphinx<2.5.0,>=2.2.0 # Apache-2.0
testrepository<=0.0.20,>=0.0.18
testtools!=1.4.0,<=1.5.0,>=0.9.34
WebTest<=2.0.18,>=2.0
+taskflow