=== modified file 'ChangeLog'
--- ChangeLog	2016-06-02 16:32:14 +0000
+++ ChangeLog	2016-06-03 19:07:08 +0000
@@ -113,6 +113,8 @@
  - settings on the kernel command line (cc:) override all local settings
    rather than only those in /etc/cloud/cloud.cfg (LP: #1582323)
  - Improve merging documentation [Daniel Watkins]
+ - SmartOS: datasource improvements and support for metadata service
+   providing networking information.
 
 0.7.6:
  - open 0.7.6

=== modified file 'bin/cloud-init'
--- bin/cloud-init	2016-04-15 17:54:05 +0000
+++ bin/cloud-init	2016-06-03 19:07:08 +0000
@@ -211,27 +211,27 @@
         util.logexc(LOG, "Failed to initialize, likely bad things to come!")
     # Stage 4
     path_helper = init.paths
-    if not args.local:
+    mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK
+
+    if mode == sources.DSMODE_NETWORK:
         existing = "trust"
         sys.stderr.write("%s\n" % (netinfo.debug_info()))
         LOG.debug(("Checking to see if files that we need already"
                    " exist from a previous run that would allow us"
                    " to stop early."))
+        # no-net is written by upstart cloud-init-nonet when network failed
+        # to come up
         stop_files = [
             os.path.join(path_helper.get_cpath("data"), "no-net"),
-            path_helper.get_ipath_cur("obj_pkl"),
         ]
         existing_files = []
         for fn in stop_files:
-            try:
-                c = util.load_file(fn)
-                if len(c):
-                    existing_files.append((fn, len(c)))
-            except Exception:
-                pass
+            if os.path.isfile(fn):
+                existing_files.append(fn)
+
         if existing_files:
-            LOG.debug("Exiting early due to the existence of %s files",
-                      existing_files)
+            LOG.debug("[%s] Exiting. stop file %s existed",
+                      mode, existing_files)
             return (None, [])
         else:
             LOG.debug("Execution continuing, no previous run detected that"
@@ -248,34 +248,50 @@
     # Stage 5
     try:
         init.fetch(existing=existing)
+        # if in network mode, and the datasource is local
+        # then work was done at that stage.
+        if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
+            LOG.debug("[%s] Exiting. datasource %s in local mode",
+                      mode, init.datasource)
+            return (None, [])
     except sources.DataSourceNotFoundException:
         # In the case of 'cloud-init init' without '--local' it is a bit
         # more likely that the user would consider it failure if nothing was
         # found. When using upstart it will also mentions job failure
         # in console log if exit code is != 0.
-        if args.local:
+        if mode == sources.DSMODE_LOCAL:
             LOG.debug("No local datasource found")
         else:
             util.logexc(LOG, ("No instance datasource found!"
                               " Likely bad things to come!"))
         if not args.force:
-            init.apply_network_config()
-            if args.local:
+            init.apply_network_config(bring_up=not args.local)
+            LOG.debug("[%s] Exiting without datasource in local mode", mode)
+            if mode == sources.DSMODE_LOCAL:
                 return (None, [])
             else:
                 return (None, ["No instance datasource found."])
-
-    if args.local:
-        if not init.ds_restored:
-            # if local mode and the datasource was not restored from cache
-            # (this is not first boot) then apply networking.
-            init.apply_network_config()
         else:
-            LOG.debug("skipping networking config from restored datasource.")
+            LOG.debug("[%s] barreling on in force mode without datasource",
+                      mode)
 
     # Stage 6
     iid = init.instancify()
-    LOG.debug("%s will now be targeting instance id: %s", name, iid)
+    LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
+              mode, name, iid, init.is_new_instance())
+
+    init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))
+
+    if mode == sources.DSMODE_LOCAL:
+        if init.datasource.dsmode != mode:
+            LOG.debug("[%s] Exiting. datasource %s not in local mode.",
+                      mode, init.datasource)
+            return (init.datasource, [])
+        else:
+            LOG.debug("[%s] %s is in local mode, will apply init modules now.",
+                      mode, init.datasource)
+
+    # update fully realizes user-data (pulling in #include if necessary)
     init.update()
     # Stage 7
     try:
@@ -528,7 +544,7 @@
         v1[mode]['errors'] = [str(e) for e in errors]
 
     except Exception as e:
-        util.logexc(LOG, "failed of stage %s", mode)
+        util.logexc(LOG, "failed stage %s", mode)
         print_exc("failed run of stage %s" % mode)
         v1[mode]['errors'] = [str(e)]
 

=== modified file 'cloudinit/config/cc_emit_upstart.py'
--- cloudinit/config/cc_emit_upstart.py	2016-05-12 17:56:26 +0000
+++ cloudinit/config/cc_emit_upstart.py	2016-06-03 19:07:08 +0000
@@ -56,7 +56,7 @@
         event_names = ['cloud-config']
 
     if not is_upstart_system():
-        log.debug("not upstart system, '%s' disabled")
+        log.debug("not upstart system, '%s' disabled", name)
         return
 
     cfgpath = cloud.paths.get_ipath_cur("cloud_config")

=== modified file 'cloudinit/config/cc_lxd.py'
--- cloudinit/config/cc_lxd.py	2016-05-12 17:56:26 +0000
+++ cloudinit/config/cc_lxd.py	2016-06-03 19:07:08 +0000
@@ -52,7 +52,8 @@
     # Get config
     lxd_cfg = cfg.get('lxd')
     if not lxd_cfg:
-        log.debug("Skipping module named %s, not present or disabled by cfg")
+        log.debug("Skipping module named %s, not present or disabled by cfg",
+                  name)
         return
     if not isinstance(lxd_cfg, dict):
         log.warn("lxd config must be a dictionary. found a '%s'",

=== modified file 'cloudinit/distros/__init__.py'
--- cloudinit/distros/__init__.py	2016-05-12 17:56:26 +0000
+++ cloudinit/distros/__init__.py	2016-06-03 19:07:08 +0000
@@ -31,6 +31,7 @@
 
 from cloudinit import importer
 from cloudinit import log as logging
+from cloudinit import net
 from cloudinit import ssh_util
 from cloudinit import type_utils
 from cloudinit import util
@@ -128,6 +129,8 @@
                                         mirror_info=arch_info)
 
     def apply_network(self, settings, bring_up=True):
+        # this applies network where 'settings' is interfaces(5) style
+        # it is obsolete compared to apply_network_config
         # Write it out
         dev_names = self._write_network(settings)
         # Now try to bring them up
@@ -143,6 +146,9 @@
             return self._bring_up_interfaces(dev_names)
         return False
 
+    def apply_network_config_names(self, netconfig):
+        net.apply_network_config_names(netconfig)
+
     @abc.abstractmethod
     def apply_locale(self, locale, out_fn=None):
         raise NotImplementedError()

=== modified file 'cloudinit/helpers.py'
--- cloudinit/helpers.py	2016-05-12 17:56:26 +0000
+++ cloudinit/helpers.py	2016-06-03 19:07:08 +0000
@@ -328,6 +328,7 @@
         self.cfgs = path_cfgs
         # Populate all the initial paths
         self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud')
+        self.run_dir = path_cfgs.get('run_dir', '/run/cloud-init')
         self.instance_link = os.path.join(self.cloud_dir, 'instance')
         self.boot_finished = os.path.join(self.instance_link, "boot-finished")
         self.upstart_conf_d = path_cfgs.get('upstart_dir')
@@ -349,26 +350,19 @@
             "data": "data",
             "vendordata_raw": "vendor-data.txt",
             "vendordata": "vendor-data.txt.i",
+            "instance_id": ".instance-id",
         }
         # Set when a datasource becomes active
         self.datasource = ds
 
     # get_ipath_cur: get the current instance path for an item
     def get_ipath_cur(self, name=None):
-        ipath = self.instance_link
-        add_on = self.lookups.get(name)
-        if add_on:
-            ipath = os.path.join(ipath, add_on)
-        return ipath
+        return self._get_path(self.instance_link, name)
 
     # get_cpath : get the "clouddir" (/var/lib/cloud/<name>)
     # for a name in dirmap
     def get_cpath(self, name=None):
-        cpath = self.cloud_dir
-        add_on = self.lookups.get(name)
-        if add_on:
-            cpath = os.path.join(cpath, add_on)
-        return cpath
+        return self._get_path(self.cloud_dir, name)
 
     # _get_ipath : get the instance path for a name in pathmap
     # (/var/lib/cloud/instances/<instance>/<name>)
@@ -397,6 +391,14 @@
         else:
             return ipath
 
+    def _get_path(self, base, name=None):
+        if name is None:
+            return base
+        return os.path.join(base, self.lookups[name])
+
+    def get_runpath(self, name=None):
+        return self._get_path(self.run_dir, name)
+
 
 # This config parser will not throw when sections don't exist
 # and you are setting values on those sections which is useful

=== modified file 'cloudinit/net/__init__.py'
--- cloudinit/net/__init__.py	2016-05-12 17:56:26 +0000
+++ cloudinit/net/__init__.py	2016-06-03 19:07:08 +0000
@@ -201,7 +201,11 @@
             ifaces[iface]['method'] = method
             currif = iface
         elif option == "hwaddress":
-            ifaces[currif]['hwaddress'] = split[1]
+            if split[1] == "ether":
+                val = split[2]
+            else:
+                val = split[1]
+            ifaces[currif]['hwaddress'] = val
         elif option in NET_CONFIG_OPTIONS:
             ifaces[currif][option] = split[1]
         elif option in NET_CONFIG_COMMANDS:
@@ -570,6 +574,8 @@
                 content += iface_start_entry(iface, index)
                 content += iface_add_subnet(iface, subnet)
                 content += iface_add_attrs(iface)
+                for route in subnet.get('routes', []):
+                    content += render_route(route, indent="    ")
         else:
             # ifenslave docs say to auto the slave devices
             if 'bond-master' in iface:
@@ -768,4 +774,218 @@
     return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
 
 
+def convert_eni_data(eni_data):
+    # return a network config representation of what is in eni_data
+    ifaces = {}
+    parse_deb_config_data(ifaces, eni_data, src_dir=None, src_path=None)
+    return _ifaces_to_net_config_data(ifaces)
+
+
+def _ifaces_to_net_config_data(ifaces):
+    """Return network config that represents the ifaces data provided.
+    ifaces = parse_deb_config("/etc/network/interfaces")
+    config = ifaces_to_net_config_data(ifaces)
+    state = parse_net_config_data(config)."""
+    devs = {}
+    for name, data in ifaces.items():
+        # devname is 'eth0' for name='eth0:1'
+        devname = name.partition(":")[0]
+        if devname == "lo":
+            # currently providing 'lo' in network config results in duplicate
+            # entries in the rendered interfaces file, so skip it.
+            continue
+        if devname not in devs:
+            devs[devname] = {'type': 'physical', 'name': devname,
+                             'subnets': []}
+            # this isn't strictly correct, but some might specify
+            # hwaddress on a nic for matching / declaring name.
+            if 'hwaddress' in data:
+                devs[devname]['mac_address'] = data['hwaddress']
+        subnet = {'_orig_eni_name': name, 'type': data['method']}
+        if data.get('auto'):
+            subnet['control'] = 'auto'
+        else:
+            subnet['control'] = 'manual'
+
+        if data.get('method') == 'static':
+            subnet['address'] = data['address']
+
+        for copy_key in ('netmask', 'gateway', 'broadcast'):
+            if copy_key in data:
+                subnet[copy_key] = data[copy_key]
+
+        if 'dns' in data:
+            for n in ('nameservers', 'search'):
+                if n in data['dns'] and data['dns'][n]:
+                    subnet['dns_' + n] = data['dns'][n]
+        devs[devname]['subnets'].append(subnet)
+
+    return {'version': 1,
+            'config': [devs[d] for d in sorted(devs)]}
+
+
+def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
+    """read the network config and rename devices accordingly.
+    if strict_present is false, then do not raise exception if no devices
+    match.  if strict_busy is false, then do not raise exception if the
+    device cannot be renamed because it is currently configured."""
+    renames = []
+    for ent in netcfg.get('config', {}):
+        if ent.get('type') != 'physical':
+            continue
+        mac = ent.get('mac_address')
+        name = ent.get('name')
+        if not mac:
+            continue
+        renames.append([mac, name])
+
+    return rename_interfaces(renames)
+
+
+def _get_current_rename_info(check_downable=True):
+    """Collect information necessary for rename_interfaces."""
+    names = get_devicelist()
+    bymac = {}
+    for n in names:
+        bymac[get_interface_mac(n)] = {
+            'name': n, 'up': is_up(n), 'downable': None}
+
+    if check_downable:
+        nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
+        ipv6, _err = util.subp(['ip', '-6', 'addr', 'show', 'permanent',
+                                'scope', 'global'], capture=True)
+        ipv4, _err = util.subp(['ip', '-4', 'addr', 'show'], capture=True)
+
+        nics_with_addresses = set()
+        for bytes_out in (ipv6, ipv4):
+            nics_with_addresses.update(nmatch.findall(bytes_out))
+
+        for d in bymac.values():
+            d['downable'] = (d['up'] is False or
+                             d['name'] not in nics_with_addresses)
+
+    return bymac
+
+
+def rename_interfaces(renames, strict_present=True, strict_busy=True,
+                      current_info=None):
+    if current_info is None:
+        current_info = _get_current_rename_info()
+
+    cur_bymac = {}
+    for mac, data in current_info.items():
+        cur = data.copy()
+        cur['mac'] = mac
+        cur_bymac[mac] = cur
+
+    def update_byname(bymac):
+        return {data['name']: data for data in bymac.values()}
+
+    def rename(cur, new):
+        util.subp(["ip", "link", "set", cur, "name", new], capture=True)
+
+    def down(name):
+        util.subp(["ip", "link", "set", name, "down"], capture=True)
+
+    def up(name):
+        util.subp(["ip", "link", "set", name, "up"], capture=True)
+
+    ops = []
+    errors = []
+    ups = []
+    cur_byname = update_byname(cur_bymac)
+    tmpname_fmt = "cirename%d"
+    tmpi = -1
+
+    for mac, new_name in renames:
+        cur = cur_bymac.get(mac, {})
+        cur_name = cur.get('name')
+        cur_ops = []
+        if cur_name == new_name:
+            # nothing to do
+            continue
+
+        if not cur_name:
+            if strict_present:
+                errors.append(
+                    "[nic not present] Cannot rename mac=%s to %s"
+                    ", not available." % (mac, new_name))
+            continue
+
+        if cur['up']:
+            msg = "[busy] Error renaming mac=%s from %s to %s"
+            if not cur['downable']:
+                if strict_busy:
+                    errors.append(msg % (mac, cur_name, new_name))
+                continue
+            cur['up'] = False
+            cur_ops.append(("down", mac, new_name, (cur_name,)))
+            ups.append(("up", mac, new_name, (new_name,)))
+
+        if new_name in cur_byname:
+            target = cur_byname[new_name]
+            if target['up']:
+                msg = "[busy-target] Error renaming mac=%s from %s to %s."
+                if not target['downable']:
+                    if strict_busy:
+                        errors.append(msg % (mac, cur_name, new_name))
+                    continue
+                else:
+                    cur_ops.append(("down", mac, new_name, (new_name,)))
+
+            tmp_name = None
+            while tmp_name is None or tmp_name in cur_byname:
+                tmpi += 1
+                tmp_name = tmpname_fmt % tmpi
+
+            cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
+            target['name'] = tmp_name
+            cur_byname = update_byname(cur_bymac)
+            if target['up']:
+                ups.append(("up", mac, new_name, (tmp_name,)))
+
+        cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
+        cur['name'] = new_name
+        cur_byname = update_byname(cur_bymac)
+        ops += cur_ops
+
+    opmap = {'rename': rename, 'down': down, 'up': up}
+
+    if len(ops) + len(ups) == 0:
+        if len(errors):
+            LOG.debug("unable to do any work for renaming of %s", renames)
+        else:
+            LOG.debug("no work necessary for renaming of %s", renames)
+    else:
+        LOG.debug("achieving renaming of %s with ops %s", renames, ops + ups)
+
+        for op, mac, new_name, params in ops + ups:
+            try:
+                opmap.get(op)(*params)
+            except Exception as e:
+                errors.append(
+                    "[unknown] Error performing %s%s for %s, %s: %s" %
+                    (op, params, mac, new_name, e))
+
+    if len(errors):
+        raise Exception('\n'.join(errors))
+
+
+def get_interface_mac(ifname):
+    """Returns the string value of an interface's MAC Address"""
+    return read_sys_net(ifname, "address", enoent=False)
+
+
+def get_interfaces_by_mac(devs=None):
+    """Build a dictionary of tuples {mac: name}"""
+    if devs is None:
+        devs = get_devicelist()
+    ret = {}
+    for name in devs:
+        mac = get_interface_mac(name)
+        # some devices may not have a mac (tun0)
+        if mac:
+            ret[mac] = name
+    return ret
+
 # vi: ts=4 expandtab syntax=python

=== modified file 'cloudinit/sources/DataSourceCloudSigma.py'
--- cloudinit/sources/DataSourceCloudSigma.py	2016-05-12 17:56:26 +0000
+++ cloudinit/sources/DataSourceCloudSigma.py	2016-06-03 19:07:08 +0000
@@ -27,8 +27,6 @@
 
 LOG = logging.getLogger(__name__)
 
-VALID_DSMODES = ("local", "net", "disabled")
-
 
 class DataSourceCloudSigma(sources.DataSource):
     """
@@ -38,7 +36,6 @@
     http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
     """
     def __init__(self, sys_cfg, distro, paths):
-        self.dsmode = 'local'
         self.cepko = Cepko()
         self.ssh_public_key = ''
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -84,11 +81,9 @@
             LOG.debug("CloudSigma: Unable to read from serial port")
             return False
 
-        dsmode = server_meta.get('cloudinit-dsmode', self.dsmode)
-        if dsmode not in VALID_DSMODES:
-            LOG.warn("Invalid dsmode %s, assuming default of 'net'", dsmode)
-            dsmode = 'net'
-        if dsmode == "disabled" or dsmode != self.dsmode:
+        self.dsmode = self._determine_dsmode(
+            [server_meta.get('cloudinit-dsmode')])
+        if self.dsmode == sources.DSMODE_DISABLED:
             return False
 
         base64_fields = server_meta.get('base64_fields', '').split(',')
@@ -120,17 +115,13 @@
         return self.metadata['uuid']
 
 
-class DataSourceCloudSigmaNet(DataSourceCloudSigma):
-    def __init__(self, sys_cfg, distro, paths):
-        DataSourceCloudSigma.__init__(self, sys_cfg, distro, paths)
-        self.dsmode = 'net'
-
+# Legacy: Must be present in case we load an old pkl object
+DataSourceCloudSigmaNet = DataSourceCloudSigma
 
 # Used to match classes to dependencies. Since this datasource uses the serial
 # port network is not really required, so it's okay to load without it, too.
 datasources = [
     (DataSourceCloudSigma, (sources.DEP_FILESYSTEM)),
-    (DataSourceCloudSigmaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
 ]
 
 

=== modified file 'cloudinit/sources/DataSourceConfigDrive.py'
--- cloudinit/sources/DataSourceConfigDrive.py	2016-04-29 13:04:36 +0000
+++ cloudinit/sources/DataSourceConfigDrive.py	2016-06-03 19:07:08 +0000
@@ -22,6 +22,7 @@
 import os
 
 from cloudinit import log as logging
+from cloudinit import net
 from cloudinit import sources
 from cloudinit import util
 
@@ -35,7 +36,6 @@
 DEFAULT_METADATA = {
     "instance-id": DEFAULT_IID,
 }
-VALID_DSMODES = ("local", "net", "pass", "disabled")
 FS_TYPES = ('vfat', 'iso9660')
 LABEL_TYPES = ('config-2',)
 POSSIBLE_MOUNTS = ('sr', 'cd')
@@ -47,12 +47,12 @@
     def __init__(self, sys_cfg, distro, paths):
         super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
         self.source = None
-        self.dsmode = 'local'
         self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
         self.version = None
         self.ec2_metadata = None
         self._network_config = None
         self.network_json = None
+        self.network_eni = None
         self.files = {}
 
     def __str__(self):
@@ -98,38 +98,22 @@
 
         md = results.get('metadata', {})
         md = util.mergemanydict([md, DEFAULT_METADATA])
-        user_dsmode = results.get('dsmode', None)
-        if user_dsmode not in VALID_DSMODES + (None,):
-            LOG.warn("User specified invalid mode: %s", user_dsmode)
-            user_dsmode = None
-
-        dsmode = get_ds_mode(cfgdrv_ver=results['version'],
-                             ds_cfg=self.ds_cfg.get('dsmode'),
-                             user=user_dsmode)
-
-        if dsmode == "disabled":
-            # most likely user specified
+
+        self.dsmode = self._determine_dsmode(
+            [results.get('dsmode'), self.ds_cfg.get('dsmode'),
+             sources.DSMODE_PASS if results['version'] == 1 else None])
+
+        if self.dsmode == sources.DSMODE_DISABLED:
             return False
 
-        # TODO(smoser): fix this, its dirty.
-        # we want to do some things (writing files and network config)
-        # only on first boot, and even then, we want to do so in the
-        # local datasource (so they happen earlier) even if the configured
-        # dsmode is 'net' or 'pass'. To do this, we check the previous
-        # instance-id
+        # This is legacy and sneaky.  If dsmode is 'pass' then write
+        # 'injected files' and apply legacy ENI network format.
         prev_iid = get_previous_iid(self.paths)
         cur_iid = md['instance-id']
-        if prev_iid != cur_iid and self.dsmode == "local":
+        if prev_iid != cur_iid and self.dsmode == sources.DSMODE_PASS:
             on_first_boot(results, distro=self.distro)
-
-        # dsmode != self.dsmode here if:
-        #  * dsmode = "pass",  pass means it should only copy files and then
-        #    pass to another datasource
-        #  * dsmode = "net" and self.dsmode = "local"
-        #    so that user boothooks would be applied with network, the
-        #    local datasource just gets out of the way, and lets the net claim
-        if dsmode != self.dsmode:
-            LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+            LOG.debug("%s: not claiming datasource, dsmode=%s", self,
+                      self.dsmode)
             return False
 
         self.source = found
@@ -147,12 +131,11 @@
             LOG.warn("Invalid content in vendor-data: %s", e)
             self.vendordata_raw = None
 
-        try:
-            self.network_json = results.get('networkdata')
-        except ValueError as e:
-            LOG.warn("Invalid content in network-data: %s", e)
-            self.network_json = None
-
+        # network_config is an /etc/network/interfaces formated file and is
+        # obsolete compared to networkdata (from network_data.json) but both
+        # might be present.
+        self.network_eni = results.get("network_config")
+        self.network_json = results.get('networkdata')
         return True
 
     def check_instance_id(self, sys_cfg):
@@ -163,41 +146,16 @@
     def network_config(self):
         if self._network_config is None:
             if self.network_json is not None:
+                LOG.debug("network config provided via network_json")
                 self._network_config = convert_network_data(self.network_json)
+            elif self.network_eni is not None:
+                self._network_config = net.convert_eni_data(self.network_eni)
+                LOG.debug("network config provided via converted eni data")
+            else:
+                LOG.debug("no network configuration available")
         return self._network_config
 
 
-class DataSourceConfigDriveNet(DataSourceConfigDrive):
-    def __init__(self, sys_cfg, distro, paths):
-        DataSourceConfigDrive.__init__(self, sys_cfg, distro, paths)
-        self.dsmode = 'net'
-
-
-def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
-    """Determine what mode should be used.
-    valid values are 'pass', 'disabled', 'local', 'net'
-    """
-    # user passed data trumps everything
-    if user is not None:
-        return user
-
-    if ds_cfg is not None:
-        return ds_cfg
-
-    # at config-drive version 1, the default behavior was pass.  That
-    # meant to not use use it as primary data source, but expect a ec2 metadata
-    # source. for version 2, we default to 'net', which means
-    # the DataSourceConfigDriveNet, would be used.
-    #
-    # this could change in the future.  If there was definitive metadata
-    # that indicated presense of an openstack metadata service, then
-    # we could change to 'pass' by default also. The motivation for that
-    # would be 'cloud-init query' as the web service could be more dynamic
-    if cfgdrv_ver == 1:
-        return "pass"
-    return "net"
-
-
 def read_config_drive(source_dir):
     reader = openstack.ConfigDriveReader(source_dir)
     finders = [
@@ -231,9 +189,12 @@
                         % (type(data)))
     net_conf = data.get("network_config", '')
     if net_conf and distro:
-        LOG.debug("Updating network interfaces from config drive")
+        LOG.warn("Updating network interfaces from config drive")
         distro.apply_network(net_conf)
-    files = data.get('files', {})
+    write_injected_files(data.get('files'))
+
+
+def write_injected_files(files):
     if files:
         LOG.debug("Writing %s injected files", len(files))
         for (filename, content) in files.items():
@@ -293,20 +254,8 @@
     return devices
 
 
-# Used to match classes to dependencies
-datasources = [
-    (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
-    (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
-    return sources.list_from_depends(depends, datasources)
-
-
 # Convert OpenStack ConfigDrive NetworkData json to network_config yaml
-def convert_network_data(network_json=None):
+def convert_network_data(network_json=None, known_macs=None):
     """Return a dictionary of network_config by parsing provided
        OpenStack ConfigDrive NetworkData json format
 
@@ -344,6 +293,7 @@
             'mac_address',
             'subnets',
             'params',
+            'mtu',
         ],
         'subnet': [
             'type',
@@ -353,7 +303,6 @@
             'metric',
             'gateway',
             'pointopoint',
-            'mtu',
             'scope',
             'dns_nameservers',
             'dns_search',
@@ -370,9 +319,15 @@
         subnets = []
         cfg = {k: v for k, v in link.items()
                if k in valid_keys['physical']}
-        cfg.update({'name': link['id']})
-        for network in [net for net in networks
-                        if net['link'] == link['id']]:
+        # 'name' is not in openstack spec yet, but we will support it if it is
+        # present.  The 'id' in the spec is currently implemented as the host
+        # nic's name, meaning something like 'tap-adfasdffd'.  We do not want
+        # to name guest devices with such ugly names.
+        if 'name' in link:
+            cfg['name'] = link['name']
+
+        for network in [n for n in networks
+                        if n['link'] == link['id']]:
             subnet = {k: v for k, v in network.items()
                       if k in valid_keys['subnet']}
             if 'dhcp' in network['type']:
@@ -387,7 +342,7 @@
                 })
             subnets.append(subnet)
         cfg.update({'subnets': subnets})
-        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:
+        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
             cfg.update({
                 'type': 'physical',
                 'mac_address': link['ethernet_mac_address']})
@@ -416,9 +371,38 @@
 
         config.append(cfg)
 
+    need_names = [d for d in config
+                  if d.get('type') == 'physical' and 'name' not in d]
+
+    if need_names:
+        if known_macs is None:
+            known_macs = net.get_interfaces_by_mac()
+
+        for d in need_names:
+            mac = d.get('mac_address')
+            if not mac:
+                raise ValueError("No mac_address or name entry for %s" % d)
+            if mac not in known_macs:
+                raise ValueError("Unable to find a system nic for %s" % d)
+            d['name'] = known_macs[mac]
+
     for service in services:
         cfg = service
         cfg.update({'type': 'nameserver'})
         config.append(cfg)
 
     return {'version': 1, 'config': config}
+
+
+# Legacy: Must be present in case we load an old pkl object
+DataSourceConfigDriveNet = DataSourceConfigDrive
+
+# Used to match classes to dependencies
+datasources = [
+    (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)

=== modified file 'cloudinit/sources/DataSourceNoCloud.py'
--- cloudinit/sources/DataSourceNoCloud.py	2016-05-12 17:56:26 +0000
+++ cloudinit/sources/DataSourceNoCloud.py	2016-06-03 19:07:08 +0000
@@ -24,6 +24,7 @@
 import os
 
 from cloudinit import log as logging
+from cloudinit import net
 from cloudinit import sources
 from cloudinit import util
 
@@ -35,7 +36,6 @@
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.dsmode = 'local'
         self.seed = None
-        self.cmdline_id = "ds=nocloud"
         self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
                           os.path.join(paths.seed_dir, 'nocloud-net')]
         self.seed_dir = None
@@ -58,7 +58,7 @@
         try:
             # Parse the kernel command line, getting data passed in
             md = {}
-            if parse_cmdline_data(self.cmdline_id, md):
+            if load_cmdline_data(md):
                 found.append("cmdline")
                 mydata = _merge_new_seed(mydata, {'meta-data': md})
         except Exception:
@@ -123,12 +123,6 @@
 
                     mydata = _merge_new_seed(mydata, seeded)
 
-                    # For seed from a device, the default mode is 'net'.
-                    # that is more likely to be what is desired.  If they want
-                    # dsmode of local, then they must specify that.
-                    if 'dsmode' not in mydata['meta-data']:
-                        mydata['meta-data']['dsmode'] = "net"
-
                     LOG.debug("Using data from %s", dev)
                     found.append(dev)
                     break
@@ -144,7 +138,6 @@
         if len(found) == 0:
             return False
 
-        seeded_network = None
         # The special argument "seedfrom" indicates we should
         # attempt to seed the userdata / metadata from its value
         # its primarily value is in allowing the user to type less
@@ -160,10 +153,6 @@
                 LOG.debug("Seed from %s not supported by %s", seedfrom, self)
                 return False
 
-            if (mydata['meta-data'].get('network-interfaces') or
-                    mydata.get('network-config')):
-                seeded_network = self.dsmode
-
             # This could throw errors, but the user told us to do it
             # so if errors are raised, let them raise
             (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
@@ -179,35 +168,21 @@
         mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                   defaults])
 
-        netdata = {'format': None, 'data': None}
-        if mydata['meta-data'].get('network-interfaces'):
-            netdata['format'] = 'interfaces'
-            netdata['data'] = mydata['meta-data']['network-interfaces']
-        elif mydata.get('network-config'):
-            netdata['format'] = 'network-config'
-            netdata['data'] = mydata['network-config']
-
-        # if this is the local datasource or 'seedfrom' was used
-        # and the source of the seed was self.dsmode.
-        # Then see if there is network config to apply.
-        # note this is obsolete network-interfaces style seeding.
-        if self.dsmode in ("local", seeded_network):
-            if mydata['meta-data'].get('network-interfaces'):
-                LOG.debug("Updating network interfaces from %s", self)
-                self.distro.apply_network(
-                    mydata['meta-data']['network-interfaces'])
-
-        if mydata['meta-data']['dsmode'] == self.dsmode:
-            self.seed = ",".join(found)
-            self.metadata = mydata['meta-data']
-            self.userdata_raw = mydata['user-data']
-            self.vendordata_raw = mydata['vendor-data']
-            self._network_config = mydata['network-config']
-            return True
-
-        LOG.debug("%s: not claiming datasource, dsmode=%s", self,
-                  mydata['meta-data']['dsmode'])
-        return False
+        self.dsmode = self._determine_dsmode(
+            [mydata['meta-data'].get('dsmode')])
+
+        if self.dsmode == sources.DSMODE_DISABLED:
+            LOG.debug("%s: not claiming datasource, dsmode=%s", self,
+                      self.dsmode)
+            return False
+
+        self.seed = ",".join(found)
+        self.metadata = mydata['meta-data']
+        self.userdata_raw = mydata['user-data']
+        self.vendordata_raw = mydata['vendor-data']
+        self._network_config = mydata['network-config']
+        self._network_eni = mydata['meta-data'].get('network-interfaces')
+        return True
 
     def check_instance_id(self, sys_cfg):
         # quickly (local check only) if self.instance_id is still valid
@@ -227,6 +202,9 @@
 
     @property
     def network_config(self):
+        if self._network_config is None:
+            if self._network_eni is not None:
+                self._network_config = net.convert_eni_data(self._network_eni)
         return self._network_config
 
 
@@ -254,8 +232,22 @@
     return None
 
 
+def load_cmdline_data(fill, cmdline=None):
+    pairs = [("ds=nocloud", sources.DSMODE_LOCAL),
+             ("ds=nocloud-net", sources.DSMODE_NETWORK)]
+    for idstr, dsmode in pairs:
+        if parse_cmdline_data(idstr, fill, cmdline):
+            # if dsmode was explicitly in the command line, then
+            # prefer it to the dsmode based on the command line id
+            if 'dsmode' not in fill:
+                fill['dsmode'] = dsmode
+            return True
+    return False
+
+
 # Returns true or false indicating if cmdline indicated
-# that this module should be used
+# that this module should be used.  Updates dictionary 'fill'
+# with data that was found.
 # Example cmdline:
 #  root=LABEL=uec-rootfs ro ds=nocloud
 def parse_cmdline_data(ds_id, fill, cmdline=None):
@@ -319,9 +311,7 @@
 class DataSourceNoCloudNet(DataSourceNoCloud):
     def __init__(self, sys_cfg, distro, paths):
         DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
-        self.cmdline_id = "ds=nocloud-net"
         self.supported_seed_starts = ("http://", "https://", "ftp://")
-        self.dsmode = "net"
 
 
 # Used to match classes to dependencies

=== modified file 'cloudinit/sources/DataSourceOpenNebula.py'
--- cloudinit/sources/DataSourceOpenNebula.py	2016-03-04 06:45:58 +0000
+++ cloudinit/sources/DataSourceOpenNebula.py	2016-06-03 19:07:08 +0000
@@ -37,16 +37,13 @@
 LOG = logging.getLogger(__name__)
 
 DEFAULT_IID = "iid-dsopennebula"
-DEFAULT_MODE = 'net'
 DEFAULT_PARSEUSER = 'nobody'
 CONTEXT_DISK_FILES = ["context.sh"]
-VALID_DSMODES = ("local", "net", "disabled")
 
 
 class DataSourceOpenNebula(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
-        self.dsmode = 'local'
         self.seed = None
         self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
 
@@ -93,52 +90,27 @@
         md = util.mergemanydict([md, defaults])
 
         # check for valid user specified dsmode
-        user_dsmode = results['metadata'].get('DSMODE', None)
-        if user_dsmode not in VALID_DSMODES + (None,):
-            LOG.warn("user specified invalid mode: %s", user_dsmode)
-            user_dsmode = None
-
-        # decide dsmode
-        if user_dsmode:
-            dsmode = user_dsmode
-        elif self.ds_cfg.get('dsmode'):
-            dsmode = self.ds_cfg.get('dsmode')
-        else:
-            dsmode = DEFAULT_MODE
-
-        if dsmode == "disabled":
-            # most likely user specified
-            return False
-
-        # apply static network configuration only in 'local' dsmode
-        if ('network-interfaces' in results and self.dsmode == "local"):
-            LOG.debug("Updating network interfaces from %s", self)
-            self.distro.apply_network(results['network-interfaces'])
-
-        if dsmode != self.dsmode:
-            LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
+        self.dsmode = self._determine_dsmode(
+            [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
+
+        if self.dsmode == sources.DSMODE_DISABLED:
             return False
 
         self.seed = seed
+        self.network_eni = results.get("network_config")
         self.metadata = md
         self.userdata_raw = results.get('userdata')
         return True
 
     def get_hostname(self, fqdn=False, resolve_ip=None):
         if resolve_ip is None:
-            if self.dsmode == 'net':
+            if self.dsmode == sources.DSMODE_NETWORK:
                 resolve_ip = True
             else:
                 resolve_ip = False
         return sources.DataSource.get_hostname(self, fqdn, resolve_ip)
 
 
-class DataSourceOpenNebulaNet(DataSourceOpenNebula):
-    def __init__(self, sys_cfg, distro, paths):
-        DataSourceOpenNebula.__init__(self, sys_cfg, distro, paths)
-        self.dsmode = 'net'
-
-
 class NonContextDiskDir(Exception):
     pass
 
@@ -443,10 +415,12 @@
     return results
 
 
+# Legacy: Must be present in case we load an old pkl object
+DataSourceOpenNebulaNet = DataSourceOpenNebula
+
 # Used to match classes to dependencies
 datasources = [
     (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
-    (DataSourceOpenNebulaNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
 ]
 
 

=== modified file 'cloudinit/sources/DataSourceOpenStack.py'
--- cloudinit/sources/DataSourceOpenStack.py	2016-05-16 23:08:19 +0000
+++ cloudinit/sources/DataSourceOpenStack.py	2016-06-03 19:07:08 +0000
@@ -33,13 +33,11 @@
 DEFAULT_METADATA = {
     "instance-id": DEFAULT_IID,
 }
-VALID_DSMODES = ("net", "disabled")
 
 
 class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
-        self.dsmode = 'net'
         self.metadata_address = None
         self.ssl_details = util.fetch_ssl_details(self.paths)
         self.version = None
@@ -125,11 +123,8 @@
                         self.metadata_address)
             return False
 
-        user_dsmode = results.get('dsmode', None)
-        if user_dsmode not in VALID_DSMODES + (None,):
-            LOG.warn("User specified invalid mode: %s", user_dsmode)
-            user_dsmode = None
-        if user_dsmode == 'disabled':
+        self.dsmode = self._determine_dsmode([results.get('dsmode')])
+        if self.dsmode == sources.DSMODE_DISABLED:
             return False
 
         md = results.get('metadata', {})

=== modified file 'cloudinit/sources/DataSourceSmartOS.py'
--- cloudinit/sources/DataSourceSmartOS.py	2016-04-12 16:57:50 +0000
+++ cloudinit/sources/DataSourceSmartOS.py	2016-06-03 19:07:08 +0000
@@ -32,13 +32,13 @@
 #       http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
 #       Comments with "@datadictionary" are snippets of the definition
 
+import base64
 import binascii
-import contextlib
+import json
 import os
 import random
 import re
 import socket
-import stat
 
 import serial
 
@@ -64,14 +64,36 @@
     'operator-script': ('sdc:operator-script', False),
 }
 
+SMARTOS_ATTRIB_JSON = {
+    # Cloud-init Key : (SmartOS Key known JSON)
+    'network-data': 'sdc:nics',
+}
+
+SMARTOS_ENV_LX_BRAND = "lx-brand"
+SMARTOS_ENV_KVM = "kvm"
+
 DS_NAME = 'SmartOS'
 DS_CFG_PATH = ['datasource', DS_NAME]
+NO_BASE64_DECODE = [
+    'iptables_disable',
+    'motd_sys_info',
+    'root_authorized_keys',
+    'sdc:datacenter_name',
+    'sdc:uuid',
+    'user-data',
+    'user-script',
+]
+
+METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
+SERIAL_DEVICE = '/dev/ttyS1'
+SERIAL_TIMEOUT = 60
+
 # BUILT-IN DATASOURCE CONFIGURATION
 #  The following is the built-in configuration. If the values
 #  are not set via the system configuration, then these default
 #  will be used:
 #    serial_device: which serial device to use for the meta-data
-#    seed_timeout: how long to wait on the device
+#    serial_timeout: how long to wait on the device
 #    no_base64_decode: values which are not base64 encoded and
 #            are fetched directly from SmartOS, not meta-data values
 #    base64_keys: meta-data keys that are delivered in base64
@@ -81,16 +103,10 @@
 #    fs_setup: describes how to format the ephemeral drive
 #
 BUILTIN_DS_CONFIG = {
-    'serial_device': '/dev/ttyS1',
-    'metadata_sockfile': '/native/.zonecontrol/metadata.sock',
-    'seed_timeout': 60,
-    'no_base64_decode': ['root_authorized_keys',
-                         'motd_sys_info',
-                         'iptables_disable',
-                         'user-data',
-                         'user-script',
-                         'sdc:datacenter_name',
-                         'sdc:uuid'],
+    'serial_device': SERIAL_DEVICE,
+    'serial_timeout': SERIAL_TIMEOUT,
+    'metadata_sockfile': METADATA_SOCKFILE,
+    'no_base64_decode': NO_BASE64_DECODE,
     'base64_keys': [],
     'base64_all': False,
     'disk_aliases': {'ephemeral0': '/dev/vdb'},
@@ -154,59 +170,41 @@
 
 
 class DataSourceSmartOS(sources.DataSource):
+    _unset = "_unset"
+    smartos_type = _unset
+    md_client = _unset
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
-        self.is_smartdc = None
         self.ds_cfg = util.mergemanydict([
             self.ds_cfg,
             util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
             BUILTIN_DS_CONFIG])
 
         self.metadata = {}
+        self.network_data = None
+        self._network_config = None
 
-        # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
-        # report 'BrandZ virtual linux' as the kernel version
-        if os.uname()[3].lower() == 'brandz virtual linux':
-            LOG.debug("Host is SmartOS, guest in Zone")
-            self.is_smartdc = True
-            self.smartos_type = 'lx-brand'
-            self.cfg = {}
-            self.seed = self.ds_cfg.get("metadata_sockfile")
-        else:
-            self.is_smartdc = True
-            self.smartos_type = 'kvm'
-            self.seed = self.ds_cfg.get("serial_device")
-            self.cfg = BUILTIN_CLOUD_CONFIG
-            self.seed_timeout = self.ds_cfg.get("serial_timeout")
-        self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
-        self.b64_keys = self.ds_cfg.get('base64_keys')
-        self.b64_all = self.ds_cfg.get('base64_all')
         self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
 
+        self._init()
+
     def __str__(self):
         root = sources.DataSource.__str__(self)
-        return "%s [seed=%s]" % (root, self.seed)
-
-    def _get_seed_file_object(self):
-        if not self.seed:
-            raise AttributeError("seed device is not set")
-
-        if self.smartos_type == 'lx-brand':
-            if not stat.S_ISSOCK(os.stat(self.seed).st_mode):
-                LOG.debug("Seed %s is not a socket", self.seed)
-                return None
-            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            sock.connect(self.seed)
-            return sock.makefile('rwb')
-        else:
-            if not stat.S_ISCHR(os.stat(self.seed).st_mode):
-                LOG.debug("Seed %s is not a character device")
-                return None
-            ser = serial.Serial(self.seed, timeout=self.seed_timeout)
-            if not ser.isOpen():
-                raise SystemError("Unable to open %s" % self.seed)
-            return ser
-        return None
+        return "%s [client=%s]" % (root, self.md_client)
+
+    def _init(self):
+        if self.smartos_type == self._unset:
+            self.smartos_type = get_smartos_environ()
+            if self.smartos_type is None:
+                self.md_client = None
+
+        if self.md_client == self._unset:
+            self.md_client = jmc_client_factory(
+                smartos_type=self.smartos_type,
+                metadata_sockfile=self.ds_cfg['metadata_sockfile'],
+                serial_device=self.ds_cfg['serial_device'],
+                serial_timeout=self.ds_cfg['serial_timeout'])
 
     def _set_provisioned(self):
         '''Mark the instance provisioning state as successful.
@@ -225,50 +223,26 @@
                       '/'.join([svc_path, 'provision_success']))
 
     def get_data(self):
+        self._init()
+
         md = {}
         ud = ""
 
-        if not device_exists(self.seed):
-            LOG.debug("No metadata device '%s' found for SmartOS datasource",
-                      self.seed)
-            return False
-
-        uname_arch = os.uname()[4]
-        if uname_arch.startswith("arm") or uname_arch == "aarch64":
-            # Disabling because dmidcode in dmi_data() crashes kvm process
-            LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)")
-            return False
-
-        # SDC KVM instances will provide dmi data, LX-brand does not
-        if self.smartos_type == 'kvm':
-            dmi_info = dmi_data()
-            if dmi_info is None:
-                LOG.debug("No dmidata utility found")
-                return False
-
-            system_type = dmi_info
-            if 'smartdc' not in system_type.lower():
-                LOG.debug("Host is not on SmartOS. system_type=%s",
-                          system_type)
-                return False
-            LOG.debug("Host is SmartOS, guest in KVM")
-
-        seed_obj = self._get_seed_file_object()
-        if seed_obj is None:
-            LOG.debug('Seed file object not found.')
-            return False
-        with contextlib.closing(seed_obj) as seed:
-            b64_keys = self.query('base64_keys', seed, strip=True, b64=False)
-            if b64_keys is not None:
-                self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
-
-            b64_all = self.query('base64_all', seed, strip=True, b64=False)
-            if b64_all is not None:
-                self.b64_all = util.is_true(b64_all)
-
-            for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
-                smartos_noun, strip = attribute
-                md[ci_noun] = self.query(smartos_noun, seed, strip=strip)
+        if not self.smartos_type:
+            LOG.debug("Not running on smartos")
+            return False
+
+        if not self.md_client.exists():
+            LOG.debug("No metadata device '%r' found for SmartOS datasource",
+                      self.md_client)
+            return False
+
+        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
+            smartos_noun, strip = attribute
+            md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
+
+        for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
+            md[ci_noun] = self.md_client.get_json(smartos_noun)
 
         # @datadictionary: This key may contain a program that is written
         # to a file in the filesystem of the guest on each boot and then
@@ -318,6 +292,7 @@
         self.metadata = util.mergemanydict([md, self.metadata])
         self.userdata_raw = ud
         self.vendordata_raw = md['vendor-data']
+        self.network_data = md['network-data']
 
         self._set_provisioned()
         return True
@@ -326,69 +301,20 @@
         return self.ds_cfg['disk_aliases'].get(name)
 
     def get_config_obj(self):
-        return self.cfg
+        if self.smartos_type == SMARTOS_ENV_KVM:
+            return BUILTIN_CLOUD_CONFIG
+        return {}
 
     def get_instance_id(self):
         return self.metadata['instance-id']
 
-    def query(self, noun, seed_file, strip=False, default=None, b64=None):
-        if b64 is None:
-            if noun in self.smartos_no_base64:
-                b64 = False
-            elif self.b64_all or noun in self.b64_keys:
-                b64 = True
-
-        return self._query_data(noun, seed_file, strip=strip,
-                                default=default, b64=b64)
-
-    def _query_data(self, noun, seed_file, strip=False,
-                    default=None, b64=None):
-        """Makes a request via "GET <NOUN>"
-
-           In the response, the first line is the status, while subsequent
-           lines are is the value. A blank line with a "." is used to
-           indicate end of response.
-
-           If the response is expected to be base64 encoded, then set
-           b64encoded to true. Unfortantely, there is no way to know if
-           something is 100% encoded, so this method relies on being told
-           if the data is base64 or not.
-        """
-
-        if not noun:
-            return False
-
-        response = JoyentMetadataClient(seed_file).get_metadata(noun)
-
-        if response is None:
-            return default
-
-        if b64 is None:
-            b64 = self._query_data('b64-%s' % noun, seed_file, b64=False,
-                                   default=False, strip=True)
-            b64 = util.is_true(b64)
-
-        resp = None
-        if b64 or strip:
-            resp = "".join(response).rstrip()
-        else:
-            resp = "".join(response)
-
-        if b64:
-            try:
-                return util.b64d(resp)
-            # Bogus input produces different errors in Python 2 and 3;
-            # catch both.
-            except (TypeError, binascii.Error):
-                LOG.warn("Failed base64 decoding key '%s'", noun)
-                return resp
-
-        return resp
-
-
-def device_exists(device):
-    """Symplistic method to determine if the device exists or not"""
-    return os.path.exists(device)
+    @property
+    def network_config(self):
+        if self._network_config is None:
+            if self.network_data is not None:
+                self._network_config = (
+                    convert_smartos_network_data(self.network_data))
+        return self._network_config
 
 
 class JoyentMetadataFetchException(Exception):
@@ -407,8 +333,11 @@
         r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
         r'( (?P<payload>.+))?)')
 
-    def __init__(self, metasource):
-        self.metasource = metasource
+    def __init__(self, smartos_type=None, fp=None):
+        if smartos_type is None:
+            smartos_type = get_smartos_environ()
+        self.smartos_type = smartos_type
+        self.fp = fp
 
     def _checksum(self, body):
         return '{0:08x}'.format(
@@ -436,37 +365,229 @@
         LOG.debug('Value "%s" found.', value)
         return value
 
-    def get_metadata(self, metadata_key):
-        LOG.debug('Fetching metadata key "%s"...', metadata_key)
+    def request(self, rtype, param=None):
         request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
-        message_body = '{0} GET {1}'.format(request_id,
-                                            util.b64e(metadata_key))
+        message_body = ' '.join((request_id, rtype,))
+        if param:
+            message_body += ' ' + base64.b64encode(param.encode()).decode()
         msg = 'V2 {0} {1} {2}\n'.format(
             len(message_body), self._checksum(message_body), message_body)
         LOG.debug('Writing "%s" to metadata transport.', msg)
-        self.metasource.write(msg.encode('ascii'))
-        self.metasource.flush()
+
+        need_close = False
+        if not self.fp:
+            self.open_transport()
+            need_close = True
+
+        self.fp.write(msg.encode('ascii'))
+        self.fp.flush()
 
         response = bytearray()
-        response.extend(self.metasource.read(1))
+        response.extend(self.fp.read(1))
         while response[-1:] != b'\n':
-            response.extend(self.metasource.read(1))
+            response.extend(self.fp.read(1))
+
+        if need_close:
+            self.close_transport()
+
         response = response.rstrip().decode('ascii')
         LOG.debug('Read "%s" from metadata transport.', response)
 
         if 'SUCCESS' not in response:
             return None
 
-        return self._get_value_from_frame(request_id, response)
-
-
-def dmi_data():
-    sys_type = util.read_dmi_data("system-product-name")
-
-    if not sys_type:
+        value = self._get_value_from_frame(request_id, response)
+        return value
+
+    def get(self, key, default=None, strip=False):
+        result = self.request(rtype='GET', param=key)
+        if result is None:
+            return default
+        if result and strip:
+            result = result.strip()
+        return result
+
+    def get_json(self, key, default=None):
+        result = self.get(key, default=default)
+        if result is None:
+            return default
+        return json.loads(result)
+
+    def list(self):
+        result = self.request(rtype='KEYS')
+        if result:
+            result = result.split('\n')
+        return result
+
+    def put(self, key, val):
+        param = b' '.join([base64.b64encode(i.encode())
+                           for i in (key, val)]).decode()
+        return self.request(rtype='PUT', param=param)
+
+    def delete(self, key):
+        return self.request(rtype='DELETE', param=key)
+
+    def close_transport(self):
+        if self.fp:
+            self.fp.close()
+            self.fp = None
+
+    def __enter__(self):
+        if self.fp:
+            return self
+        self.open_transport()
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.close_transport()
+        return
+
+    def open_transport(self):
+        raise NotImplementedError
+
+
+class JoyentMetadataSocketClient(JoyentMetadataClient):
+    def __init__(self, socketpath):
+        self.socketpath, self.fp = socketpath, None
+
+    def open_transport(self):
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock.connect(self.socketpath)
+        self.fp = sock.makefile('rwb')
+
+    def exists(self):
+        return os.path.exists(self.socketpath)
+
+    def __repr__(self):
+        return "%s(socketpath=%s)" % (self.__class__.__name__, self.socketpath)
+
+
+class JoyentMetadataSerialClient(JoyentMetadataClient):
+    def __init__(self, device, timeout=10, smartos_type=None):
+        super(JoyentMetadataSerialClient, self).__init__(smartos_type)
+        self.device = device
+        self.timeout = timeout
+
+    def exists(self):
+        return os.path.exists(self.device)
+
+    def open_transport(self):
+        ser = serial.Serial(self.device, timeout=self.timeout)
+        if not ser.isOpen():
+            raise SystemError("Unable to open %s" % self.device)
+        self.fp = ser
+
+    def __repr__(self):
+        return "%s(device=%s, timeout=%s)" % (
+            self.__class__.__name__, self.device, self.timeout)
+
+
+class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
+    """V1 of the protocol was not safe for all values.
+    Thus, we allowed the user to pass values in as base64 encoded.
+    Users may still reasonably expect to be able to send base64 data
+    and have it transparently decoded.  So even though the V2 format is
+    now used, and is safe (using base64 itself), we keep legacy support.
+
+    The way for a user to do this was:
+      a.) specify 'base64_keys' key whose value is a comma delimited
+          list of keys that were base64 encoded.
+      b.) base64_all: string interpreted as a boolean that indicates
+          if all keys are base64 encoded.
+      c.) set a key named b64-<keyname> with a boolean indicating that
+          <keyname> is base64 encoded."""
+
+    def __init__(self, device, timeout=10, smartos_type=None):
+        s = super(JoyentMetadataLegacySerialClient, self)
+        s.__init__(device, timeout, smartos_type)
+        self.base64_keys = None
+        self.base64_all = None
+
+    def _init_base64_keys(self, reset=False):
+        if reset:
+            self.base64_keys = None
+            self.base64_all = None
+
+        keys = None
+        if self.base64_all is None:
+            keys = self.list()
+            if 'base64_all' in keys:
+                self.base64_all = util.is_true(self._get("base64_all"))
+            else:
+                self.base64_all = False
+
+        if self.base64_all:
+            # short circuit if base64_all is true
+            return
+
+        if self.base64_keys is None:
+            if keys is None:
+                keys = self.list()
+            b64_keys = set()
+            if 'base64_keys' in keys:
+                b64_keys = set(self._get("base64_keys").split(","))
+
+            # now add any b64-<keyname> that has a true value
+            for key in [k[3:] for k in keys if k.startswith("b64-")]:
+                if util.is_true(self._get("b64-" + key)):
+                    b64_keys.add(key)
+                else:
+                    if key in b64_keys:
+                        b64_keys.remove(key)
+
+            self.base64_keys = b64_keys
+
+    def _get(self, key, default=None, strip=False):
+        return (super(JoyentMetadataLegacySerialClient, self).
+                get(key, default=default, strip=strip))
+
+    def is_b64_encoded(self, key, reset=False):
+        if key in NO_BASE64_DECODE:
+            return False
+
+        self._init_base64_keys(reset=reset)
+        if self.base64_all:
+            return True
+
+        return key in self.base64_keys
+
+    def get(self, key, default=None, strip=False):
+        mdefault = object()
+        val = self._get(key, strip=False, default=mdefault)
+        if val is mdefault:
+            return default
+
+        if self.is_b64_encoded(key):
+            try:
+                val = base64.b64decode(val.encode()).decode()
+            # Bogus input produces different errors in Python 2 and 3
+            except (TypeError, binascii.Error):
+                LOG.warn("Failed base64 decoding key '%s': %s", key, val)
+
+        if strip:
+            val = val.strip()
+
+        return val
+
+
+def jmc_client_factory(
+        smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
+        serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
+        uname_version=None):
+
+    if smartos_type is None:
+        smartos_type = get_smartos_environ(uname_version)
+
+    if smartos_type is None:
         return None
+    elif smartos_type == SMARTOS_ENV_KVM:
+        return JoyentMetadataLegacySerialClient(
+            device=serial_device, timeout=serial_timeout,
+            smartos_type=smartos_type)
+    elif smartos_type == SMARTOS_ENV_LX_BRAND:
+        return JoyentMetadataSocketClient(socketpath=metadata_sockfile)
 
-    return sys_type
+    raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
 
 
 def write_boot_content(content, content_f, link=None, shebang=False,
@@ -522,15 +643,141 @@
                 util.ensure_dir(os.path.dirname(link))
                 os.symlink(content_f, link)
         except IOError as e:
-            util.logexc(LOG, "failed establishing content link", e)
+            util.logexc(LOG, "failed establishing content link: %s", e)
+
+
+def get_smartos_environ(uname_version=None, product_name=None,
+                        uname_arch=None):
+    uname = os.uname()
+    if uname_arch is None:
+        uname_arch = uname[4]
+
+    if uname_arch.startswith("arm") or uname_arch == "aarch64":
+        return None
+
+    # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
+    # report 'BrandZ virtual linux' as the kernel version
+    if uname_version is None:
+        uname_version = uname[3]
+    if uname_version.lower() == 'brandz virtual linux':
+        return SMARTOS_ENV_LX_BRAND
+
+    if product_name is None:
+        system_type = util.read_dmi_data("system-product-name")
+    else:
+        system_type = product_name
+
+    if system_type and 'smartdc' in system_type.lower():
+        return SMARTOS_ENV_KVM
+
+    return None
+
+
+# Covert SMARTOS 'sdc:nics' data to network_config yaml
+def convert_smartos_network_data(network_data=None):
+    """Return a dictionary of network_config by parsing provided
+       SMARTOS sdc:nics configuration data
+
+    sdc:nics data is a dictionary of properties of a nic and the ip
+    configuration desired.  Additional nic dictionaries are appended
+    to the list.
+
+    Converting the format is straightforward though it does include
+    duplicate information as well as data which appears to be relevant
+    to the hostOS rather than the guest.
+
+    For each entry in the nics list returned from query sdc:nics, we
+    create a type: physical entry, and extract the interface properties:
+    'mac' -> 'mac_address', 'mtu', 'interface' -> 'name'.  The remaining
+    keys are related to ip configuration.  For each ip in the 'ips' list
+    we create a subnet entry under 'subnets' pairing the ip to a one in
+    the 'gateways' list.
+    """
+
+    valid_keys = {
+        'physical': [
+            'mac_address',
+            'mtu',
+            'name',
+            'params',
+            'subnets',
+            'type',
+        ],
+        'subnet': [
+            'address',
+            'broadcast',
+            'dns_nameservers',
+            'dns_search',
+            'gateway',
+            'metric',
+            'netmask',
+            'pointopoint',
+            'routes',
+            'scope',
+            'type',
+        ],
+    }
+
+    config = []
+    for nic in network_data:
+        cfg = {k: v for k, v in nic.items()
+               if k in valid_keys['physical']}
+        cfg.update({
+            'type': 'physical',
+            'name': nic['interface']})
+        if 'mac' in nic:
+            cfg.update({'mac_address': nic['mac']})
+
+        subnets = []
+        for ip, gw in zip(nic['ips'], nic['gateways']):
+            subnet = {k: v for k, v in nic.items()
+                      if k in valid_keys['subnet']}
+            subnet.update({
+                'type': 'static',
+                'address': ip,
+                'gateway': gw,
+            })
+            subnets.append(subnet)
+        cfg.update({'subnets': subnets})
+        config.append(cfg)
+
+    return {'version': 1, 'config': config}
 
 
 # Used to match classes to dependencies
 datasources = [
-    (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+    (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
 ]
 
 
 # Return a list of data sources that match this set of dependencies
 def get_datasource_list(depends):
     return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+    import sys
+    jmc = jmc_client_factory()
+    if jmc is None:
+        print("Do not appear to be on smartos.")
+        sys.exit(1)
+    if len(sys.argv) == 1:
+        keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
+                list(SMARTOS_ATTRIB_MAP.keys()))
+    else:
+        keys = sys.argv[1:]
+
+    data = {}
+    for key in keys:
+        if key in SMARTOS_ATTRIB_JSON:
+            keyname = SMARTOS_ATTRIB_JSON[key]
+            data[key] = jmc.get_json(keyname)
+        else:
+            if key in SMARTOS_ATTRIB_MAP:
+                keyname, strip = SMARTOS_ATTRIB_MAP[key]
+            else:
+                keyname, strip = (key, False)
+            val = jmc.get(keyname, strip=strip)
+            data[key] = val
+
+    print(json.dumps(data, indent=1))

=== modified file 'cloudinit/sources/__init__.py'
--- cloudinit/sources/__init__.py	2016-05-12 17:56:26 +0000
+++ cloudinit/sources/__init__.py	2016-06-03 19:07:08 +0000
@@ -34,6 +34,13 @@
 from cloudinit.filters import launch_index
 from cloudinit.reporting import events
 
+DSMODE_DISABLED = "disabled"
+DSMODE_LOCAL = "local"
+DSMODE_NETWORK = "net"
+DSMODE_PASS = "pass"
+
+VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK]
+
 DEP_FILESYSTEM = "FILESYSTEM"
 DEP_NETWORK = "NETWORK"
 DS_PREFIX = 'DataSource'
@@ -57,6 +64,7 @@
         self.userdata_raw = None
         self.vendordata = None
         self.vendordata_raw = None
+        self.dsmode = DSMODE_NETWORK
 
         # find the datasource config name.
         # remove 'DataSource' from classname on front, and remove 'Net' on end.
@@ -223,10 +231,35 @@
         # quickly (local check only) if self.instance_id is still
         return False
 
+    @staticmethod
+    def _determine_dsmode(candidates, default=None, valid=None):
+        # return the first candidate that is non None, warn if not valid
+        if default is None:
+            default = DSMODE_NETWORK
+
+        if valid is None:
+            valid = VALID_DSMODES
+
+        for candidate in candidates:
+            if candidate is None:
+                continue
+            if candidate in valid:
+                return candidate
+            else:
+                LOG.warn("invalid dsmode '%s', using default=%s",
+                         candidate, default)
+                return default
+
+        return default
+
     @property
     def network_config(self):
         return None
 
+    @property
+    def first_instance_boot(self):
+        return
+
 
 def normalize_pubkey_data(pubkey_data):
     keys = []

=== modified file 'cloudinit/sources/helpers/openstack.py'
--- cloudinit/sources/helpers/openstack.py	2016-05-12 17:56:26 +0000
+++ cloudinit/sources/helpers/openstack.py	2016-06-03 19:07:08 +0000
@@ -190,14 +190,14 @@
                   versions_available)
         return selected_version
 
-    def _read_content_path(self, item):
+    def _read_content_path(self, item, decode=False):
         path = item.get('content_path', '').lstrip("/")
         path_pieces = path.split("/")
         valid_pieces = [p for p in path_pieces if len(p)]
         if not valid_pieces:
             raise BrokenMetadata("Item %s has no valid content path" % (item))
         path = self._path_join(self.base_path, "openstack", *path_pieces)
-        return self._path_read(path)
+        return self._path_read(path, decode=decode)
 
     def read_v2(self):
         """Reads a version 2 formatted location.
@@ -298,7 +298,8 @@
         net_item = metadata.get("network_config", None)
         if net_item:
             try:
-                results['network_config'] = self._read_content_path(net_item)
+                content = self._read_content_path(net_item, decode=True)
+                results['network_config'] = content
             except IOError as e:
                 raise BrokenMetadata("Failed to read network"
                                      " configuration: %s" % (e))
@@ -333,8 +334,8 @@
         components = [base] + list(add_ons)
         return os.path.join(*components)
 
-    def _path_read(self, path):
-        return util.load_file(path, decode=False)
+    def _path_read(self, path, decode=False):
+        return util.load_file(path, decode=decode)
 
     def _fetch_available_versions(self):
         if self._versions is None:
@@ -446,7 +447,7 @@
         self._versions = found
         return self._versions
 
-    def _path_read(self, path):
+    def _path_read(self, path, decode=False):
 
         def should_retry_cb(_request_args, cause):
             try:
@@ -463,7 +464,10 @@
                                       ssl_details=self.ssl_details,
                                       timeout=self.timeout,
                                       exception_cb=should_retry_cb)
-        return response.contents
+        if decode:
+            return response.contents.decode()
+        else:
+            return response.contents
 
     def _path_join(self, base, *add_ons):
         return url_helper.combine_url(base, *add_ons)

=== modified file 'cloudinit/stages.py'
--- cloudinit/stages.py	2016-05-26 13:02:17 +0000
+++ cloudinit/stages.py	2016-06-03 19:07:08 +0000
@@ -52,6 +52,7 @@
 LOG = logging.getLogger(__name__)
 
 NULL_DATA_SOURCE = None
+NO_PREVIOUS_INSTANCE_ID = "NO_PREVIOUS_INSTANCE_ID"
 
 
 class Init(object):
@@ -67,6 +68,7 @@
         # Changed only when a fetch occurs
         self.datasource = NULL_DATA_SOURCE
         self.ds_restored = False
+        self._previous_iid = None
 
         if reporter is None:
             reporter = events.ReportEventStack(
@@ -213,6 +215,31 @@
         cfg_list = self.cfg.get('datasource_list') or []
         return (cfg_list, pkg_list)
 
+    def _restore_from_checked_cache(self, existing):
+        if existing not in ("check", "trust"):
+            raise ValueError("Unexpected value for existing: %s" % existing)
+
+        ds = self._restore_from_cache()
+        if not ds:
+            return (None, "no cache found")
+
+        run_iid_fn = self.paths.get_runpath('instance_id')
+        if os.path.exists(run_iid_fn):
+            run_iid = util.load_file(run_iid_fn).strip()
+        else:
+            run_iid = None
+
+        if run_iid == ds.get_instance_id():
+            return (ds, "restored from cache with run check: %s" % ds)
+        elif existing == "trust":
+            return (ds, "restored from cache: %s" % ds)
+        else:
+            if (hasattr(ds, 'check_instance_id') and
+                    ds.check_instance_id(self.cfg)):
+                return (ds, "restored from checked cache: %s" % ds)
+            else:
+                return (None, "cache invalid in datasource: %s" % ds)
+
     def _get_data_source(self, existing):
         if self.datasource is not NULL_DATA_SOURCE:
             return self.datasource
@@ -221,19 +248,9 @@
                 name="check-cache",
                 description="attempting to read from cache [%s]" % existing,
                 parent=self.reporter) as myrep:
-            ds = self._restore_from_cache()
-            if ds and existing == "trust":
-                myrep.description = "restored from cache: %s" % ds
-            elif ds and existing == "check":
-                if (hasattr(ds, 'check_instance_id') and
-                        ds.check_instance_id(self.cfg)):
-                    myrep.description = "restored from checked cache: %s" % ds
-                else:
-                    myrep.description = "cache invalid in datasource: %s" % ds
-                    ds = None
-            else:
-                myrep.description = "no cache found"
 
+            ds, desc = self._restore_from_checked_cache(existing)
+            myrep.description = desc
             self.ds_restored = bool(ds)
             LOG.debug(myrep.description)
 
@@ -301,23 +318,41 @@
 
         # What the instance id was and is...
         iid = self.datasource.get_instance_id()
-        previous_iid = None
         iid_fn = os.path.join(dp, 'instance-id')
-        try:
-            previous_iid = util.load_file(iid_fn).strip()
-        except Exception:
-            pass
-        if not previous_iid:
-            previous_iid = iid
+
+        previous_iid = self.previous_iid()
         util.write_file(iid_fn, "%s\n" % iid)
+        util.write_file(self.paths.get_runpath('instance_id'), "%s\n" % iid)
         util.write_file(os.path.join(dp, 'previous-instance-id'),
                         "%s\n" % (previous_iid))
+
+        self._write_to_cache()
         # Ensure needed components are regenerated
         # after change of instance which may cause
         # change of configuration
         self._reset()
         return iid
 
+    def previous_iid(self):
+        if self._previous_iid is not None:
+            return self._previous_iid
+
+        dp = self.paths.get_cpath('data')
+        iid_fn = os.path.join(dp, 'instance-id')
+        try:
+            self._previous_iid = util.load_file(iid_fn).strip()
+        except Exception:
+            self._previous_iid = NO_PREVIOUS_INSTANCE_ID
+
+        LOG.debug("previous iid found to be %s", self._previous_iid)
+        return self._previous_iid
+
+    def is_new_instance(self):
+        previous = self.previous_iid()
+        ret = (previous == NO_PREVIOUS_INSTANCE_ID or
+               previous != self.datasource.get_instance_id())
+        return ret
+
     def fetch(self, existing="check"):
         return self._get_data_source(existing=existing)
 
@@ -332,8 +367,6 @@
                            reporter=self.reporter)
 
     def update(self):
-        if not self._write_to_cache():
-            return
         self._store_userdata()
         self._store_vendordata()
 
@@ -593,15 +626,27 @@
                 return (ncfg, loc)
         return (net.generate_fallback_config(), "fallback")
 
-    def apply_network_config(self):
+    def apply_network_config(self, bring_up):
         netcfg, src = self._find_networking_config()
         if netcfg is None:
             LOG.info("network config is disabled by %s", src)
             return
 
-        LOG.info("Applying network configuration from %s: %s", src, netcfg)
-        try:
-            return self.distro.apply_network_config(netcfg)
+        try:
+            LOG.debug("applying net config names for %s" % netcfg)
+            self.distro.apply_network_config_names(netcfg)
+        except Exception as e:
+            LOG.warn("Failed to rename devices: %s", e)
+
+        if (self.datasource is not NULL_DATA_SOURCE and
+                not self.is_new_instance()):
+            LOG.debug("not a new instance. network config is not applied.")
+            return
+
+        LOG.info("Applying network configuration from %s bringup=%s: %s",
+                 src, bring_up, netcfg)
+        try:
+            return self.distro.apply_network_config(netcfg, bring_up=bring_up)
         except NotImplementedError:
             LOG.warn("distro '%s' does not implement apply_network_config. "
                      "networking may not be configured properly." %

=== modified file 'setup.py'
--- setup.py	2016-05-12 20:49:10 +0000
+++ setup.py	2016-06-03 19:07:08 +0000
@@ -184,7 +184,6 @@
         (USR + '/share/doc/cloud-init/examples/seed',
             [f for f in glob('doc/examples/seed/*') if is_f(f)]),
         (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]),
-        (LIB + '/udev', ['udev/cloud-init-wait']),
     ]
     # Use a subclass for install that handles
     # adding on the right init system configuration files

=== modified file 'systemd/cloud-init-generator'
--- systemd/cloud-init-generator	2016-03-19 00:40:54 +0000
+++ systemd/cloud-init-generator	2016-06-03 19:07:08 +0000
@@ -107,9 +107,6 @@
                     "ln $CLOUD_SYSTEM_TARGET $link_path"
             fi
         fi
-        # this touches /run/cloud-init/enabled, which is read by 
-        # udev/cloud-init-wait.  If not present, it will exit quickly.
-        touch "$LOG_D/$ENABLE"
     elif [ "$result" = "$DISABLE" ]; then
         if [ -f "$link_path" ]; then
             if rm -f "$link_path"; then

=== modified file 'tests/unittests/test_datasource/test_configdrive.py'
--- tests/unittests/test_datasource/test_configdrive.py	2016-05-12 20:43:11 +0000
+++ tests/unittests/test_datasource/test_configdrive.py	2016-06-03 19:07:08 +0000
@@ -15,6 +15,7 @@
     from contextlib2 import ExitStack
 
 from cloudinit import helpers
+from cloudinit import net
 from cloudinit import settings
 from cloudinit.sources import DataSourceConfigDrive as ds
 from cloudinit.sources.helpers import openstack
@@ -73,7 +74,7 @@
          'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
         {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc',
          'ethernet_mac_address': 'fa:16:3e:05:30:fe',
-         'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04'}
+         'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'}
     ],
     'networks': [
         {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp',
@@ -88,6 +89,34 @@
     ]
 }
 
+NETWORK_DATA_2 = {
+    "services": [
+        {"type": "dns", "address": "1.1.1.191"},
+        {"type": "dns", "address": "1.1.1.4"}],
+    "networks": [
+        {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4",
+         "netmask": "255.255.255.248", "link": "eth0",
+         "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
+                     "gateway": "2.2.2.9"}],
+         "ip_address": "2.2.2.10", "id": "network0-ipv4"},
+        {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4",
+         "netmask": "255.255.255.224", "link": "eth1",
+         "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}],
+    "links": [
+        {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500,
+         "type": "vif", "id": "eth0", "vif_id": "vif-foo1"},
+        {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500,
+         "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}]
+}
+
+
+KNOWN_MACS = {
+    'fa:16:3e:69:b0:58': 'enp0s1',
+    'fa:16:3e:d4:57:ad': 'enp0s2',
+    'fa:16:3e:dd:50:9a': 'foo1',
+    'fa:16:3e:a8:14:69': 'foo2',
+}
+
 CFG_DRIVE_FILES_V2 = {
     'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
     'ec2/2009-04-04/user-data': USER_DATA,
@@ -365,10 +394,54 @@
         """Verify that network_data is converted and present on ds object."""
         populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
         myds = cfg_ds_from_dir(self.tmp)
-        network_config = ds.convert_network_data(NETWORK_DATA)
+        network_config = ds.convert_network_data(NETWORK_DATA,
+                                                 known_macs=KNOWN_MACS)
         self.assertEqual(myds.network_config, network_config)
 
 
+class TestConvertNetworkData(TestCase):
+    def _getnames_in_config(self, ncfg):
+        return set([n['name'] for n in ncfg['config']
+                    if n['type'] == 'physical'])
+
+    def test_conversion_fills_names(self):
+        ncfg = ds.convert_network_data(NETWORK_DATA, known_macs=KNOWN_MACS)
+        expected = set(['nic0', 'enp0s1', 'enp0s2'])
+        found = self._getnames_in_config(ncfg)
+        self.assertEqual(found, expected)
+
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
+        macs = KNOWN_MACS.copy()
+        macs.update({'fa:16:3e:05:30:fe': 'foonic1',
+                     'fa:16:3e:69:b0:58': 'ens1'})
+        get_interfaces_by_mac.return_value = macs
+
+        ncfg = ds.convert_network_data(NETWORK_DATA)
+        expected = set(['nic0', 'ens1', 'enp0s2'])
+        found = self._getnames_in_config(ncfg)
+        self.assertEqual(found, expected)
+
+    def test_convert_raises_value_error_on_missing_name(self):
+        macs = {'aa:aa:aa:aa:aa:00': 'ens1'}
+        self.assertRaises(ValueError, ds.convert_network_data,
+                          NETWORK_DATA, known_macs=macs)
+
+    def test_conversion_with_route(self):
+        ncfg = ds.convert_network_data(NETWORK_DATA_2, known_macs=KNOWN_MACS)
+        # not the best test, but see that we get a route in the
+        # network config and that it gets rendered to an ENI file
+        routes = []
+        for n in ncfg['config']:
+            for s in n.get('subnets', []):
+                routes.extend(s.get('routes', []))
+        self.assertIn(
+            {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'},
+            routes)
+        eni = net.render_interfaces(net.parse_net_config_data(ncfg))
+        self.assertIn("route add default gw 2.2.2.9", eni)
+
+
 def cfg_ds_from_dir(seed_d):
     found = ds.read_config_drive(seed_d)
     cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
@@ -387,7 +460,8 @@
     cfg_ds.userdata_raw = results.get('userdata')
     cfg_ds.version = results.get('version')
     cfg_ds.network_json = results.get('networkdata')
-    cfg_ds._network_config = ds.convert_network_data(cfg_ds.network_json)
+    cfg_ds._network_config = ds.convert_network_data(
+        cfg_ds.network_json, known_macs=KNOWN_MACS)
 
 
 def populate_dir(seed_dir, files):

=== modified file 'tests/unittests/test_datasource/test_smartos.py'
--- tests/unittests/test_datasource/test_smartos.py	2016-05-12 20:43:11 +0000
+++ tests/unittests/test_datasource/test_smartos.py	2016-06-03 19:07:08 +0000
@@ -25,6 +25,7 @@
 from __future__ import print_function
 
 from binascii import crc32
+import json
 import os
 import os.path
 import re
@@ -40,12 +41,49 @@
 from cloudinit.sources import DataSourceSmartOS
 from cloudinit.util import b64e
 
-from .. import helpers
+from ..helpers import mock, FilesystemMockingTestCase, TestCase
 
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+SDC_NICS = json.loads("""
+[
+    {
+        "nic_tag": "external",
+        "primary": true,
+        "mtu": 1500,
+        "model": "virtio",
+        "gateway": "8.12.42.1",
+        "netmask": "255.255.255.0",
+        "ip": "8.12.42.102",
+        "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
+        "gateways": [
+            "8.12.42.1"
+        ],
+        "vlan_id": 324,
+        "mac": "90:b8:d0:f5:e4:f5",
+        "interface": "net0",
+        "ips": [
+            "8.12.42.102/24"
+        ]
+    },
+    {
+        "nic_tag": "sdc_overlay/16187209",
+        "gateway": "192.168.128.1",
+        "model": "virtio",
+        "mac": "90:b8:d0:a5:ff:cd",
+        "netmask": "255.255.252.0",
+        "ip": "192.168.128.93",
+        "network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9",
+        "gateways": [
+            "192.168.128.1"
+        ],
+        "vlan_id": 2,
+        "mtu": 8500,
+        "interface": "net1",
+        "ips": [
+            "192.168.128.93/22"
+        ]
+    }
+]
+""")
 
 MOCK_RETURNS = {
     'hostname': 'test-host',
@@ -60,79 +98,66 @@
     'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
     'user-data': '\n'.join(['something', '']),
     'user-script': '\n'.join(['/bin/true', '']),
+    'sdc:nics': json.dumps(SDC_NICS),
 }
 
 DMI_DATA_RETURN = 'smartdc'
 
 
-def get_mock_client(mockdata):
-    class MockMetadataClient(object):
-
-        def __init__(self, serial):
-            pass
-
-        def get_metadata(self, metadata_key):
-            return mockdata.get(metadata_key)
-    return MockMetadataClient
-
-
-class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
+class PsuedoJoyentClient(object):
+    def __init__(self, data=None):
+        if data is None:
+            data = MOCK_RETURNS.copy()
+        self.data = data
+        return
+
+    def get(self, key, default=None, strip=False):
+        if key in self.data:
+            r = self.data[key]
+            if strip:
+                r = r.strip()
+        else:
+            r = default
+        return r
+
+    def get_json(self, key, default=None):
+        result = self.get(key, default=default)
+        if result is None:
+            return default
+        return json.loads(result)
+
+    def exists(self):
+        return True
+
+
+class TestSmartOSDataSource(FilesystemMockingTestCase):
     def setUp(self):
         super(TestSmartOSDataSource, self).setUp()
 
+        dsmos = 'cloudinit.sources.DataSourceSmartOS'
+        patcher = mock.patch(dsmos + ".jmc_client_factory")
+        self.jmc_cfact = patcher.start()
+        self.addCleanup(patcher.stop)
+        patcher = mock.patch(dsmos + ".get_smartos_environ")
+        self.get_smartos_environ = patcher.start()
+        self.addCleanup(patcher.stop)
+
         self.tmp = tempfile.mkdtemp()
         self.addCleanup(shutil.rmtree, self.tmp)
+        self.paths = c_helpers.Paths({'cloud_dir': self.tmp})
+
         self.legacy_user_d = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.legacy_user_d)
-
-        # If you should want to watch the logs...
-        self._log = None
-        self._log_file = None
-        self._log_handler = None
-
-        # patch cloud_dir, so our 'seed_dir' is guaranteed empty
-        self.paths = c_helpers.Paths({'cloud_dir': self.tmp})
-
-        self.unapply = []
-        super(TestSmartOSDataSource, self).setUp()
+        self.orig_lud = DataSourceSmartOS.LEGACY_USER_D
+        DataSourceSmartOS.LEGACY_USER_D = self.legacy_user_d
 
     def tearDown(self):
-        helpers.FilesystemMockingTestCase.tearDown(self)
-        if self._log_handler and self._log:
-            self._log.removeHandler(self._log_handler)
-        apply_patches([i for i in reversed(self.unapply)])
+        DataSourceSmartOS.LEGACY_USER_D = self.orig_lud
         super(TestSmartOSDataSource, self).tearDown()
 
-    def _patchIn(self, root):
-        self.restore()
-        self.patchOS(root)
-        self.patchUtils(root)
-
-    def apply_patches(self, patches):
-        ret = apply_patches(patches)
-        self.unapply += ret
-
-    def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None,
-                is_lxbrand=False):
-        mod = DataSourceSmartOS
-
-        if mockdata is None:
-            mockdata = MOCK_RETURNS
-
-        if dmi_data is None:
-            dmi_data = DMI_DATA_RETURN
-
-        def _dmi_data():
-            return dmi_data
-
-        def _os_uname():
-            if not is_lxbrand:
-                # LP: #1243287. tests assume this runs, but running test on
-                # arm would cause them all to fail.
-                return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64')
-            else:
-                return ('LINUX', 'NODENAME', 'RELEASE', 'BRANDZ VIRTUAL LINUX',
-                        'X86_64')
+    def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
+                sys_cfg=None, ds_cfg=None):
+        self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata)
+        self.get_smartos_environ.return_value = mode
 
         if sys_cfg is None:
             sys_cfg = {}
@@ -141,44 +166,8 @@
             sys_cfg['datasource'] = sys_cfg.get('datasource', {})
             sys_cfg['datasource']['SmartOS'] = ds_cfg
 
-        self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)])
-        self.apply_patches([
-            (mod, 'JoyentMetadataClient', get_mock_client(mockdata))])
-        self.apply_patches([(mod, 'dmi_data', _dmi_data)])
-        self.apply_patches([(os, 'uname', _os_uname)])
-        self.apply_patches([(mod, 'device_exists', lambda d: True)])
-        dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None,
-                                     paths=self.paths)
-        self.apply_patches([(dsrc, '_get_seed_file_object', mock.MagicMock())])
-        return dsrc
-
-    def test_seed(self):
-        # default seed should be /dev/ttyS1
-        dsrc = self._get_ds()
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual('kvm', dsrc.smartos_type)
-        self.assertEqual('/dev/ttyS1', dsrc.seed)
-
-    def test_seed_lxbrand(self):
-        # default seed should be /dev/ttyS1
-        dsrc = self._get_ds(is_lxbrand=True)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual('lx-brand', dsrc.smartos_type)
-        self.assertEqual('/native/.zonecontrol/metadata.sock', dsrc.seed)
-
-    def test_issmartdc(self):
-        dsrc = self._get_ds()
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertTrue(dsrc.is_smartdc)
-
-    def test_issmartdc_lxbrand(self):
-        dsrc = self._get_ds(is_lxbrand=True)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertTrue(dsrc.is_smartdc)
+        return DataSourceSmartOS.DataSourceSmartOS(
+            sys_cfg, distro=None, paths=self.paths)
 
     def test_no_base64(self):
         ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
@@ -214,58 +203,6 @@
         self.assertEqual(MOCK_RETURNS['hostname'],
                          dsrc.metadata['local-hostname'])
 
-    def test_base64_all(self):
-        # metadata provided base64_all of true
-        my_returns = MOCK_RETURNS.copy()
-        my_returns['base64_all'] = "true"
-        for k in ('hostname', 'cloud-init:user-data'):
-            my_returns[k] = b64e(my_returns[k])
-
-        dsrc = self._get_ds(mockdata=my_returns)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(MOCK_RETURNS['hostname'],
-                         dsrc.metadata['local-hostname'])
-        self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
-                         dsrc.userdata_raw)
-        self.assertEqual(MOCK_RETURNS['root_authorized_keys'],
-                         dsrc.metadata['public-keys'])
-        self.assertEqual(MOCK_RETURNS['disable_iptables_flag'],
-                         dsrc.metadata['iptables_disable'])
-        self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'],
-                         dsrc.metadata['motd_sys_info'])
-
-    def test_b64_userdata(self):
-        my_returns = MOCK_RETURNS.copy()
-        my_returns['b64-cloud-init:user-data'] = "true"
-        my_returns['b64-hostname'] = "true"
-        for k in ('hostname', 'cloud-init:user-data'):
-            my_returns[k] = b64e(my_returns[k])
-
-        dsrc = self._get_ds(mockdata=my_returns)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(MOCK_RETURNS['hostname'],
-                         dsrc.metadata['local-hostname'])
-        self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
-                         dsrc.userdata_raw)
-        self.assertEqual(MOCK_RETURNS['root_authorized_keys'],
-                         dsrc.metadata['public-keys'])
-
-    def test_b64_keys(self):
-        my_returns = MOCK_RETURNS.copy()
-        my_returns['base64_keys'] = 'hostname,ignored'
-        for k in ('hostname',):
-            my_returns[k] = b64e(my_returns[k])
-
-        dsrc = self._get_ds(mockdata=my_returns)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(MOCK_RETURNS['hostname'],
-                         dsrc.metadata['local-hostname'])
-        self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
-                         dsrc.userdata_raw)
-
     def test_userdata(self):
         dsrc = self._get_ds(mockdata=MOCK_RETURNS)
         ret = dsrc.get_data()
@@ -275,6 +212,13 @@
         self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
                          dsrc.userdata_raw)
 
+    def test_sdc_nics(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
+                         dsrc.metadata['network-data'])
+
     def test_sdc_scripts(self):
         dsrc = self._get_ds(mockdata=MOCK_RETURNS)
         ret = dsrc.get_data()
@@ -430,18 +374,7 @@
                          mydscfg['disk_aliases']['FOO'])
 
 
-def apply_patches(patches):
-    ret = []
-    for (ref, name, replace) in patches:
-        if replace is None:
-            continue
-        orig = getattr(ref, name)
-        setattr(ref, name, replace)
-        ret.append((ref, name, orig))
-    return ret
-
-
-class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
+class TestJoyentMetadataClient(FilesystemMockingTestCase):
 
     def setUp(self):
         super(TestJoyentMetadataClient, self).setUp()
@@ -481,7 +414,8 @@
                        mock.Mock(return_value=self.request_id)))
 
     def _get_client(self):
-        return DataSourceSmartOS.JoyentMetadataClient(self.serial)
+        return DataSourceSmartOS.JoyentMetadataClient(
+            fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM)
 
     def assertEndsWith(self, haystack, prefix):
         self.assertTrue(haystack.endswith(prefix),
@@ -495,7 +429,7 @@
 
     def test_get_metadata_writes_a_single_line(self):
         client = self._get_client()
-        client.get_metadata('some_key')
+        client.get('some_key')
         self.assertEqual(1, self.serial.write.call_count)
         written_line = self.serial.write.call_args[0][0]
         print(type(written_line))
@@ -505,7 +439,7 @@
 
     def _get_written_line(self, key='some_key'):
         client = self._get_client()
-        client.get_metadata(key)
+        client.get(key)
         return self.serial.write.call_args[0][0]
 
     def test_get_metadata_writes_bytes(self):
@@ -549,32 +483,32 @@
 
     def test_get_metadata_reads_a_line(self):
         client = self._get_client()
-        client.get_metadata('some_key')
+        client.get('some_key')
         self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
 
     def test_get_metadata_returns_valid_value(self):
         client = self._get_client()
-        value = client.get_metadata('some_key')
+        value = client.get('some_key')
         self.assertEqual(self.metadata_value, value)
 
     def test_get_metadata_throws_exception_for_incorrect_length(self):
         self.response_parts['length'] = 0
         client = self._get_client()
         self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
-                          client.get_metadata, 'some_key')
+                          client.get, 'some_key')
 
     def test_get_metadata_throws_exception_for_incorrect_crc(self):
         self.response_parts['crc'] = 'deadbeef'
         client = self._get_client()
         self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
-                          client.get_metadata, 'some_key')
+                          client.get, 'some_key')
 
     def test_get_metadata_throws_exception_for_request_id_mismatch(self):
         self.response_parts['request_id'] = 'deadbeef'
         client = self._get_client()
         client._checksum = lambda _: self.response_parts['crc']
         self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
-                          client.get_metadata, 'some_key')
+                          client.get, 'some_key')
 
     def test_get_metadata_returns_None_if_value_not_found(self):
         self.response_parts['payload'] = ''
@@ -582,4 +516,24 @@
         self.response_parts['length'] = 17
         client = self._get_client()
         client._checksum = lambda _: self.response_parts['crc']
-        self.assertIsNone(client.get_metadata('some_key'))
+        self.assertIsNone(client.get('some_key'))
+
+
+class TestNetworkConversion(TestCase):
+
+    def test_convert_simple(self):
+        expected = {
+            'version': 1,
+            'config': [
+                {'name': 'net0', 'type': 'physical',
+                 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
+                              'netmask': '255.255.255.0',
+                              'address': '8.12.42.102/24'}],
+                 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'},
+                {'name': 'net1', 'type': 'physical',
+                 'subnets': [{'type': 'static', 'gateway': '192.168.128.1',
+                              'netmask': '255.255.252.0',
+                              'address': '192.168.128.93/22'}],
+                 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]}
+        found = DataSourceSmartOS.convert_smartos_network_data(SDC_NICS)
+        self.assertEqual(expected, found)

=== removed file 'udev/79-cloud-init-net-wait.rules'
--- udev/79-cloud-init-net-wait.rules	2016-03-19 00:40:54 +0000
+++ udev/79-cloud-init-net-wait.rules	1970-01-01 00:00:00 +0000
@@ -1,10 +0,0 @@
-# cloud-init cold/hot-plug blocking mechanism
-# this file blocks further processing of network events
-# until cloud-init local has had a chance to read and apply network
-SUBSYSTEM!="net", GOTO="cloudinit_naming_end"
-ACTION!="add", GOTO="cloudinit_naming_end"
-
-IMPORT{program}="/lib/udev/cloud-init-wait"
-
-LABEL="cloudinit_naming_end"
-# vi: ts=4 expandtab syntax=udevrules

=== removed file 'udev/cloud-init-wait'
--- udev/cloud-init-wait	2016-03-29 13:11:25 +0000
+++ udev/cloud-init-wait	1970-01-01 00:00:00 +0000
@@ -1,70 +0,0 @@
-#!/bin/sh
-
-CI_NET_READY="/run/cloud-init/network-config-ready"
-LOG="/run/cloud-init/${0##*/}.log"
-LOG_INIT=0
-MAX_WAIT=60
-DEBUG=0
-
-block_until_ready() {
-    local fname="$1" max="$2"
-    [ -f "$fname" ] && return 0
-    # udevadm settle below will exit at the first of 3 conditions
-    #  1.) timeout 2.) file exists 3.) all in-flight udev events are processed
-    # since this is being run from a udev event, the 3 wont happen.
-    # thus, this is essentially a inotify wait or timeout on a file in /run
-    # that is created by cloud-init-local.
-    udevadm settle "--timeout=$max" "--exit-if-exists=$fname"
-}
-
-log() {
-    [ -n "${LOG}" ] || return
-    [ "${DEBUG:-0}" = "0" ] && return
-
-    if [ $LOG_INIT = 0 ]; then
-        if [ -d "${LOG%/*}" ] || mkdir -p "${LOG%/*}"; then
-            LOG_INIT=1
-        else
-            echo "${0##*/}: WARN: log init to ${LOG%/*}" 1>&2
-            return
-        fi
-    elif [ "$LOG_INIT" = "-1" ]; then
-        return
-    fi
-    local info="$$ $INTERFACE"
-    if [ "$DEBUG" -gt 1 ]; then
-       local up idle
-       read up idle < /proc/uptime
-       info="$$ $INTERFACE $up"
-    fi
-    echo "[$info]" "$@" >> "$LOG"
-}
-
-main() {
-    local name="" readyfile="$CI_NET_READY"
-    local info="INTERFACE=${INTERFACE} ID_NET_NAME=${ID_NET_NAME}"
-    info="$info ID_NET_NAME_PATH=${ID_NET_NAME_PATH}"
-    info="$info MAC_ADDRESS=${MAC_ADDRESS}"
-    log "$info"
-
-    ## Check to see if cloud-init.target is set.  If cloud-init is 
-    ## disabled we do not want to do anything.
-    if [ ! -f "/run/cloud-init/enabled" ]; then
-        log "cloud-init disabled"
-        return 0
-    fi
-
-    if [ "${INTERFACE#lo}" != "$INTERFACE" ]; then
-        return 0
-    fi
-
-    block_until_ready "$readyfile" "$MAX_WAIT" ||
-       { log "failed waiting for ready on $INTERFACE"; return 1; }
-
-    log "net config ready"
-}
-
-main "$@"
-exit
-
-# vi: ts=4 expandtab

