import random

if __name__ == "__main__":
    # Find index pairs of distinct values that sum to t, using a
    # value -> index map for O(1) lookups.
    d = {}
    r = [random.randrange(100) for _ in range(100)]
    a = list(set(r))
    t = 100
    for i, v in enumerate(a):
        d[v] = i
    for i, v in enumerate(a):
        needed = t - v
        try:
            x = d[needed]
            if x != i:
                print(i, x, a[i], a[x])
        except KeyError:
            pass
def network_create(request, **kwargs):
    """Create a network object.

    :param request: request context
    :param tenant_id: (optional) tenant id of the network created
    :param name: (optional) name of the network created
    :returns: Network object
    """
    LOG.debug("network_create(): kwargs = %s" % kwargs)
    # In the case network profiles are being used, profile id is needed.
    if 'net_profile_id' in kwargs:
        kwargs['n1kv:profile'] = kwargs.pop('net_profile_id')
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body = {'network': kwargs}
    network = neutronclient(request).create_network(body=body).get('network')
    return Network(network)

This calls neutronclient to send the actual API request. neutronclient/v2_0/client.py:
def create_network(self, body=None):
    """Creates a new network."""
    return self.post(self.networks_path, body=body)

Following the code further, you can see that the body is serialized and the actual HTTP request is sent.
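For context, here is a minimal standalone sketch of what this client layer does from a caller's point of view, assuming the python-neutronclient package and a keystoneauth1 session; the endpoint, credentials, and network name below are placeholders, not values from the code above:

# A minimal sketch, assuming python-neutronclient and keystoneauth1;
# all credentials and names here are illustrative placeholders.
from keystoneauth1 import identity, session
from neutronclient.v2_0 import client

auth = identity.Password(auth_url='http://controller:5000/v3',
                         username='demo', password='secret',
                         project_name='demo',
                         user_domain_id='default',
                         project_domain_id='default')
sess = session.Session(auth=auth)
neutron = client.Client(session=sess)

# create_network() simply POSTs this body to /v2.0/networks.
body = {'network': {'name': 'demo-net', 'admin_state_up': True}}
network = neutron.create_network(body=body)['network']
print(network['id'])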
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
    """WSGI method that controls (de)serialization and method dispatch."""
    # print("nova api wsgi call", request)
    if self.support_api_request_version:
        # Set the version of the API requested based on the header
        try:
            request.set_api_version_request()
        except exception.InvalidAPIVersionString as e:
            return Fault(webob.exc.HTTPBadRequest(
                explanation=e.format_message()))
        except exception.InvalidGlobalAPIVersion as e:
            return Fault(webob.exc.HTTPNotAcceptable(
                explanation=e.format_message()))

    # Identify the action, its arguments, and the requested
    # content type
    action_args = self.get_action_args(request.environ)
    action = action_args.pop('action', None)

    # NOTE(sdague): we filter out InvalidContentTypes early so we
    # know everything is good from here on out.
    try:
        content_type, body = self.get_body(request)
        accept = request.best_match_content_type()
    except exception.InvalidContentType:
        msg = _("Unsupported Content-Type")
        return Fault(webob.exc.HTTPUnsupportedMediaType(explanation=msg))

    # NOTE(Vek): Splitting the function up this way allows for
    #            auditing by external tools that wrap the existing
    #            function.  If we try to audit __call__(), we can
    #            run into troubles due to the @webob.dec.wsgify()
    #            decorator.
    return self._process_stack(request, action, action_args,
                               content_type, body, accept)
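As an aside, @webob.dec.wsgify is what turns __call__ into a WSGI application: it builds the Request object from the WSGI environ and converts the return value into a proper response. A minimal standalone example of the same decorator (not nova code, just an illustration):

import webob
import webob.dec

@webob.dec.wsgify
def app(request):
    # wsgify hands us a webob.Request and turns our return value
    # into a WSGI response tuple behind the scenes.
    return webob.Response('hello from %s' % request.path)

# Serve it with the stdlib WSGI server for demonstration.
from wsgiref.simple_server import make_server
make_server('127.0.0.1', 8080, app).serve_forever()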
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
           image_href, kernel_id=None, ramdisk_id=None,
           min_count=None, max_count=None,
           display_name=None, display_description=None,
           key_name=None, key_data=None, security_groups=None,
           availability_zone=None, forced_host=None, forced_node=None,
           user_data=None, metadata=None, injected_files=None,
           admin_password=None, block_device_mapping=None,
           access_ip_v4=None, access_ip_v6=None, requested_networks=None,
           config_drive=None, auto_disk_config=None, scheduler_hints=None,
           legacy_bdm=True, shutdown_terminate=False,
           check_server_group_quota=False):
    """Provision instances, sending instance information to the
    scheduler.  The scheduler will determine where the instance(s)
    go and will handle creating the DB entries.

    Returns a tuple of (instances, reservation_id)
    """
    if requested_networks and max_count is not None and max_count > 1:
        self._check_multiple_instances_with_specified_ip(
            requested_networks)
        if utils.is_neutron():
            self._check_multiple_instances_with_neutron_ports(
                requested_networks)
    if availability_zone:
        available_zones = availability_zones.\
            get_availability_zones(context.elevated(), True)
        if forced_host is None and availability_zone not in available_zones:
            msg = _('The requested availability zone is not available')
            raise exception.InvalidRequest(msg)
# Normalize and setup some parameters
if reservation_id is None:
    reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
# max_net_count is the maximum number of instances requested by the
# user adjusted for any network quota constraints, including
# consideration of connections to each requested network
if max_net_count < min_count:
    raise exception.PortLimitExceeded()
elif max_net_count < max_count:
    LOG.info(_LI("max count reduced from %(max_count)d to "
                 "%(max_net_count)d due to network port quota"),
             {'max_count': max_count,
              'max_net_count': max_net_count})
    max_count = max_net_count
# We can't do this check earlier because we need bdms from all sources
# to have been merged in order to get the root bdm.
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
                                    instance_type, metadata,
                                    injected_files,
                                    block_device_mapping.root_bdm())
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
#                 2.0 of the RPC API.
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
        not isinstance(requested_networks,
                       objects.NetworkRequestList)):
    requested_networks = objects.NetworkRequestList.from_tuples(
        requested_networks)
# TODO(melwitt): Remove this in version 2.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
    # Code downstream may expect extra_specs to be populated since it
    # is receiving an object, so lookup the flavor to ensure this.
    flavor = objects.Flavor.get_by_id(context, flavor['id'])
    filter_properties = dict(filter_properties, instance_type=flavor)
request_spec = {}
try:
    # check retry policy.  Rather ugly use of instances[0]...
    # but if we've exceeded max retries... then we really only
    # have a single instance.
    scheduler_utils.populate_retry(
        filter_properties, instances[0].uuid)
    request_spec = scheduler_utils.build_request_spec(
        context, image, instances)
    hosts = self._schedule_instances(
        context, request_spec, filter_properties)
except Exception as exc:
    updates = {'vm_state': vm_states.ERROR, 'task_state': None}
    for instance in instances:
        self._set_vm_state_and_notify(
            context, instance.uuid, 'build_instances', updates,
            exc, request_spec)
        try:
            # If the BuildRequest stays around then instance show/lists
            # will pull from it rather than the errored instance.
            self._destroy_build_request(context, instance)
        except exception.BuildRequestNotFound:
            pass
        self._cleanup_allocated_networks(
            context, instance, requested_networks)
    return
for (instance, host) in six.moves.zip(instances, hosts):
    try:
        instance.refresh()
    except (exception.InstanceNotFound,
            exception.InstanceInfoCacheNotFound):
        LOG.debug('Instance deleted during build', instance=instance)
        continue
    local_filter_props = copy.deepcopy(filter_properties)
    scheduler_utils.populate_filter_properties(local_filter_props,
                                               host)
    # The block_device_mapping passed from the api doesn't contain
    # instance specific information
    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)
    # This is populated in scheduler_utils.populate_retry
    num_attempts = local_filter_props.get('retry',
                                          {}).get('num_attempts', 1)
    if num_attempts <= 1:
        # If this is a reschedule the instance is already mapped to
        # this cell and the BuildRequest is already deleted so ignore
        # the logic below.
        inst_mapping = self._populate_instance_mapping(context,
                                                       instance,
                                                       host)
        try:
            self._destroy_build_request(context, instance)
        except exception.BuildRequestNotFound:
            # This indicates an instance delete has been requested in
            # the API. Stop the build, cleanup the instance_mapping and
            # potentially the block_device_mappings
            # TODO(alaski): Handle block_device_mapping cleanup
            if inst_mapping:
                inst_mapping.destroy()
            return
if (requested_networks and
        not isinstance(requested_networks,
                       objects.NetworkRequestList)):
    requested_networks = objects.NetworkRequestList.from_tuples(
        requested_networks)
def select_destinations(self, context, spec_obj):
    """Returns destinations(s) best suited for this request_spec and
    filter_properties.

    The result should be a list of dicts with 'host', 'nodename' and
    'limits' as keys.
    """
    return self.scheduler_rpcapi.select_destinations(context, spec_obj)
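Between this client-side call and the scheduler manager below sits an oslo.messaging RPC round trip. As a rough, standalone sketch of what such a synchronous call looks like when made with oslo.messaging directly; the topic, version, and arguments here are illustrative, not nova's actual wiring:

# A hedged sketch of a blocking RPC call with oslo.messaging;
# topic, version and payload are illustrative placeholders.
import oslo_messaging as messaging
from oslo_config import cfg

transport = messaging.get_transport(cfg.CONF)  # e.g. rabbit://...
target = messaging.Target(topic='scheduler', version='4.0')
client = messaging.RPCClient(transport, target)

# Blocks until the remote select_destinations() returns a result.
dests = client.call({}, 'select_destinations',
                    request_spec={'num_instances': 1},
                    filter_properties={})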
@messaging.expected_exceptions(exception.NoValidHost)
def select_destinations(self, ctxt,
                        request_spec=None, filter_properties=None,
                        spec_obj=_sentinel):
    """Returns destinations(s) best suited for this RequestSpec.

    The result should be a list of dicts with 'host', 'nodename' and
    'limits' as keys.
    """
    # TODO(sbauza): Change the method signature to only accept a spec_obj
    # argument once API v5 is provided.
    if spec_obj is self._sentinel:
        spec_obj = objects.RequestSpec.from_primitives(ctxt,
                                                       request_spec,
                                                       filter_properties)
    dests = self.driver.select_destinations(ctxt, spec_obj)
    return jsonutils.to_primitive(dests)
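The final to_primitive() call recursively converts the selected destinations into plain JSON-serializable structures so they can be returned over RPC. A standalone illustration using oslo.serialization; the sample values are invented:

from oslo_serialization import jsonutils
import datetime

dests = [{'host': 'compute-1', 'nodename': 'compute-1',
          'limits': {'memory_mb': 4096}}]
# Dicts, iterables and datetimes are all reduced to JSON-friendly
# primitives suitable for the RPC reply.
print(jsonutils.to_primitive(dests))
print(jsonutils.to_primitive(datetime.datetime(2017, 1, 1)))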
nova.scheduler.filter_scheduler.FilterScheduler:
def select_destinations(self, context, spec_obj):
    """Selects a filtered set of hosts and nodes."""
    self.notifier.info(
        context, 'scheduler.select_destinations.start',
        dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
    # Couldn't fulfill the request_spec
    if len(selected_hosts) < num_instances:
        # NOTE(Rui Chen): If multiple creates failed, set the updated time
        # of selected HostState to None so that these HostStates are
        # refreshed according to database in next schedule, and release
        # the resource consumed by instance in the process of selecting
        # host.
        for host in selected_hosts:
            host.obj.updated = None

        # Log the details but don't put those into the reason since
        # we don't want to give away too much information about our
        # actual environment.
        LOG.debug('There are %(hosts)d hosts available but '
                  '%(num_instances)d instances requested to build.',
                  {'hosts': len(selected_hosts),
                   'num_instances': num_instances})

        reason = _('There are not enough hosts available.')
        raise exception.NoValidHost(reason=reason)

    dests = [dict(host=host.obj.host, nodename=host.obj.nodename,
                  limits=host.obj.limits) for host in selected_hosts]
def _schedule(self, context, spec_obj):
    """Returns a list of hosts that meet the required specs,
    ordered by their fitness.
    """
    ...
    for num in range(num_instances):
        # Filter local hosts based on requirements ...
        hosts = self.host_manager.get_filtered_hosts(hosts,
                                                     spec_obj,
                                                     index=num)
        if not hosts:
            ...
        ...
        # Now consume the resources so the filter/weights
        # will change for the next instance.
        chosen_host.obj.consume_from_request(spec_obj)
        if spec_obj.instance_group is not None:
            spec_obj.instance_group.hosts.append(chosen_host.obj.host)
            # hosts has to be not part of the updates when saving
            spec_obj.instance_group.obj_reset_changes(['hosts'])
    return selected_hosts
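To make the overall shape of _schedule concrete, here is a deliberately simplified, self-contained sketch of the same filter-then-weigh-then-consume loop; the HostState class, the RAM filter, and the weigher below are toy stand-ins invented for illustration, not nova code:

# A toy sketch of the filter/weigh/consume pattern.
class HostState:
    def __init__(self, name, free_ram_mb):
        self.name = name
        self.free_ram_mb = free_ram_mb

    def consume(self, ram_mb):
        # Mirror consume_from_request(): claim resources so the next
        # iteration sees the reduced capacity.
        self.free_ram_mb -= ram_mb

def schedule(hosts, ram_mb_per_instance, num_instances):
    selected = []
    for _ in range(num_instances):
        # "Filter": drop hosts that can't fit the instance.
        candidates = [h for h in hosts
                      if h.free_ram_mb >= ram_mb_per_instance]
        if not candidates:
            break
        # "Weigh": prefer the host with the most free RAM.
        chosen = max(candidates, key=lambda h: h.free_ram_mb)
        selected.append(chosen)
        # "Consume": update state so the weights shift for the
        # next instance in this same request.
        chosen.consume(ram_mb_per_instance)
    return selected

hosts = [HostState('compute-1', 8192), HostState('compute-2', 4096)]
print([h.name for h in schedule(hosts, 2048, 4)])
# -> ['compute-1', 'compute-1', 'compute-1', 'compute-2']

Consuming from the chosen host inside the loop is what lets a single request spread multiple instances sensibly: without it, every iteration would pick the same "best" host.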
@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
    # NOTE(danms): We grab the semaphore with the instance uuid
    # locked because we could wait in line to build this instance
    # for a while and we want to make sure that nothing else tries
    # to do anything with this instance while we wait.
    with self._build_semaphore:
        self._do_build_and_run_instance(*args, **kwargs)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
# want to tie up RPC workers.
utils.spawn_n(_locked_do_build_and_run_instance,
              context, instance, image, request_spec,
              filter_properties, admin_password, injected_files,
              requested_networks, security_groups,
              block_device_mapping, node, limits)
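The combination here of a per-uuid lock, a build semaphore, and spawning a green thread so the RPC worker can return can be reproduced in miniature. This standalone sketch uses oslo.concurrency and eventlet in the same way, but the function names and the semaphore size are illustrative only:

# A hedged sketch of the per-uuid lock + semaphore + spawn pattern.
import eventlet
eventlet.monkey_patch()

from eventlet import semaphore
from oslo_concurrency import lockutils

_build_semaphore = semaphore.Semaphore(10)  # cap concurrent builds

def build_instance(uuid):
    @lockutils.synchronized(uuid)  # serialize all work on this uuid
    def _locked_build():
        with _build_semaphore:     # bound overall build concurrency
            print('building %s' % uuid)
            eventlet.sleep(1)      # stand-in for the long build

    # Hand the long-running work to a green thread so the caller
    # (an RPC worker in nova's case) returns to its pool immediately.
    eventlet.spawn_n(_locked_build)

for u in ('uuid-1', 'uuid-2'):
    build_instance(u)
eventlet.sleep(2)  # let the green threads finish in this demo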
try:
    with timeutils.StopWatch() as timer:
        self._build_and_run_instance(context, instance, image,
                                     decoded_files, admin_password,
                                     requested_networks,
                                     security_groups,
                                     block_device_mapping, node,
                                     limits, filter_properties)
    LOG.info(_LI('Took %0.2f seconds to build instance.'),
             timer.elapsed(), instance=instance)
    return build_results.ACTIVE
except exception.RescheduledException as e:
    ...
except (exception.InstanceNotFound,
        exception.UnexpectedDeletingTaskStateError):
    ...
except exception.BuildAbortException as e:
    ...
except Exception as e:
    ...
ComputeManager is responsible for actually creating the VM; along the way it interacts with subsystems such as neutron and cinder to set up the VM's resources. The next section analyzes the VM creation flow in detail.

class ComputeManager(manager.Manager):
    """Manages the running instances from creation to destruction."""
    # How long to wait in seconds before re-issuing a shutdown
    # signal to an instance during power off.  The overall
    # time to wait is set by CONF.shutdown_timeout.
    SHUTDOWN_RETRY_INTERVAL = 10
        # NOTE(russellb) Load the driver last.  It may call back into the
        # compute manager via the virtapi, so we want it to be fully
        # initialized before that happens.
        self.driver = driver.load_compute_driver(self.virtapi,
                                                 compute_driver)
        self.use_legacy_block_device_info = \
            self.driver.need_legacy_block_device_info