- Call the `create` method of the `ServersController` class in `/nova/api/openstack/compute/servers.py`

```python
# Mainly calls: self.compute_api.create(...)
self.compute_api = compute.API()

# compute.API() resolves to /nova/compute/__init__.py
CELL_TYPE_TO_CLS_NAME = {'api': 'nova.compute.cells_api.ComputeCellsAPI',
                         'compute': 'nova.compute.api.API',
                         None: 'nova.compute.api.API'}

def _get_compute_api_class_name():
    """Returns the name of compute API class."""
    cell_type = nova.cells.opts.get_cell_type()
    return CELL_TYPE_TO_CLS_NAME[cell_type]

def API(*args, **kwargs):
    class_name = _get_compute_api_class_name()
    return importutils.import_object(class_name, *args, **kwargs)

# nova/cells/opts.py
def get_cell_type():
    """Return the cell type, 'api', 'compute', or None
    (if cells is disabled).
    """
    if not CONF.cells.enable:
        return
    return CONF.cells.cell_type
```
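The `import_object` call above loads and instantiates a class from its dotted path at runtime. A minimal sketch of what `oslo_utils.importutils.import_object` does under the hood, using only the standard library (the example path is just for illustration):

```python
import importlib

def import_object(class_path, *args, **kwargs):
    # Split 'package.module.Class' into module and class parts,
    # import the module, then instantiate the class.
    module_name, _, class_name = class_path.rpartition('.')
    module = importlib.import_module(module_name)
    cls = getattr(module, class_name)
    return cls(*args, **kwargs)

# e.g. with cells disabled this would resolve to nova.compute.api.API:
# api = import_object('nova.compute.api.API')
```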
- Call the `create` method of the `API` class in `/nova/compute/api.py`

```python
# Validate properties and build the scheduler filter properties
filter_properties = scheduler_utils.build_filter_properties(
    scheduler_hints, forced_host, forced_node, instance_type)
...
# Finally delegates to:
self._create_instance(...)
```
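For orientation, here is a hypothetical re-creation of what `build_filter_properties` assembles; the key names mirror the real helper but should be treated as an assumption, not a guarantee of Nova's API:

```python
def build_filter_properties(scheduler_hints, forced_host, forced_node, flavor):
    # force_hosts / force_nodes let the request bypass scheduler filtering.
    props = {'scheduler_hints': scheduler_hints or {},
             'instance_type': flavor}
    if forced_host:
        props['force_hosts'] = [forced_host]
    if forced_node:
        props['force_nodes'] = [forced_node]
    return props

print(build_filter_properties({'group': 'g1'}, 'compute-01', None,
                              {'name': 'm1.small'}))
```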
- Call the `_create_instance` method of the `API` class in `/nova/compute/api.py`

```python
# Fetch the image
image_id, boot_meta = self._get_image(context, image_href)
# Validate and build the base instance options
base_options, max_net_count, key_pair, security_groups, network_metadata = (
    self._validate_and_build_base_options(...))
# Check and transform the block device mapping
block_device_mapping = self._check_and_transform_bdm(...)
# Fetch the requested instance group
instance_group = self._get_requested_instance_group(...)
# Provision the instance records
instances_to_build = self._provision_instances(...)

# Assemble the build arguments
instances = []
request_specs = []
build_requests = []
for rs, build_request, im in instances_to_build:
    build_requests.append(build_request)
    instance = build_request.get_new_instance(context)
    instances.append(instance)
    request_specs.append(rs)

# CONF.cells.enable defaults to False
if CONF.cells.enable:
    pass
else:
    self.compute_task_api.schedule_and_build_instances(...)

# compute_task_api resolves to:
from nova import conductor
from nova.conductor import api as conductor_api
ComputeTaskAPI = conductor_api.ComputeTaskAPI
self.compute_task_api = conductor.ComputeTaskAPI()
```
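`_provision_instances` returns one `(RequestSpec, BuildRequest, InstanceMapping)` triple per requested instance, which the loop above unpacks. A toy model with hypothetical stand-in objects (not Nova's real classes):

```python
from collections import namedtuple

RequestSpec = namedtuple('RequestSpec', 'instance_uuid flavor')
BuildRequest = namedtuple('BuildRequest', 'instance_uuid')
InstanceMapping = namedtuple('InstanceMapping', 'instance_uuid cell_mapping')

instances_to_build = [
    (RequestSpec('uuid-1', 'm1.small'),
     BuildRequest('uuid-1'),
     InstanceMapping('uuid-1', cell_mapping=None)),
]
for rs, build_request, im in instances_to_build:
    print(rs.instance_uuid, rs.flavor, im.cell_mapping)
```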
- Call the `schedule_and_build_instances` method of the `ComputeTaskAPI` class in `/nova/conductor/api.py`

```python
# Simply delegates to:
self.conductor_compute_rpcapi.schedule_and_build_instances(...)

# conductor_compute_rpcapi resolves to:
from nova.conductor import rpcapi
self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
```
- Call the `schedule_and_build_instances` method of the `ComputeTaskAPI` class in `/nova/conductor/rpcapi.py`

```python
# Publish the request to the message queue via the RPC service
cctxt.cast(context, 'schedule_and_build_instances', **kw)
```
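`cast` is oslo.messaging's fire-and-forget RPC: the message is published to the queue and the caller does not wait for a result (unlike `call`). A minimal sketch of the pattern; the broker URL, topic, and method name are assumptions for illustration:

```python
import oslo_messaging as messaging
from oslo_config import cfg

# Assumed broker URL; in Nova this comes from the service configuration.
transport = messaging.get_rpc_transport(
    cfg.CONF, url='rabbit://guest:guest@localhost:5672/')
target = messaging.Target(topic='conductor')
client = messaging.RPCClient(transport, target)

# cast: enqueue and return immediately; a conductor worker consumes it later.
# client.cast(context, 'schedule_and_build_instances', build_requests=...)
```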
- Call the `schedule_and_build_instances` method of the `ComputeTaskManager` class in `/nova/conductor/manager.py`

```python
# Ask the scheduler for candidate hosts
host_lists = self._schedule_instances(context, request_specs[0],
                                      instance_uuids, return_alternates=True)
# Pick the build target for each instance
for (build_request, request_spec, host_list) in six.moves.zip(
        build_requests, request_specs, host_lists):
    ...
    # Resolve the cell
    cell = host_mapping.cell_mapping
    # ...scheduling...
    # Check quota via check_num_instances_quota in /nova/compute/utils.py
    compute_utils.check_num_instances_quota(...)
    # Hand off to the target compute node
    self.compute_rpcapi.build_and_run_instance(...)

# compute_rpcapi resolves to:
from nova.compute import rpcapi as compute_rpcapi
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
```
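With `return_alternates=True` the scheduler answers with one `host_list` per instance: a chosen host plus alternates kept for retries. A simplified, hypothetical sketch of how the loop pairs each build request with its host (the field names are illustrative, not Nova's `Selection` objects):

```python
build_requests = ['build-req-1']
request_specs = ['req-spec-1']
host_lists = [
    [{'service_host': 'compute-01'},   # selected host
     {'service_host': 'compute-02'}],  # alternate, used if the build fails
]

for build_request, request_spec, host_list in zip(
        build_requests, request_specs, host_lists):
    host = host_list[0]
    print(build_request, '->', host['service_host'])
```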
- Call the `build_and_run_instance` method of the `ComputeAPI` class in `/nova/compute/rpcapi.py`

```python
# Publish the request to the message queue via the RPC service
cctxt.cast(ctxt, 'build_and_run_instance', **kwargs)
```
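Unlike the conductor cast above, which goes to a shared topic, this cast is directed at the specific compute host the scheduler picked. With oslo.messaging that is expressed by setting `server=` on the target; a sketch under the same illustrative assumptions as before:

```python
import oslo_messaging as messaging
from oslo_config import cfg

transport = messaging.get_rpc_transport(
    cfg.CONF, url='rabbit://guest:guest@localhost:5672/')
# server= routes the message to one host's queue instead of the shared topic.
target = messaging.Target(topic='compute', server='compute-01')
client = messaging.RPCClient(transport, target)
# client.cast(ctxt, 'build_and_run_instance', **kwargs)
```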
- Call the `build_and_run_instance` method of the `ComputeManager` class in `/nova/compute/manager.py`

```python
# Spawn an eventlet greenthread to run the build
utils.spawn_n(_locked_do_build_and_run_instance, ...)
# The greenthread then runs
_locked_do_build_and_run_instance(...)
# which finally executes
result = self._do_build_and_run_instance(*args, **kwargs)
```
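`utils.spawn_n` is a thin wrapper around eventlet's `spawn_n`: the build runs in a greenthread so the RPC worker can return immediately. A minimal standalone sketch:

```python
import eventlet

def _locked_do_build_and_run_instance(instance_name):
    print('building %s' % instance_name)

# Fire-and-forget: the greenthread runs cooperatively once we yield.
eventlet.spawn_n(_locked_do_build_and_run_instance, 'vm-1')
eventlet.sleep(0)  # yield control so the greenthread executes
```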
- Call the `_do_build_and_run_instance` method of the `ComputeManager` class in `/nova/compute/manager.py`

```python
# Update the instance state
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=(task_states.SCHEDULING, None))
# Finally executes
self._build_and_run_instance(...)
```
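The `expected_task_state` argument makes the save a compare-and-swap: the update only lands if the task_state stored in the database still matches, which guards against racing state changes. A toy model of the idea (not Nova's implementation):

```python
class UnexpectedTaskStateError(Exception):
    pass

class Instance(object):
    def __init__(self):
        self._db_task_state = 'scheduling'  # value currently in the database
        self.vm_state = 'building'
        self.task_state = None              # new value we want to persist

    def save(self, expected_task_state=()):
        # Only persist if the DB-side task_state is still what we expect.
        if self._db_task_state not in expected_task_state:
            raise UnexpectedTaskStateError(self._db_task_state)
        self._db_task_state = self.task_state  # ...write to the DB here...

inst = Instance()
inst.save(expected_task_state=('scheduling', None))  # succeeds
```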
- Call the `_build_and_run_instance` method of the `ComputeManager` class in `/nova/compute/manager.py`

```python
# Emit notifications (database/notification operations)
self._notify_about_instance_usage(...)
compute_utils.notify_about_instance_create(...)
# Resolve the resource-provider mapping from the request spec
request_group_resource_providers_mapping = (
    self._get_request_group_mapping(request_spec))
# Resolve the scheduler hints
scheduler_hints = self._get_scheduler_hints(filter_properties, request_spec)
# Enter the resource-building context manager
with self._build_resources(...) as resources:
    ...
    with timeutils.StopWatch() as timer:
        # Spawn the guest: calls spawn in /nova/virt/libvirt/driver.py
        self.driver.spawn(...)
```
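`_build_resources` is a context manager so that the networks and block devices prepared up front can be torn down automatically if the spawn inside the `with` block fails. A toy model of the pattern (illustrative, not Nova's code):

```python
import contextlib

@contextlib.contextmanager
def build_resources():
    resources = {'network_info': 'allocated', 'block_device_info': 'attached'}
    try:
        yield resources
    except Exception:
        # On spawn failure, unwind whatever was set up before re-raising.
        print('build failed, cleaning up', list(resources))
        raise

with build_resources() as resources:
    print('spawning guest with', resources)
```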
- Call the `_build_resources` method of the `ComputeManager` class in `/nova/compute/manager.py`

```python
# Prepare network data
network_info = self._build_networks_for_instance(
    context, instance, requested_networks, security_groups,
    resource_provider_mapping)
resources['network_info'] = network_info
# Prepare block devices
block_device_info = self._prep_block_device(context, instance,
                                            block_device_mapping)
```

-> calls the `_prep_block_device` method of the `ComputeManager` class in `/nova/compute/manager.py`
-> calls the `attach_block_devices` function in `/nova/virt/block_device.py`:

```python
def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
    def _log_and_attach(bdm):
        bdm.attach(*attach_args, **attach_kwargs)
    for device in block_device_mapping:
        _log_and_attach(device)
    return block_device_mapping
```

-> The for loop dispatches to the `attach` methods of the `DriverVolumeBlockDevice`, `DriverVolSnapshotBlockDevice`, `DriverVolImageBlockDevice`, `DriverVolBlankBlockDevice`, etc. classes in `/nova/virt/block_device.py`. They run on demand: the request parameters determine which `attach` methods execute, and not necessarily all four. `DriverVolumeBlockDevice.attach` is used as the example below.
-> calls the `attach` method of the `DriverVolumeBlockDevice` class in `/nova/virt/block_device.py`:

```python
# Mainly executes self._do_attach(...), which runs either
# self._volume_attach(...) or self._legacy_volume_attach(...).
# Both of those mainly execute virt_driver.attach_volume(...).
# Since the libvirt driver is in use, virt_driver is the LibvirtDriver
# class in /nova/virt/libvirt/driver.py.
```

-> i.e. calls the `attach_volume(...)` method of the `LibvirtDriver` class:

```python
# Mainly calls:
self._connect_volume(context, connection_info, instance, encryption=encryption)
# which fetches the volume driver:
vol_driver = self._get_volume_driver(connection_info)
# connects the volume:
vol_driver.connect_volume(connection_info, instance)
# and sets up encryption:
self._attach_encryptor(context, connection_info, encryption, allow_native_luks)
# The encryptor then runs encryptor._format_volume(...) or
# encryptor.attach_volume(...) to process the volume.
```

-> Finally handled via `eventlet.event`; mostly database operations that associate the image, network, and storage data with the instance.
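`_get_volume_driver` dispatches on `connection_info['driver_volume_type']` to pick the backend-specific volume driver. A hypothetical sketch of that dispatch (the driver classes and mapping here are stand-ins, not Nova's):

```python
class IscsiVolumeDriver(object):
    def connect_volume(self, connection_info, instance):
        print('log in to iSCSI target:', connection_info['data'])

class RbdVolumeDriver(object):
    def connect_volume(self, connection_info, instance):
        print('configure RBD volume:', connection_info['data'])

VOLUME_DRIVERS = {'iscsi': IscsiVolumeDriver(), 'rbd': RbdVolumeDriver()}

connection_info = {'driver_volume_type': 'rbd',
                   'data': {'name': 'volumes/volume-1'}}
driver = VOLUME_DRIVERS[connection_info['driver_volume_type']]
driver.connect_volume(connection_info, instance=None)
```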
- Call the `spawn` method of the `LibvirtDriver` class in `/nova/virt/libvirt/driver.py`

```python
# Gather disk information
disk_info = blockinfo.get_disk_info(...)
# Build the injection data
injection_info = InjectionInfo(...)
# Prepare the config drive generator
gen_confdrive = functools.partial(self._create_configdrive, ...)
```

-> cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
-> with cdb: ... cdb.make_drive(config_disk_local_path)
-> calls the `make_drive` method of the `ConfigDriveBuilder` class in `/nova/virt/configdrive.py`:

```python
# CONF.config_drive_format defaults to iso9660, so this executes
self._make_iso9660(path, tmpdir)
# which ultimately calls processutils.execute(...)
# from oslo_concurrency import processutils
```

Back in `spawn`:

```python
# Create the instance directory and disks
created_instance_dir, created_disks = self._create_image(...)
# Allocate mediated devices (e.g. vGPUs)
mdevs = self._allocate_mdevs(allocations)
# Generate the guest XML definition
xml = self._get_guest_xml(context, instance, network_info, disk_info,
                          image_meta, block_device_info=block_device_info,
                          mdevs=mdevs)
# Create the domain and networking
self._create_domain_and_network(context, xml, ...)
```
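`processutils.execute` shells out with argument escaping and returns `(stdout, stderr)`. A sketch of how an ISO 9660 config drive could be produced this way; the `genisoimage` flags and paths below are illustrative, not the exact argument list Nova builds:

```python
from oslo_concurrency import processutils

# Assumption: /tmp/configdrive-contents holds the metadata files to pack.
stdout, stderr = processutils.execute(
    'genisoimage', '-o', '/tmp/config.iso',
    '-ldots', '-allow-lowercase', '-allow-multidot',
    '-l', '-quiet', '-J', '-r', '-V', 'config-2',
    '/tmp/configdrive-contents')
```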
- Call the `_create_domain_and_network` method of the `LibvirtDriver` class in `/nova/virt/libvirt/driver.py`

```python
# Executes:
guest = self._create_domain(xml, pause=pause, power_on=power_on,
                            post_xml_callback=post_xml_callback)
# which in /nova/virt/libvirt/guest.py runs:
guest = libvirt_guest.Guest.create(xml, self._host)
# which in /nova/virt/libvirt/host.py runs:
guest = host.write_instance_config(xml)
domain = self.get_connection().defineXML(xml)
# Equivalent to running: virsh define ***.xml
# The VM is now defined; starting it corresponds to: virsh start ***
```
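The `defineXML`/start pair maps one-to-one onto `virsh define` and `virsh start`. A minimal sketch using the libvirt-python bindings, assuming a local qemu hypervisor and an existing domain XML file (the path is hypothetical):

```python
import libvirt

conn = libvirt.open('qemu:///system')
with open('/tmp/vm-1.xml') as f:   # hypothetical domain XML
    xml = f.read()
domain = conn.defineXML(xml)       # virsh define /tmp/vm-1.xml
domain.create()                    # virsh start vm-1
conn.close()
```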