Following Linux Kernel (16.1) - how platform_driver_register() calls driver.probe(), this post looks at how platform_device_register() ends up calling driver.probe().
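Before walking through the implementation, here is a minimal sketch of the caller's side: a board file or module that fills in a struct platform_device and hands it to platform_device_register(). The "demo" name, the resource window, and the init function are all made up for illustration; they are not taken from the kernel source quoted below. Everything that happens after this one call is what the rest of this post traces.

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* hypothetical register window for a hypothetical "demo" device */
static struct resource demo_resources[] = {
    {
        .start = 0x10000000,
        .end   = 0x10000fff,
        .flags = IORESOURCE_MEM,
    },
};

static struct platform_device demo_pdev = {
    .name          = "demo",              /* must match the driver's .name */
    .id            = PLATFORM_DEVID_NONE, /* device will simply be named "demo" */
    .num_resources = ARRAY_SIZE(demo_resources),
    .resource      = demo_resources,
};

static int __init demo_device_init(void)
{
    /* the entry point whose implementation is traced below */
    return platform_device_register(&demo_pdev);
}
module_init(demo_device_init);
MODULE_LICENSE("GPL");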
int platform_device_register(struct platform_device *pdev)
{
    device_initialize(&pdev->dev);
    arch_setup_pdev_archdata(pdev);
    return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

int platform_device_add(struct platform_device *pdev)
{
    int i, ret;

    if (!pdev)
        return -EINVAL;

    if (!pdev->dev.parent)
        pdev->dev.parent = &platform_bus;

    pdev->dev.bus = &platform_bus_type;

    switch (pdev->id) {
    default:
        dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
        break;
    case PLATFORM_DEVID_NONE:
        dev_set_name(&pdev->dev, "%s", pdev->name);
        break;
    case PLATFORM_DEVID_AUTO:
        /*
         * Automatically allocated device ID. We mark it as such so
         * that we remember it must be freed, and we append a suffix
         * to avoid namespace collision with explicit IDs.
         */
        ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
        if (ret < 0)
            goto err_out;
        pdev->id = ret;
        pdev->id_auto = true;
        dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
        break;
    }

    for (i = 0; i < pdev->num_resources; i++) {
        struct resource *p, *r = &pdev->resource[i];

        if (r->name == NULL)
            r->name = dev_name(&pdev->dev);

        p = r->parent;
        if (!p) {
            if (resource_type(r) == IORESOURCE_MEM)
                p = &iomem_resource;
            else if (resource_type(r) == IORESOURCE_IO)
                p = &ioport_resource;
        }

        if (p && insert_resource(p, r)) {
            dev_err(&pdev->dev, "failed to claim resource %d\n", i);
            ret = -EBUSY;
            goto failed;
        }
    }

    pr_debug("Registering platform device '%s'. Parent at %s\n",
             dev_name(&pdev->dev), dev_name(pdev->dev.parent));

    ret = device_add(&pdev->dev);
    if (ret == 0)
        return ret;

failed:
    if (pdev->id_auto) {
        ida_simple_remove(&platform_devid_ida, pdev->id);
        pdev->id = PLATFORM_DEVID_AUTO;
    }

    while (--i >= 0) {
        struct resource *r = &pdev->resource[i];
        if (r->parent)
            release_resource(r);
    }

err_out:
    return ret;
}
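The switch on pdev->id above is what decides the device's final name in sysfs. A small sketch of the three cases, reusing the hypothetical "demo" name (the resulting names follow directly from the dev_set_name() calls above):

#include <linux/platform_device.h>

/* explicit ID (default case): "%s.%d"            -> "demo.2" */
static struct platform_device demo2 = {
    .name = "demo",
    .id   = 2,
};

/* PLATFORM_DEVID_NONE: "%s"                      -> "demo" */
static struct platform_device demo_none = {
    .name = "demo",
    .id   = PLATFORM_DEVID_NONE,
};

/* PLATFORM_DEVID_AUTO: ID comes from ida_simple_get(), and
 * "%s.%d.auto" appends ".auto"                   -> e.g. "demo.0.auto" */
static struct platform_device demo_auto = {
    .name = "demo",
    .id   = PLATFORM_DEVID_AUTO,
};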
int device_add(struct device *dev)
{
    struct device *parent = NULL;
    struct kobject *kobj;
    struct class_interface *class_intf;
    int error = -EINVAL;

    dev = get_device(dev);
    if (!dev)
        goto done;

    if (!dev->p) {
        error = device_private_init(dev);
        if (error)
            goto done;
    }

    /*
     * for statically allocated devices, which should all be converted
     * some day, we need to initialize the name. We prevent reading back
     * the name, and force the use of dev_name()
     */
    if (dev->init_name) {
        dev_set_name(dev, "%s", dev->init_name);
        dev->init_name = NULL;
    }

    /* subsystems can specify simple device enumeration */
    if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
        dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

    if (!dev_name(dev)) {
        error = -EINVAL;
        goto name_error;
    }

    pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

    parent = get_device(dev->parent);
    kobj = get_device_parent(dev, parent);
    if (kobj)
        dev->kobj.parent = kobj;

    /* use parent numa_node */
    if (parent)
        set_dev_node(dev, dev_to_node(parent));

    /* first, register with generic layer. */
    /* we require the name to be set before, and pass NULL */
    error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
    if (error)
        goto Error;

    /* notify platform of device entry */
    if (platform_notify)
        platform_notify(dev);

    error = device_create_file(dev, &dev_attr_uevent);
    if (error)
        goto attrError;

    error = device_add_class_symlinks(dev);
    if (error)
        goto SymlinkError;
    error = device_add_attrs(dev);
    if (error)
        goto AttrsError;
    error = bus_add_device(dev);
    if (error)
        goto BusError;
    error = dpm_sysfs_add(dev);
    if (error)
        goto DPMError;
    device_pm_add(dev);

    if (MAJOR(dev->devt)) {
        error = device_create_file(dev, &dev_attr_dev);
        if (error)
            goto DevAttrError;

        error = device_create_sys_dev_entry(dev);
        if (error)
            goto SysEntryError;

        devtmpfs_create_node(dev);
    }

    /* Notify clients of device addition. This call must come
     * after dpm_sysfs_add() and before kobject_uevent().
     */
    if (dev->bus)
        blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                     BUS_NOTIFY_ADD_DEVICE, dev);

    kobject_uevent(&dev->kobj, KOBJ_ADD);
    bus_probe_device(dev);
    if (parent)
        klist_add_tail(&dev->p->knode_parent,
                       &parent->p->klist_children);

    if (dev->class) {
        mutex_lock(&dev->class->p->mutex);
        /* tie the class to the device */
        klist_add_tail(&dev->knode_class,
                       &dev->class->p->klist_devices);

        /* notify any interfaces that the device is here */
        list_for_each_entry(class_intf,
                            &dev->class->p->interfaces, node)
            if (class_intf->add_dev)
                class_intf->add_dev(dev, class_intf);
        mutex_unlock(&dev->class->p->mutex);
    }
done:
    put_device(dev);
    return error;
SysEntryError:
    if (MAJOR(dev->devt))
        device_remove_file(dev, &dev_attr_dev);
DevAttrError:
    device_pm_remove(dev);
    dpm_sysfs_remove(dev);
DPMError:
    bus_remove_device(dev);
BusError:
    device_remove_attrs(dev);
AttrsError:
    device_remove_class_symlinks(dev);
SymlinkError:
    device_remove_file(dev, &dev_attr_uevent);
attrError:
    kobject_uevent(&dev->kobj, KOBJ_REMOVE);
    kobject_del(&dev->kobj);
Error:
    cleanup_device_parent(dev);
    put_device(parent);
name_error:
    kfree(dev->p);
    dev->p = NULL;
    goto done;
}

void bus_probe_device(struct device *dev)
{
    struct bus_type *bus = dev->bus;
    struct subsys_interface *sif;

    if (!bus)
        return;

    if (bus->p->drivers_autoprobe)
        device_initial_probe(dev);

    mutex_lock(&bus->p->mutex);
    list_for_each_entry(sif, &bus->p->interfaces, node)
        if (sif->add_dev)
            sif->add_dev(dev, sif);
    mutex_unlock(&bus->p->mutex);
}

void device_initial_probe(struct device *dev)
{
    __device_attach(dev, true);
}
static int __device_attach(struct device *dev, bool allow_async)
{
    int ret = 0;

    device_lock(dev);
    if (dev->driver) {
        if (klist_node_attached(&dev->p->knode_driver)) {
            ret = 1;
            goto out_unlock;
        }
        ret = device_bind_driver(dev);
        if (ret == 0)
            ret = 1;
        else {
            dev->driver = NULL;
            ret = 0;
        }
    } else {
        struct device_attach_data data = {
            .dev = dev,
            .check_async = allow_async,
            .want_async = false,
        };

        if (dev->parent)
            pm_runtime_get_sync(dev->parent);

        ret = bus_for_each_drv(dev->bus, NULL, &data,
                               __device_attach_driver);
        if (!ret && allow_async && data.have_async) {
            /*
             * If we could not find appropriate driver
             * synchronously and we are allowed to do
             * async probes and there are drivers that
             * want to probe asynchronously, we'll
             * try them.
             */
            dev_dbg(dev, "scheduling asynchronous probe\n");
            get_device(dev);
            async_schedule(__device_attach_async_helper, dev);
        } else {
            pm_request_idle(dev);
        }

        if (dev->parent)
            pm_runtime_put(dev->parent);
    }
out_unlock:
    device_unlock(dev);
    return ret;
}

static int __device_attach_driver(struct device_driver *drv, void *_data)
{
    struct device_attach_data *data = _data;
    struct device *dev = data->dev;
    bool async_allowed;

    /*
     * Check if device has already been claimed. This may
     * happen with driver loading, device discovery/registration,
     * and deferred probe processing happens all at once with
     * multiple threads.
     */
    if (dev->driver)
        return -EBUSY;

    if (!driver_match_device(drv, dev))
        return 0;

    async_allowed = driver_allows_async_probing(drv);

    if (async_allowed)
        data->have_async = true;

    if (data->check_async && async_allowed != data->want_async)
        return 0;

    return driver_probe_device(drv, dev);
}

static inline int driver_match_device(struct device_driver *drv,
                                      struct device *dev)
{
    return drv->bus->match ? drv->bus->match(dev, drv) : 1;
}
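Putting the pieces together: __device_attach_driver() first asks the bus whether the driver and device match at all. For the platform bus, drv->bus->match points at platform_match(), which, among other checks, falls back to comparing pdev->name with drv->name; only when they match does driver_probe_device() run and eventually call the platform_driver's .probe() (via platform_drv_probe() in this kernel generation). The sketch below is a minimal, hypothetical "demo" module showing the whole chain fire; the names are made up, and platform_device_register_simple() is just a convenience wrapper that ends up in the same platform_device_add() path traced above.

#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
    dev_info(&pdev->dev, "probe reached for %s\n", dev_name(&pdev->dev));
    return 0;
}

static struct platform_driver demo_driver = {
    .probe  = demo_probe,
    .driver = {
        .name = "demo",   /* matched against pdev->name by platform_match() */
    },
};

static struct platform_device *demo_dev;

static int __init demo_init(void)
{
    int ret;

    /* no matching device exists yet, so probe does not run here */
    ret = platform_driver_register(&demo_driver);
    if (ret)
        return ret;

    /*
     * Registering the device walks the path traced in this post:
     * platform_device_register() -> platform_device_add() -> device_add()
     *   -> bus_probe_device() -> device_initial_probe() -> __device_attach()
     *   -> __device_attach_driver() -> driver_probe_device() -> demo_probe()
     */
    demo_dev = platform_device_register_simple("demo", PLATFORM_DEVID_NONE,
                                               NULL, 0);
    if (IS_ERR(demo_dev)) {
        platform_driver_unregister(&demo_driver);
        return PTR_ERR(demo_dev);
    }
    return 0;
}

static void __exit demo_exit(void)
{
    platform_device_unregister(demo_dev);
    platform_driver_unregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");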