Commit a18d783f, authored by Linus Torvalds

Merge tag 'driver-core-4.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core updates from Greg KH:
 "Here are all of the driver core and related patches for 4.19-rc1.

  Nothing huge here, just a number of small cleanups and the ability to
  now stop the deferred probing after init happens.

  All of these have been in linux-next for a while with only a merge
  issue reported"

* tag 'driver-core-4.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (21 commits)
  base: core: Remove WARN_ON from link dependencies check
  drivers/base: stop new probing during shutdown
  drivers: core: Remove glue dirs from sysfs earlier
  driver core: remove unnecessary function extern declare
  sysfs.h: fix non-kernel-doc comment
  PM / Domains: Stop deferring probe at the end of initcall
  iommu: Remove IOMMU_OF_DECLARE
  iommu: Stop deferring probe at end of initcalls
  pinctrl: Support stopping deferred probe after initcalls
  dt-bindings: pinctrl: add a 'pinctrl-use-default' property
  driver core: allow stopping deferred probe after init
  driver core: add a debugfs entry to show deferred devices
  sysfs: Fix internal_create_group() for named group updates
  base: fix order of OF initialization
  linux/device.h: fix kernel-doc notation warning
  Documentation: update firmware loader fallback reference
  kobject: Replace strncpy with memcpy
  drivers: base: cacheinfo: use OF property_read_u32 instead of get_property,read_number
  kernfs: Replace strncpy with memcpy
  device: Add #define dev_fmt similar to #define pr_fmt
  ...
......@@ -812,6 +812,15 @@
Defaults to the default architecture's huge page size
if not specified.
deferred_probe_timeout=
[KNL] Debugging option to set a timeout in seconds for
deferred probe to give up waiting on dependencies to
probe. Only specific dependencies (subsystems or
drivers) that have opted in will be ignored. A timeout of 0
will timeout at the end of initcalls. This option will also
dump out devices still on the deferred probe list after
retrying.
dhash_entries= [KNL]
Set number of hash buckets for dentry cache.
......
......@@ -103,6 +103,12 @@ Optional properties:
#pinctrl-cells: Number of pin control cells in addition to the index within the
pin controller device instance
pinctrl-use-default: Boolean. Indicates that the OS can use the boot default
pin configuration. This allows using an OS that does not have a
driver for the pin controller. This property can be set either
globally for the pin controller or in child nodes for individual
pin group control.
Pin controller devices should contain the pin configuration nodes that client
devices reference.
......
......@@ -92,7 +92,7 @@ the loading file.
The firmware device used to help load firmware using sysfs is only created if
direct firmware loading fails and if the fallback mechanism is enabled for your
firmware request, this is set up with fw_load_from_user_helper(). It is
firmware request, this is set up with :c:func:`firmware_fallback_sysfs`. It is
important to re-iterate that no device is created if a direct filesystem lookup
succeeded.
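
As a hedged illustration (not part of this change), the sketch below shows a driver-side firmware request that can end up in firmware_fallback_sysfs() when the direct filesystem lookup fails and the fallback is enabled for that request; the firmware name and the probe helper are hypothetical:

#include <linux/device.h>
#include <linux/firmware.h>

static int foo_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/*
	 * Direct filesystem lookup is tried first; only if it fails, and
	 * only when the fallback is enabled for this request, does
	 * firmware_fallback_sysfs() create the sysfs loading device.
	 */
	err = request_firmware(&fw, "foo/firmware.bin", dev);
	if (err)
		return err;

	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
	return 0;
}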
......@@ -108,6 +108,11 @@ firmware_data_read() and firmware_loading_show() are just provided for the
test_firmware driver for testing, they are not called in normal use or
expected to be used regularly by userspace.
firmware_fallback_sysfs
-----------------------
.. kernel-doc:: drivers/base/firmware_loader/fallback.c
:functions: firmware_fallback_sysfs
Firmware kobject uevent fallback mechanism
==========================================
......
......@@ -84,8 +84,6 @@ struct device_private {
#define to_device_private_bus(obj) \
container_of(obj, struct device_private, knode_bus)
extern int device_private_init(struct device *dev);
/* initialisation functions */
extern int devices_init(void);
extern int buses_init(void);
......
......@@ -74,52 +74,48 @@ static inline int get_cacheinfo_idx(enum cache_type type)
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
const __be32 *cache_size;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
cache_size = of_get_property(np, propname, NULL);
if (cache_size)
this_leaf->size = of_read_number(cache_size, 1);
if (of_property_read_u32(np, propname, &this_leaf->size))
this_leaf->size = 0;
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
struct device_node *np)
{
const __be32 *line_size;
int i, lim, ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
for (i = 0; i < lim; i++) {
int ret;
u32 line_size;
const char *propname;
propname = cache_type_info[ct_idx].line_size_props[i];
line_size = of_get_property(np, propname, NULL);
if (line_size)
ret = of_property_read_u32(np, propname, &line_size);
if (!ret) {
this_leaf->coherency_line_size = line_size;
break;
}
}
if (line_size)
this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
const __be32 *nr_sets;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
nr_sets = of_get_property(np, propname, NULL);
if (nr_sets)
this_leaf->number_of_sets = of_read_number(nr_sets, 1);
if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
this_leaf->number_of_sets = 0;
}
static void cache_associativity(struct cacheinfo *this_leaf)
......
......@@ -105,7 +105,7 @@ static int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;
if (WARN_ON(dev == target))
if (dev == target)
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
......@@ -113,7 +113,7 @@ static int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (WARN_ON(link->consumer == target))
if (link->consumer == target)
return 1;
ret = device_is_dependent(link->consumer, target);
......@@ -1647,6 +1647,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
return;
mutex_lock(&gdp_mutex);
if (!kobject_has_children(glue_dir))
kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
}
......@@ -1786,7 +1788,7 @@ static void device_remove_sys_dev_entry(struct device *dev)
}
}
int device_private_init(struct device *dev)
static int device_private_init(struct device *dev)
{
dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
if (!dev->p)
......@@ -2859,6 +2861,9 @@ void device_shutdown(void)
{
struct device *dev, *parent;
wait_for_device_probe();
device_block_probing();
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.
......@@ -3052,12 +3057,12 @@ void func(const struct device *dev, const char *fmt, ...) \
} \
EXPORT_SYMBOL(func);
define_dev_printk_level(dev_emerg, KERN_EMERG);
define_dev_printk_level(dev_alert, KERN_ALERT);
define_dev_printk_level(dev_crit, KERN_CRIT);
define_dev_printk_level(dev_err, KERN_ERR);
define_dev_printk_level(dev_warn, KERN_WARNING);
define_dev_printk_level(dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
#endif
......
......@@ -16,6 +16,7 @@
* Copyright (c) 2007-2009 Novell Inc.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
......@@ -53,6 +54,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static struct dentry *deferred_devices;
static bool initcalls_done;
/*
......@@ -62,26 +64,6 @@ static bool initcalls_done;
*/
static bool defer_all_probes;
/*
* For initcall_debug, show the deferred probes executed in late_initcall
* processing.
*/
static void deferred_probe_debug(struct device *dev)
{
ktime_t calltime, delta, rettime;
unsigned long long duration;
printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev),
task_pid_nr(current));
calltime = ktime_get();
bus_probe_device(dev);
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n",
dev_name(dev), duration);
}
/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
......@@ -125,11 +107,7 @@ static void deferred_probe_work_func(struct work_struct *work)
device_pm_move_to_tail(dev);
dev_dbg(dev, "Retrying from deferred list\n");
if (initcall_debug && !initcalls_done)
deferred_probe_debug(dev);
else
bus_probe_device(dev);
bus_probe_device(dev);
mutex_lock(&deferred_probe_mutex);
put_device(dev);
......@@ -224,6 +202,69 @@ void device_unblock_probing(void)
driver_deferred_probe_trigger();
}
/*
* deferred_devs_show() - Show the devices in the deferred probe pending list.
*/
static int deferred_devs_show(struct seq_file *s, void *data)
{
struct device_private *curr;
mutex_lock(&deferred_probe_mutex);
list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
seq_printf(s, "%s\n", dev_name(curr->device));
mutex_unlock(&deferred_probe_mutex);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
static int deferred_probe_timeout = -1;
static int __init deferred_probe_timeout_setup(char *str)
{
deferred_probe_timeout = simple_strtol(str, NULL, 10);
return 1;
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
/**
* driver_deferred_probe_check_state() - Check deferred probe state
* @dev: device to check
*
* Returns -ENODEV if init is done and all built-in drivers have had a chance
* to probe (i.e. initcalls are done), -ETIMEDOUT if deferred probe debug
* timeout has expired, or -EPROBE_DEFER if none of those conditions are met.
*
* Drivers or subsystems can opt-in to calling this function instead of directly
* returning -EPROBE_DEFER.
*/
int driver_deferred_probe_check_state(struct device *dev)
{
if (initcalls_done) {
if (!deferred_probe_timeout) {
dev_WARN(dev, "deferred probe timeout, ignoring dependency");
return -ETIMEDOUT;
}
dev_warn(dev, "ignoring dependency for device, assuming no driver");
return -ENODEV;
}
return -EPROBE_DEFER;
}
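
A hedged sketch (not from this patch) of how a driver that has opted in might call the new helper from its probe path instead of returning a bare -EPROBE_DEFER; the driver, clock name, and error handling are hypothetical:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, "core");

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) != -EPROBE_DEFER)
			return PTR_ERR(clk);
		/*
		 * Let the core decide: -EPROBE_DEFER while initcalls are
		 * still running, -ENODEV once they are done (assume no
		 * driver will appear), or -ETIMEDOUT after the
		 * deferred_probe_timeout debug timeout expires.
		 */
		return driver_deferred_probe_check_state(&pdev->dev);
	}

	return 0;
}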
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *private, *p;
deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
dev_info(private->device, "deferred probe pending");
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
......@@ -233,15 +274,36 @@ void device_unblock_probing(void)
*/
static int deferred_probe_initcall(void)
{
deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL,
NULL, &deferred_devs_fops);
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
/* Sort as many dependencies as possible before exiting initcalls */
flush_work(&deferred_probe_work);
initcalls_done = true;
/*
* Trigger deferred probe again, this time we won't defer anything
* that is optional
*/
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
if (deferred_probe_timeout > 0) {
schedule_delayed_work(&deferred_probe_timeout_work,
deferred_probe_timeout * HZ);
}
return 0;
}
late_initcall(deferred_probe_initcall);
static void __exit deferred_probe_exit(void)
{
debugfs_remove_recursive(deferred_devices);
}
__exitcall(deferred_probe_exit);
/**
* device_is_bound() - Check if device is bound to a driver
* @dev: device to check
......@@ -519,6 +581,23 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return ret;
}
/*
* For initcall_debug, show the driver probe time.
*/
static int really_probe_debug(struct device *dev, struct device_driver *drv)
{
ktime_t calltime, delta, rettime;
int ret;
calltime = ktime_get();
ret = really_probe(dev, drv);
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
dev_name(dev), ret, (s64) ktime_to_us(delta));
return ret;
}
/**
* driver_probe_done
* Determine if the probe sequence is finished or not.
......@@ -577,7 +656,10 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pm_runtime_get_sync(dev->parent);
pm_runtime_barrier(dev);
ret = really_probe(dev, drv);
if (initcall_debug)
ret = really_probe_debug(dev, drv);
else
ret = really_probe(dev, drv);
pm_request_idle(dev);
if (dev->parent)
......
......@@ -30,9 +30,9 @@ void __init driver_init(void)
/* These are also core pieces, but must come after the
* core core pieces.
*/
of_core_init();
platform_bus_init();
cpu_dev_init();
memory_dev_init();
container_dev_init();
of_core_init();
}
......@@ -2253,7 +2253,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
return -EPROBE_DEFER;
return driver_deferred_probe_check_state(dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
......
......@@ -2915,8 +2915,6 @@ static struct platform_driver arm_smmu_driver = {
};
module_platform_driver(arm_smmu_driver);
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3");
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
......@@ -2211,13 +2211,6 @@ static struct platform_driver arm_smmu_driver = {
};
module_platform_driver(arm_smmu_driver);
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1");
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2");
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400");
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401");
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500");
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2");
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
......@@ -1390,5 +1390,3 @@ static int __init exynos_iommu_init(void)
return ret;
}
core_initcall(exynos_iommu_init);
IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu");
......@@ -1108,9 +1108,6 @@ static void __exit ipmmu_exit(void)
subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);
IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa");
IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795");
MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");
......@@ -877,7 +877,5 @@ static void __exit msm_iommu_driver_exit(void)
subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);
IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
......@@ -27,9 +27,6 @@
#define NO_IOMMU 1
static const struct of_device_id __iommu_of_table_sentinel
__used __section(__iommu_of_table_end);
/**
* of_get_dma_window - Parse *dma-window property and returns 0 if found.
*
......@@ -98,19 +95,6 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
static bool of_iommu_driver_present(struct device_node *np)
{
/*
* If the IOMMU still isn't ready by the time we reach init, assume
* it never will be. We don't want to defer indefinitely, nor attempt
* to dereference __iommu_of_table after it's been freed.
*/
if (system_state >= SYSTEM_RUNNING)
return false;
return of_match_node(&__iommu_of_table, np);
}
static int of_iommu_xlate(struct device *dev,
struct of_phandle_args *iommu_spec)
{
......@@ -120,8 +104,7 @@ static int of_iommu_xlate(struct device *dev,
ops = iommu_ops_from_fwnode(fwnode);
if ((ops && !ops->of_xlate) ||
!of_device_is_available(iommu_spec->np) ||
(!ops && !of_iommu_driver_present(iommu_spec->np)))
!of_device_is_available(iommu_spec->np))
return NO_IOMMU;
err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
......@@ -133,7 +116,7 @@ static int of_iommu_xlate(struct device *dev,
* a proper probe-ordering dependency mechanism in future.
*/
if (!ops)
return -EPROBE_DEFER;
return driver_deferred_probe_check_state(dev);
return ops->of_xlate(dev, iommu_spec);
}
......
......@@ -945,7 +945,5 @@ static void __exit qcom_iommu_exit(void)
module_init(qcom_iommu_init);
module_exit(qcom_iommu_exit);
IOMMU_OF_DECLARE(qcom_iommu_dev, "qcom,msm-iommu-v1");
MODULE_DESCRIPTION("IOMMU API for QCOM IOMMU v1 implementations");
MODULE_LICENSE("GPL v2");
......@@ -1284,8 +1284,6 @@ static int __init rk_iommu_init(void)
}
subsys_initcall(rk_iommu_init);
IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");
MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
......
......@@ -111,17 +111,24 @@ static int dt_to_map_one_config(struct pinctrl *p,
int ret;
struct pinctrl_map *map;
unsigned num_maps;
bool allow_default = false;
/* Find the pin controller containing np_config */
np_pctldev = of_node_get(np_config);
for (;;) {
if (!allow_default)
allow_default = of_property_read_bool(np_pctldev,
"pinctrl-use-default");
np_pctldev = of_get_next_parent(np_pctldev);
if (!np_pctldev || of_node_is_root(np_pctldev)) {
dev_info(p->dev, "could not find pctldev for node %pOF, deferring probe\n",
np_config);
of_node_put(np_pctldev);
/* OK let's just assume this will appear later then */
return -EPROBE_DEFER;
ret = driver_deferred_probe_check_state(p->dev);
/* keep deferring if modules are enabled unless we've timed out */
if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret == -ENODEV)
ret = -EPROBE_DEFER;
return ret;
}
/* If we're creating a hog we can use the passed pctldev */
if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
......
......@@ -97,7 +97,7 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
int slen = strlen(kn->name);
len -= slen;
strncpy(s + len, kn->name, slen);
memcpy(s + len, kn->name, slen);
if (len)
s[--len] = '/';
......
......@@ -124,13 +124,22 @@ static int internal_create_group(struct kobject *kobj, int update,
}
kobject_get_ownership(kobj, &uid, &gid);
if (grp->name) {
kn = kernfs_create_dir_ns(kobj->sd, grp->name,
S_IRWXU | S_IRUGO | S_IXUGO,
uid, gid, kobj, NULL);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(kobj->sd, grp->name);
return PTR_ERR(kn);
if (update) {
kn = kernfs_find_and_get(kobj->sd, grp->name);
if (!kn) {
pr_warn("Can't update unknown attr grp name: %s/%s\n",
kobj->name, grp->name);
return -EINVAL;
}
} else {
kn = kernfs_create_dir_ns(kobj->sd, grp->name,
S_IRWXU | S_IRUGO | S_IXUGO,
uid, gid, kobj, NULL);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(kobj->sd, grp->name);
return PTR_ERR(kn);
}
}
} else
kn = kobj->sd;
......@@ -141,6 +150,10 @@ static int internal_create_group(struct kobject *kobj, int update,
kernfs_remove(kn);
}
kernfs_put(kn);
if (grp->name && update)
kernfs_put(kn);
return error;
}
......@@ -205,7 +218,8 @@ EXPORT_SYMBOL_GPL(sysfs_create_groups);
* of the attribute files being created already exist. Furthermore,
* if the visibility of the files has changed through the is_visible()
* callback, it will update the permissions and add or remove the
* relevant files.
* relevant files. Changing a group's name (subdirectory name under
* kobj's directory in sysfs) is not allowed.
*
* The primary use for this function is to call it after making a change
* that affects group visibility.
......
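
For illustration only (not from the patch): a hedged sketch of refreshing a named group after its is_visible() outcome changes, which is the case the internal_create_group() fix above makes work; every identifier below is hypothetical, and the group's .name must match the existing subdirectory since renaming is not allowed:

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static bool foo_feature_on;	/* hypothetical state toggled elsewhere */

static ssize_t foo_rate_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* hypothetical value */
}
static DEVICE_ATTR_RO(foo_rate);

static umode_t foo_attr_visible(struct kobject *kobj,
				struct attribute *attr, int n)
{
	/* hide the attribute unless the feature is active */
	return foo_feature_on ? attr->mode : 0;
}

static struct attribute *foo_attrs[] = {
	&dev_attr_foo_rate.attr,
	NULL
};

static const struct attribute_group foo_group = {
	.name		= "foo",	/* existing named group; cannot be renamed */
	.is_visible	= foo_attr_visible,
	.attrs		= foo_attrs,
};

static int foo_refresh_sysfs(struct device *dev)
{
	/* call after the condition driving is_visible() has changed */
	return sysfs_update_group(&dev->kobj, &foo_group);
}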
......@@ -218,7 +218,6 @@
#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
......@@ -601,7 +600,6 @@
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
TIMER_OF_TABLES() \
IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
CPUIDLE_METHOD_OF_TABLES() \
KERNEL_DTB() \
......
......@@ -339,6 +339,8 @@ struct device *driver_find_device(struct device_driver *drv,
struct device *start, void *data,
int (*match)(struct device *dev, void *data));
int driver_deferred_probe_check_state(struct device *dev);
/**
* struct subsys_interface - interfaces to device functions
* @name: name of the device function
......@@ -1329,30 +1331,34 @@ struct device_link *device_link_add(struct device *consumer,
void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
#ifndef dev_fmt
#define dev_fmt(fmt) fmt
#endif
#ifdef CONFIG_PRINTK
extern __printf(3, 0)
__printf(3, 0)
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args);
extern __printf(3, 4)
__printf(3, 4)
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
extern __printf(3, 4)
__printf(3, 4)
void dev_printk(const char *level, const struct device *dev,
const char *fmt, ...);
extern __printf(2, 3)
void dev_emerg(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
void dev_alert(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
void dev_crit(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
void dev_err(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
void dev_warn(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
void dev_notice(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
__printf(2, 3)
void _dev_emerg(const struct device *dev, const char *fmt, ...);
__printf(2, 3)
void _dev_alert(const struct device *dev, const char *fmt, ...);
__printf(2, 3)
void _dev_crit(const struct device *dev, const char *fmt, ...);
__printf(2, 3)
void _dev_err(const struct device *dev, const char *fmt, ...);
__printf(2, 3)
void _dev_warn(const struct device *dev, const char *fmt, ...);
__printf(2, 3)
void _dev_notice(const struct device *dev, const char *fmt, ...);
__printf(2, 3)
void _dev_info(const struct device *dev, const char *fmt, ...);
#else
......@@ -1370,26 +1376,26 @@ static inline void __dev_printk(const char *level, const struct device *dev,
{}
static inline __printf(3, 4)
void dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_emerg(const struct device *dev, const char *fmt, ...)
void _dev_emerg(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_crit(const struct device *dev, const char *fmt, ...)
void _dev_crit(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_alert(const struct device *dev, const char *fmt, ...)
void _dev_alert(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_err(const struct device *dev, const char *fmt, ...)
void _dev_err(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_warn(const struct device *dev, const char *fmt, ...)
void _dev_warn(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void dev_notice(const struct device *dev, const char *fmt, ...)
void _dev_notice(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void _dev_info(const struct device *dev, const char *fmt, ...)
......@@ -1398,27 +1404,36 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
#endif
/*
* Stupid hackaround for existing uses of non-printk uses dev_info
*
* Note that the definition of dev_info below is actually _dev_info
* and a macro is used to avoid redefining dev_info
* #defines for all the dev_<level> macros to prefix with whatever
* possible use of #define dev_fmt(fmt) ...
*/
#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
#define dev_emerg(dev, fmt, ...) \
_dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_crit(dev, fmt, ...) \
_dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_alert(dev, fmt, ...) \
_dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_err(dev, fmt, ...) \
_dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_warn(dev, fmt, ...) \
_dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_notice(dev, fmt, ...) \
_dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_info(dev, fmt, ...) \
_dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
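
As a hedged usage sketch (not part of the patch): with dev_fmt in place a driver can prefix every dev_<level> message by defining the macro before its includes, mirroring the pr_fmt convention; the prefix and helper below are hypothetical:

#define dev_fmt(fmt) "foo_hw: " fmt

#include <linux/device.h>

static void foo_report_link(struct device *dev, bool up)
{
	/*
	 * Message body becomes "foo_hw: link is down", after the usual
	 * "driver devname:" identification added by dev_warn().
	 */
	if (!up)
		dev_warn(dev, "link is down\n");
}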
#if defined(CONFIG_DYNAMIC_DEBUG)
#define dev_dbg(dev, format, ...) \
do { \
dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
#define dev_dbg(dev, fmt, ...) \
dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
#elif defined(DEBUG)
#define dev_dbg(dev, format, arg...) \
dev_printk(KERN_DEBUG, dev, format, ##arg)
#define dev_dbg(dev, fmt, ...) \
dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#else
#define dev_dbg(dev, format, arg...) \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, format, ##arg); \
#define dev_dbg(dev, fmt, ...) \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
})
#endif
......@@ -1490,7 +1505,7 @@ do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
__dynamic_dev_dbg(&descriptor, dev, fmt, \
__dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
......@@ -1500,23 +1515,23 @@ do { \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
if (__ratelimit(&_rs)) \
dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#else
#define dev_dbg_ratelimited(dev, fmt, ...) \
do { \
if (0) \
dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#endif
#ifdef VERBOSE_DEBUG
#define dev_vdbg dev_dbg
#else
#define dev_vdbg(dev, format, arg...) \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, format, ##arg); \
#define dev_vdbg(dev, fmt, ...) \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
})
#endif
......
......@@ -119,6 +119,23 @@ extern void kobject_get_ownership(struct kobject *kobj,
kuid_t *uid, kgid_t *gid);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
/**
* kobject_has_children - Returns whether a kobject has children.
* @kobj: the object to test
*
* This will return whether a kobject has other kobjects as children.
*
* It does NOT account for the presence of attribute files, only sub
* directories. It also assumes there is no concurrent addition or
* removal of such children, and thus relies on external locking.
*/
static inline bool kobject_has_children(struct kobject *kobj)
{
WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
return kobj->sd && kobj->sd->dir.subdirs;
}
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
......
......@@ -32,8 +32,4 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */
extern struct of_device_id __iommu_of_table;
#define IOMMU_OF_DECLARE(name, compat) OF_DECLARE_1(iommu, name, compat, NULL)
#endif /* __OF_IOMMU_H */
......@@ -91,9 +91,9 @@ struct attribute_group {
struct bin_attribute **bin_attrs;
};
/**
* Use these macros to make defining attributes easier. See include/linux/device.h
* for examples..
/*
* Use these macros to make defining attributes easier.
* See include/linux/device.h for examples..
*/
#define SYSFS_PREALLOC 010000
......
......@@ -144,7 +144,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
strncpy(path + length, kobject_name(parent), cur);
memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
......