提交 e46b272b 编写于 作者: Harvey Harrison 提交者: Linus Torvalds

md: replace remaining __FUNCTION__ occurrences

__FUNCTION__ is gcc-specific, use __func__
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Cc: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 9a7b2b0f
...@@ -78,7 +78,7 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md, ...@@ -78,7 +78,7 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
event = dm_uevent_alloc(md); event = dm_uevent_alloc(md);
if (!event) { if (!event) {
DMERR("%s: dm_uevent_alloc() failed", __FUNCTION__); DMERR("%s: dm_uevent_alloc() failed", __func__);
goto err_nomem; goto err_nomem;
} }
...@@ -86,32 +86,32 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md, ...@@ -86,32 +86,32 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) { if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
DMERR("%s: add_uevent_var() for DM_TARGET failed", DMERR("%s: add_uevent_var() for DM_TARGET failed",
__FUNCTION__); __func__);
goto err_add; goto err_add;
} }
if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) { if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
DMERR("%s: add_uevent_var() for DM_ACTION failed", DMERR("%s: add_uevent_var() for DM_ACTION failed",
__FUNCTION__); __func__);
goto err_add; goto err_add;
} }
if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u", if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
dm_next_uevent_seq(md))) { dm_next_uevent_seq(md))) {
DMERR("%s: add_uevent_var() for DM_SEQNUM failed", DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
__FUNCTION__); __func__);
goto err_add; goto err_add;
} }
if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) { if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
DMERR("%s: add_uevent_var() for DM_PATH failed", __FUNCTION__); DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
goto err_add; goto err_add;
} }
if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d", if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
nr_valid_paths)) { nr_valid_paths)) {
DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed", DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
__FUNCTION__); __func__);
goto err_add; goto err_add;
} }
...@@ -146,25 +146,25 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj) ...@@ -146,25 +146,25 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj)
if (dm_copy_name_and_uuid(event->md, event->name, if (dm_copy_name_and_uuid(event->md, event->name,
event->uuid)) { event->uuid)) {
DMERR("%s: dm_copy_name_and_uuid() failed", DMERR("%s: dm_copy_name_and_uuid() failed",
__FUNCTION__); __func__);
goto uevent_free; goto uevent_free;
} }
if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) { if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
DMERR("%s: add_uevent_var() for DM_NAME failed", DMERR("%s: add_uevent_var() for DM_NAME failed",
__FUNCTION__); __func__);
goto uevent_free; goto uevent_free;
} }
if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) { if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
DMERR("%s: add_uevent_var() for DM_UUID failed", DMERR("%s: add_uevent_var() for DM_UUID failed",
__FUNCTION__); __func__);
goto uevent_free; goto uevent_free;
} }
r = kobject_uevent_env(kobj, event->action, event->ku_env.envp); r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
if (r) if (r)
DMERR("%s: kobject_uevent_env failed", __FUNCTION__); DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free: uevent_free:
dm_uevent_free(event); dm_uevent_free(event);
} }
...@@ -187,7 +187,7 @@ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti, ...@@ -187,7 +187,7 @@ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
struct dm_uevent *event; struct dm_uevent *event;
if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) { if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
DMERR("%s: Invalid event_type %d", __FUNCTION__, event_type); DMERR("%s: Invalid event_type %d", __func__, event_type);
goto out; goto out;
} }
......
...@@ -433,7 +433,7 @@ static void ops_run_io(struct stripe_head *sh) ...@@ -433,7 +433,7 @@ static void ops_run_io(struct stripe_head *sh)
bi->bi_bdev = rdev->bdev; bi->bi_bdev = rdev->bdev;
pr_debug("%s: for %llu schedule op %ld on disc %d\n", pr_debug("%s: for %llu schedule op %ld on disc %d\n",
__FUNCTION__, (unsigned long long)sh->sector, __func__, (unsigned long long)sh->sector,
bi->bi_rw, i); bi->bi_rw, i);
atomic_inc(&sh->count); atomic_inc(&sh->count);
bi->bi_sector = sh->sector + rdev->data_offset; bi->bi_sector = sh->sector + rdev->data_offset;
...@@ -520,7 +520,7 @@ static void ops_complete_biofill(void *stripe_head_ref) ...@@ -520,7 +520,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
raid5_conf_t *conf = sh->raid_conf; raid5_conf_t *conf = sh->raid_conf;
int i; int i;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
/* clear completed biofills */ /* clear completed biofills */
...@@ -569,7 +569,7 @@ static void ops_run_biofill(struct stripe_head *sh) ...@@ -569,7 +569,7 @@ static void ops_run_biofill(struct stripe_head *sh)
raid5_conf_t *conf = sh->raid_conf; raid5_conf_t *conf = sh->raid_conf;
int i; int i;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
for (i = sh->disks; i--; ) { for (i = sh->disks; i--; ) {
...@@ -600,7 +600,7 @@ static void ops_complete_compute5(void *stripe_head_ref) ...@@ -600,7 +600,7 @@ static void ops_complete_compute5(void *stripe_head_ref)
int target = sh->ops.target; int target = sh->ops.target;
struct r5dev *tgt = &sh->dev[target]; struct r5dev *tgt = &sh->dev[target];
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
set_bit(R5_UPTODATE, &tgt->flags); set_bit(R5_UPTODATE, &tgt->flags);
...@@ -625,7 +625,7 @@ ops_run_compute5(struct stripe_head *sh, unsigned long pending) ...@@ -625,7 +625,7 @@ ops_run_compute5(struct stripe_head *sh, unsigned long pending)
int i; int i;
pr_debug("%s: stripe %llu block: %d\n", pr_debug("%s: stripe %llu block: %d\n",
__FUNCTION__, (unsigned long long)sh->sector, target); __func__, (unsigned long long)sh->sector, target);
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
for (i = disks; i--; ) for (i = disks; i--; )
...@@ -653,7 +653,7 @@ static void ops_complete_prexor(void *stripe_head_ref) ...@@ -653,7 +653,7 @@ static void ops_complete_prexor(void *stripe_head_ref)
{ {
struct stripe_head *sh = stripe_head_ref; struct stripe_head *sh = stripe_head_ref;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
set_bit(STRIPE_OP_PREXOR, &sh->ops.complete); set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
...@@ -670,7 +670,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) ...@@ -670,7 +670,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
/* existing parity data subtracted */ /* existing parity data subtracted */
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
for (i = disks; i--; ) { for (i = disks; i--; ) {
...@@ -699,7 +699,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, ...@@ -699,7 +699,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
*/ */
int prexor = test_bit(STRIPE_OP_PREXOR, &pending); int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
for (i = disks; i--; ) { for (i = disks; i--; ) {
...@@ -744,7 +744,7 @@ static void ops_complete_postxor(void *stripe_head_ref) ...@@ -744,7 +744,7 @@ static void ops_complete_postxor(void *stripe_head_ref)
{ {
struct stripe_head *sh = stripe_head_ref; struct stripe_head *sh = stripe_head_ref;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
...@@ -757,7 +757,7 @@ static void ops_complete_write(void *stripe_head_ref) ...@@ -757,7 +757,7 @@ static void ops_complete_write(void *stripe_head_ref)
struct stripe_head *sh = stripe_head_ref; struct stripe_head *sh = stripe_head_ref;
int disks = sh->disks, i, pd_idx = sh->pd_idx; int disks = sh->disks, i, pd_idx = sh->pd_idx;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
for (i = disks; i--; ) { for (i = disks; i--; ) {
...@@ -787,7 +787,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, ...@@ -787,7 +787,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
unsigned long flags; unsigned long flags;
dma_async_tx_callback callback; dma_async_tx_callback callback;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
/* check if prexor is active which means only process blocks /* check if prexor is active which means only process blocks
...@@ -837,7 +837,7 @@ static void ops_complete_check(void *stripe_head_ref) ...@@ -837,7 +837,7 @@ static void ops_complete_check(void *stripe_head_ref)
struct stripe_head *sh = stripe_head_ref; struct stripe_head *sh = stripe_head_ref;
int pd_idx = sh->pd_idx; int pd_idx = sh->pd_idx;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) && if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
...@@ -859,7 +859,7 @@ static void ops_run_check(struct stripe_head *sh) ...@@ -859,7 +859,7 @@ static void ops_run_check(struct stripe_head *sh)
int count = 0, pd_idx = sh->pd_idx, i; int count = 0, pd_idx = sh->pd_idx, i;
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
for (i = disks; i--; ) { for (i = disks; i--; ) {
...@@ -1759,7 +1759,7 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand) ...@@ -1759,7 +1759,7 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
locked++; locked++;
pr_debug("%s: stripe %llu locked: %d pending: %lx\n", pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
__FUNCTION__, (unsigned long long)sh->sector, __func__, (unsigned long long)sh->sector,
locked, sh->ops.pending); locked, sh->ops.pending);
return locked; return locked;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册