/*
 *  linux/drivers/mmc/core/host.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright (C) 2007-2008 Pierre Ossman
 *  Copyright (C) 2010 Linus Walleij
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pagemap.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "core.h"
#include "host.h"

#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)

/*
 * Class device release callback: the mmc_host is allocated together with
 * its embedded class_dev, so freeing the containing structure frees both.
 */
static void mmc_host_classdev_release(struct device *dev)
{
	kfree(cls_dev_to_mmc_host(dev));
}

/* Device class for MMC host controllers; release callback frees the host */
static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
};

/* Register the mmc_host class with the driver core; returns 0 or -errno */
int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

/* Unregister the mmc_host class; counterpart of mmc_register_host_class() */
void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

#ifdef CONFIG_MMC_CLKGATE

/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */
/*
 * Try to gate the MCI clock after the mandatory post-request delay.
 * Bails out early if new clock requests arrived while we waited;
 * otherwise calls mmc_gate_clock() (which sets ios.clock to 0) under
 * clk_gate_mutex, with clk_lock dropped around the sleeping calls.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			 "this means the clock is already disabled.\n",
			 mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		/* Drop the spinlock before calling out to the host driver */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/*
 * Internal work. Work to disable the clock at some later point.
 */
/*
 * Work item body: recover the owning mmc_host from the embedded
 * work_struct and attempt the delayed clock gating.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host;

	host = container_of(work, struct mmc_host, clk_gate_work);
	mmc_host_clk_gate_delayed(host);
}

/**
122
 *	mmc_host_clk_hold - ungate hardware MCI clocks
123 124 125 126 127 128
 *	@host: host to ungate.
 *
 *	Makes sure the host ios.clock is restored to a non-zero value
 *	past this call.	Increase clock reference count and ungate clock
 *	if we're the first user.
 */
129
void mmc_host_clk_hold(struct mmc_host *host)
130 131 132
{
	unsigned long flags;

133
	mutex_lock(&host->clk_gate_mutex);
134 135 136 137 138 139 140 141 142
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
143
	mutex_unlock(&host->clk_gate_mutex);
144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162
}

/**
 *	mmc_host_may_gate_card - check if this card may be gated
 *	@card: card to check.
 */
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;
	/*
	 * Don't gate SDIO cards! These need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may still
	 * be using it.
	 */
163
	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
164 165 166
}

/**
167
 *	mmc_host_clk_release - gate off hardware MCI clocks
168 169 170 171 172 173
 *	@host: host to gate.
 *
 *	Calls the host driver with ios.clock set to zero as often as possible
 *	in order to gate off hardware MCI clocks. Decrease clock reference
 *	count and schedule disabling of clock.
 */
174
void mmc_host_clk_release(struct mmc_host *host)
175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		schedule_work(&host->clk_gate_work);
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

/**
 *	mmc_host_clk_rate - get current clock frequency setting
 *	@host: host to get the clock frequency for.
 *
 *	Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long flags;
	unsigned long rate;

	spin_lock_irqsave(&host->clk_lock, flags);
	/* When gated, ios.clock is 0 and the old rate lives in clk_old */
	rate = host->clk_gated ? host->clk_old : host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);

	return rate;
}

/**
 *	mmc_host_clk_init - set up clock gating code
 *	@host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	host->clk_gated = false;
	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}

/**
 *	mmc_host_clk_exit - shut down clock gating code
 *	@host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_hold(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
	/* Clock gating disabled (CONFIG_MMC_CLKGATE unset): nothing to set up */
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/* Clock gating disabled (CONFIG_MMC_CLKGATE unset): nothing to tear down */
}

#endif

251 252 253 254 255 256 257 258 259
/**
 *	mmc_alloc_host - initialise the per-host structure.
 *	@extra: sizeof private data structure
 *	@dev: pointer to host device model structure
 *
 *	Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
P
Pierre Ossman 已提交
260
	int err;
261 262
	struct mmc_host *host;

P
Pierre Ossman 已提交
263 264 265
	if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
		return NULL;

266
	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
267 268 269
	if (!host)
		return NULL;

P
Pierre Ossman 已提交
270 271 272 273 274 275
	spin_lock(&mmc_host_lock);
	err = idr_get_new(&mmc_host_idr, host, &host->index);
	spin_unlock(&mmc_host_lock);
	if (err)
		goto free;

276
	dev_set_name(&host->class_dev, "mmc%d", host->index);
P
Pierre Ossman 已提交
277

278 279 280 281 282
	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

283 284
	mmc_host_clk_init(host);

285 286 287
	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
288
	INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
289
#ifdef CONFIG_PM
290
	host->pm_notify.notifier_call = mmc_pm_notify;
291
#endif
292 293 294 295 296

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
297
	host->max_segs = 1;
298 299 300 301 302 303 304
	host->max_seg_size = PAGE_CACHE_SIZE;

	host->max_req_size = PAGE_CACHE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_CACHE_SIZE / 512;

	return host;
P
Pierre Ossman 已提交
305 306 307 308

free:
	kfree(host);
	return NULL;
309 310 311 312 313 314 315
}

EXPORT_SYMBOL(mmc_alloc_host);

/**
 *	mmc_add_host - initialise host hardware
 *	@host: mmc host
P
Pierre Ossman 已提交
316 317 318 319
 *
 *	Register the host with the driver model. The host must be
 *	prepared to start servicing requests before this function
 *	completes.
320 321 322 323 324
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

325 326 327
	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

328 329 330 331
	err = device_add(&host->class_dev);
	if (err)
		return err;

332 333
	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

334 335 336 337
#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif

338
	mmc_start_host(host);
339
	register_pm_notifier(&host->pm_notify);
340 341 342 343 344 345 346 347 348 349 350

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);

/**
 *	mmc_remove_host - remove host hardware
 *	@host: mmc host
 *
 *	Unregister and remove all cards associated with this host,
P
Pierre Ossman 已提交
351 352
 *	and power down the MMC bus. No new requests will be issued
 *	after this function has returned.
353 354 355
 */
void mmc_remove_host(struct mmc_host *host)
{
356
	unregister_pm_notifier(&host->pm_notify);
357 358
	mmc_stop_host(host);

359 360 361 362
#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

363 364
	device_del(&host->class_dev);

365
	led_trigger_unregister_simple(host->led);
366 367

	mmc_host_clk_exit(host);
368 369 370 371 372 373 374 375 376 377 378 379
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 *	mmc_free_host - free the host structure
 *	@host: mmc host
 *
 *	Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	/* Give back the "mmcN" index so it can be reused */
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);

	/* Final put; classdev_release kfrees the host when refcount hits zero */
	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);