/* -*- linux-c -*-
 * linux/arch/blackfin/kernel/ipipe.c
 *
 * Copyright (C) 2005-2007 Philippe Gerum.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 * USA; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Architecture-dependent I-pipe support for the Blackfin.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/unistd.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/atomic.h>

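/*
 * Per-CPU snapshot of the register frame at timer interrupts; as
 * noted in __ipipe_handle_irq(), the frame contents are only
 * meaningful when processing the timer interrupt.
 */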
DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);

asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);

static void __ipipe_no_irqtail(void);

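/*
 * Hook called on the syscall and interrupt exit paths, so that a
 * co-kernel (e.g. Xenomai) may run deferred work such as
 * rescheduling. Defaults to the no-op below.
 */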
unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
EXPORT_SYMBOL(__ipipe_irq_tail_hook);

unsigned long __ipipe_core_clock;
EXPORT_SYMBOL(__ipipe_core_clock);

unsigned long __ipipe_freq_scale;
EXPORT_SYMBOL(__ipipe_freq_scale);

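/* Per-priority-level count of interrupts requested by non-root domains. */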
atomic_t __ipipe_irq_lvdepth[IVG15 + 1];

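/*
 * Mask of core priority levels in use by non-root domains; see
 * __ipipe_disable_root_irqs_hw() for how it is applied.
 */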
unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
EXPORT_SYMBOL(__ipipe_irq_lvmask);

static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
{
	desc->ipipe_ack(irq, desc);
}

/*
 * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
 * interrupts are off, and secondary CPUs are still lost in space.
 */
void __ipipe_enable_pipeline(void)
{
	unsigned irq;

	__ipipe_core_clock = get_cclk(); /* Fetch this once. */
	__ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;

	for (irq = 0; irq < NR_IRQS; ++irq)
		ipipe_virtualize_irq(ipipe_root_domain,
				     irq,
				     (ipipe_irq_handler_t)&asm_do_IRQ,
				     NULL,
				     &__ipipe_ack_irq,
				     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
}

/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain, *next_domain;
	struct list_head *head, *pos;
	struct ipipe_irqdesc *idesc;
	int m_ack, s = -1;

	/*
	 * Software-triggered IRQs do not need any ack.  The contents
	 * of the register frame should only be used when processing
	 * the timer interrupt, but not for handling any other
	 * interrupt.
	 */
	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);

	this_domain = __ipipe_current_domain;
	idesc = &this_domain->irqs[irq];

	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &idesc->control)))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
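		/*
		 * Wired interrupts are immediately delivered to the
		 * head domain, skipping the pipeline log.
		 */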
		idesc = &next_domain->irqs[irq];
		if (likely(test_bit(IPIPE_WIRED_FLAG, &idesc->control))) {
			if (!m_ack && idesc->acknowledge != NULL)
				idesc->acknowledge(irq, irq_to_desc(irq));
			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
				s = __test_and_set_bit(IPIPE_STALL_FLAG,
						       &p->status);
			__ipipe_dispatch_wired(next_domain, irq);
			goto out;
		}
	}

	/* Ack the interrupt. */

	pos = head;
	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		idesc = &next_domain->irqs[irq];
		if (test_bit(IPIPE_HANDLE_FLAG, &idesc->control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && idesc->acknowledge != NULL) {
				idesc->acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &idesc->control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline. We also enforce the
	 * additional root stage lock (blackfin-specific).
	 */
	if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
		s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto out;

	__ipipe_walk_pipeline(head);
out:
	if (!s)
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
}

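/*
 * __ipipe_enable_irqdesc() -- The first non-root domain requesting a
 * given priority level turns the corresponding bit on in
 * __ipipe_irq_lvmask; __ipipe_disable_irqdesc() below does the
 * converse when the last one goes away.
 */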
void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int prio = __ipipe_get_irq_priority(irq);

	desc->depth = 0;
	if (ipd != &ipipe_root &&
	    atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
		__set_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_enable_irqdesc);

void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	int prio = __ipipe_get_irq_priority(irq);

	if (ipd != &ipipe_root &&
	    atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
		__clear_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_disable_irqdesc);

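/*
 * __ipipe_syscall_root() -- Intercepts system calls on the root
 * stage, giving the co-kernel a chance to handle them before Linux
 * does; see the return convention documented below.
 */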
asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p;
	void (*hook)(void);
	int ret;

	WARN_ON_ONCE(irqs_disabled_hw());

	/*
	 * We need to run the IRQ tail hook each time we intercept a
	 * syscall, because we know that important operations might be
	 * pending there (e.g. Xenomai deferred rescheduling).
	 */
	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
	hook();

	/*
	 * This routine either returns:
	 * 0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 * tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 * tail work has to be performed (for handling signals etc).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
		return 0;

	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);

	hard_local_irq_disable();

	/*
	 * This is the end of the syscall path, so we may
	 * safely assume a valid Linux task stack here.
	 */
	if (current->ipipe_flags & PF_EVTRET) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p)
		ret = -1;
	else {
		p = ipipe_root_cpudom_ptr();
		if (__ipipe_ipending_p(p))
			__ipipe_sync_pipeline();
	}

	hard_local_irq_enable();

	return -ret;
}

static void __ipipe_no_irqtail(void)
{
}

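/*
 * ipipe_get_sysinfo() -- Report the CPU count, clock frequencies and
 * hrtimer interrupt to interested clients. On the Blackfin, the core
 * timer runs at the core clock frequency, hence the shared value.
 */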
int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
	info->sys_nr_cpus = num_online_cpus();
	info->sys_cpu_freq = ipipe_cpu_freq();
	info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
	info->sys_hrtimer_freq = __ipipe_core_clock;
	info->sys_hrclock_freq = __ipipe_core_clock;

	return 0;
}

/*
 * ipipe_trigger_irq() -- Push the interrupt to the front of the
 * pipeline, just as if it had actually been received from a hw
 * source. Also works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned irq)
{
	unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
	if (irq >= IPIPE_NR_IRQS ||
	    (ipipe_virtual_irq_p(irq)
	     && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
		return -EINVAL;
#endif

	flags = hard_local_irq_save();
	__ipipe_handle_irq(irq, NULL);
	hard_local_irq_restore(flags);

	return 1;
}

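/*
 * __ipipe_sync_root() -- Run the IRQ tail hook, then play any
 * interrupt still pending in the root stage log, with hw interrupts
 * disabled across the whole sequence.
 */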
asmlinkage void __ipipe_sync_root(void)
{
	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(irqs_disabled());

	flags = hard_local_irq_save();

	if (irq_tail_hook)
		irq_tail_hook();

	clear_thread_flag(TIF_IRQ_SYNC);

	p = ipipe_root_cpudom_ptr();
	if (__ipipe_ipending_p(p))
		__ipipe_sync_pipeline();

	hard_local_irq_restore(flags);
}

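/*
 * ___ipipe_sync_pipeline() -- Flush the pending interrupt log of the
 * current stage, unless synchronization was deferred for the root
 * stage (see __ipipe_lock_root()).
 */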
void ___ipipe_sync_pipeline(void)
{
	if (__ipipe_root_domain_p &&
	    test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
		return;

	__ipipe_sync_stage();
}

void __ipipe_disable_root_irqs_hw(void)
{
	/*
	 * This code is called by the ins{bwl} routines (see
	 * arch/blackfin/lib/ins.S), which are heavily used by the
	 * network stack. It masks all interrupts but those handled by
	 * non-root domains, so that we keep decent network transfer
	 * rates for Linux without inducing pathological jitter for
	 * the real-time domain.
	 */
	bfin_sti(__ipipe_irq_lvmask);
	__set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
}

void __ipipe_enable_root_irqs_hw(void)
{
	__clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
	bfin_sti(bfin_irq_flags);
}

/*
 * We could use standard atomic bitops in the following root status
 * manipulation routines, but let's prepare for SMP support in the
 * same move, preventing CPU migration as required.
 */
void __ipipe_stall_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	__set_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore(flags);
}
EXPORT_SYMBOL(__ipipe_stall_root);

unsigned long __ipipe_test_and_stall_root(void)
{
	unsigned long *p, flags;
	int x;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore(flags);

	return x;
}
EXPORT_SYMBOL(__ipipe_test_and_stall_root);

unsigned long __ipipe_test_root(void)
{
	const unsigned long *p;
	unsigned long flags;
	int x;

	flags = hard_local_irq_save_smp();
	p = &__ipipe_root_status;
	x = test_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore_smp(flags);

	return x;
}
EXPORT_SYMBOL(__ipipe_test_root);

void __ipipe_lock_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	__set_bit(IPIPE_SYNCDEFER_FLAG, p);
	hard_local_irq_restore(flags);
}
EXPORT_SYMBOL(__ipipe_lock_root);

void __ipipe_unlock_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	__clear_bit(IPIPE_SYNCDEFER_FLAG, p);
	hard_local_irq_restore(flags);
}
EXPORT_SYMBOL(__ipipe_unlock_root);