/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cache.h>
#include <asm/bitops.h>
#include <asm/atomic.h>

struct irqaction {
	void (*handler)(int, void *, struct pt_regs *);
	unsigned long flags;
	unsigned long mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
};


/* Who gets which entry in bh_base.  Things that occur most often
   should come first. */

enum {
	TIMER_BH = 0,
	CONSOLE_BH,
	TQUEUE_BH,
	DIGI_BH,
	SERIAL_BH,
	RISCOM8_BH,
	SPECIALIX_BH,
	AURORA_BH,
	ESP_BH,
	SCSI_BH,
	IMMEDIATE_BH,
	CYCLADES_BH,
	CM206_BH,
	JS_BH,
	MACSERIAL_BH,
	ISICOM_BH
};

#include <asm/hardirq.h>
#include <asm/softirq.h>



/* PLEASE, avoid allocating new softirqs unless you _really_ need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. KEYBOARD_BH, CONSOLE_BH and all
   the serial device BHs were converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	TASKLET_SOFTIRQ
};

#if SMP_CACHE_BYTES <= 32
/* This is a trick to make the assembly easier. */
#define SOFTIRQ_STATE_PAD 32
#else
#define SOFTIRQ_STATE_PAD SMP_CACHE_BYTES
#endif

struct softirq_state
{
	__u32	active;
	__u32	mask;
} __attribute__ ((__aligned__(SOFTIRQ_STATE_PAD)));

extern struct softirq_state softirq_state[NR_CPUS];

struct softirq_action
{
	void	(*action)(struct softirq_action *);
	void	*data;
};

asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);

extern __inline__ void __cpu_raise_softirq(int cpu, int nr)
{
	softirq_state[cpu].active |= (1<<nr);
}


/* We do not use atomic operations here, so protect the update with cli/sti instead. */
extern __inline__ void raise_softirq(int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	__cpu_raise_softirq(smp_processor_id(), nr);
	local_irq_restore(flags);
}

extern void softirq_init(void);
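
/*
 * A usage sketch (illustrative only: MY_SOFTIRQ, my_action() and
 * my_state are hypothetical, and, per the warning above, a real driver
 * should almost always use a tasklet rather than a new softirq):
 *
 *	static void my_action(struct softirq_action *a)
 *	{
 *		struct my_state *s = (struct my_state *)a->data;
 *		... deferred work runs here, outside hard interrupt context ...
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action, &my_state);	 at init time;
 *		MY_SOFTIRQ would need an entry in the enum above
 *	raise_softirq(MY_SOFTIRQ);	 e.g. from an interrupt handler;
 *		do_softirq() then runs the action on the way out of the
 *		interrupt
 */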



/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some cpu at least once afterwards.
   * If the tasklet is already scheduled but its execution has not yet
     started, it will be executed only once.
   * If the tasklet is already running on another CPU (or if schedule
     is called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
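
/*
 * A minimal usage sketch (illustrative only; my_tasklet and
 * my_tasklet_fn are hypothetical names):
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		... runs with interrupts enabled, serialized
 *		    against itself per the properties above ...
 *	}
 *
 *	DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 * Then, typically from an interrupt handler:
 *
 *	tasklet_schedule(&my_tasklet);
 */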


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

struct tasklet_head
{
	struct tasklet_struct *list;
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));

extern struct tasklet_head tasklet_vec[NR_CPUS];
extern struct tasklet_head tasklet_hi_vec[NR_CPUS];
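
/*
 * On SMP, TASKLET_STATE_RUN serializes a tasklet against itself:
 * tasklet_trylock() claims the right to run it, tasklet_unlock()
 * releases it, and tasklet_unlock_wait() spins until a running
 * instance finishes. On UP the lock is unnecessary and compiles away.
 */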

#ifdef __SMP__
#define tasklet_trylock(t) (!test_and_set_bit(TASKLET_STATE_RUN, &(t)->state))
#define tasklet_unlock_wait(t) while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { /* NOTHING */ }
#define tasklet_unlock(t) clear_bit(TASKLET_STATE_RUN, &(t)->state)
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern __inline__ void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		int cpu = smp_processor_id();
		unsigned long flags;

		local_irq_save(flags);
		t->next = tasklet_vec[cpu].list;
		tasklet_vec[cpu].list = t;
		__cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
		local_irq_restore(flags);
	}
}

extern __inline__ void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		int cpu = smp_processor_id();
		unsigned long flags;

		local_irq_save(flags);
		t->next = tasklet_hi_vec[cpu].list;
		tasklet_hi_vec[cpu].list = t;
		__cpu_raise_softirq(cpu, HI_SOFTIRQ);
		local_irq_restore(flags);
	}
}


extern __inline__ void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
}

extern __inline__ void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
}

extern __inline__ void tasklet_enable(struct tasklet_struct *t)
{
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
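
/*
 * Lifecycle sketch for a dynamically initialized tasklet (illustrative
 * only; my_dev and my_tasklet_fn are hypothetical):
 *
 *	tasklet_init(&my_dev->tasklet, my_tasklet_fn,
 *		     (unsigned long)my_dev);
 *	...
 *	tasklet_disable(&my_dev->tasklet);	 waits for a running
 *						 instance to finish
 *	... touch state shared with the tasklet ...
 *	tasklet_enable(&my_dev->tasklet);
 *	...
 *	tasklet_kill(&my_dev->tasklet);		 before freeing my_dev
 */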

#ifdef __SMP__

#define SMP_TIMER_NAME(name) name##__thr

#define SMP_TIMER_DEFINE(name, task) \
DECLARE_TASKLET(task, name##__thr, 0); \
static void name (unsigned long dummy) \
{ \
	tasklet_schedule(&(task)); \
}

#else /* __SMP__ */

#define SMP_TIMER_NAME(name) name
#define SMP_TIMER_DEFINE(name, task)

#endif /* __SMP__ */
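
/*
 * These macros let a timer handler run as a tasklet on SMP while
 * remaining a plain function on UP. A sketch of the intended use
 * (my_timeout and my_timeout_task are hypothetical names):
 *
 *	static void SMP_TIMER_NAME(my_timeout)(unsigned long dummy)
 *	{
 *		... the real timer work ...
 *	}
 *	SMP_TIMER_DEFINE(my_timeout, my_timeout_task);
 *
 * On SMP this defines my_timeout() as a stub that schedules the
 * my_timeout_task tasklet; on UP the handler above simply is
 * my_timeout(). Either way:
 *
 *	my_timer.function = my_timeout;
 */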


/* Old BH definitions */

extern struct tasklet_struct bh_task_vec[];

/* It is exported _ONLY_ for wait_on_irq(). */
extern spinlock_t global_bh_lock;

extern __inline__ void mark_bh(int nr)
{
	tasklet_hi_schedule(bh_task_vec+nr);
}

extern __inline__ void disable_bh_nosync(int nr)
{
	tasklet_disable_nosync(bh_task_vec+nr);
}

extern __inline__ void disable_bh(int nr)
{
	tasklet_disable_nosync(bh_task_vec+nr);
	if (!in_interrupt())
		tasklet_unlock_wait(bh_task_vec+nr);
}

extern __inline__ void enable_bh(int nr)
{
	tasklet_enable(bh_task_vec+nr);
}


extern void init_bh(int nr, void (*routine)(void));
extern void remove_bh(int nr);
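
/*
 * Usage sketch for the compatibility interface (illustrative only;
 * FOO_BH stands for one of the entries in the enum at the top of this
 * file, and foo_bh_routine() is hypothetical):
 *
 *	init_bh(FOO_BH, foo_bh_routine);	 at driver init
 *	mark_bh(FOO_BH);	 from the interrupt handler; runs
 *				 foo_bh_routine() via a HI_SOFTIRQ tasklet
 *	remove_bh(FOO_BH);	 at driver unload
 */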


/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated IRQs.
 *
 * probe_irq_off() takes that mask as a parameter and returns the IRQ
 * number that occurred, zero if none occurred, or a negative IRQ
 * number if more than one occurred.
 */
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
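
/*
 * A probing sketch following the recipe above (illustrative only;
 * foo_trigger_interrupt() stands for whatever device-specific poke
 * makes the hardware raise its interrupt):
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	sti();
 *	irqs = probe_irq_on();		 take over unassigned idle IRQs
 *	foo_trigger_interrupt(dev);	 make the device interrupt
 *	udelay(100);			 give the interrupt time to arrive
 *	irq = probe_irq_off(irqs);	 0 = none, negative = multiple
 *	if (irq > 0)
 *		dev->irq = irq;		 found exactly one candidate
 */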

#endif