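/*
 * i8259.c: support for the pair of cascaded Intel 8259A programmable
 * interrupt controllers found on PC-style boards.  The master PIC sits
 * at I/O ports 0x20/0x21 and the slave at 0xA0/0xA1; the slave is
 * cascaded into IRQ2 of the master.
 */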
/*
 * BK Id: SCCS/s.i8259.c 1.7 05/17/01 18:14:21 cort
 */

#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/io.h>
#include "i8259.h"

unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])

spinlock_t i8259_lock = SPIN_LOCK_UNLOCKED;

int i8259_pic_irq_offset;

int i8259_irq(int cpu)
{
        int irq;

        spin_lock/*_irqsave*/(&i8259_lock/*, flags*/);
        /*
         * Perform an interrupt acknowledge cycle on controller 1
         */
        outb(0x0C, 0x20);
        irq = inb(0x20) & 7;
        if (irq == 2) {
                /*
                 * Interrupt is cascaded so perform interrupt
                 * acknowledge on controller 2
                 */
                outb(0x0C, 0xA0);
                irq = (inb(0xA0) & 7) + 8;
        } else if (irq == 7) {
                /*
                 * This may be a spurious interrupt
                 *
                 * Read the interrupt status register. If the most
                 * significant bit is not set then there is no valid
                 * interrupt
                 */
                outb(0x0b, 0x20);
                if (~inb(0x20) & 0x80) {
                        spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
                        return -1;
                }
        }
        spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
        return irq;
}

static void i8259_mask_and_ack_irq(unsigned int irq_nr)
{
        unsigned long flags;

        spin_lock_irqsave(&i8259_lock, flags);
        if (irq_nr >= i8259_pic_irq_offset)
                irq_nr -= i8259_pic_irq_offset;
        if (irq_nr > 7) {
                cached_A1 |= 1 << (irq_nr - 8);
                inb(0xA1);              /* DUMMY */
                outb(cached_A1, 0xA1);
                outb(0x20, 0xA0);       /* Non-specific EOI */
                outb(0x20, 0x20);       /* Non-specific EOI to cascade */
        } else {
                cached_21 |= 1 << irq_nr;
                inb(0x21);              /* DUMMY */
                outb(cached_21, 0x21);
                outb(0x20, 0x20);       /* Non-specific EOI */
        }
        spin_unlock_irqrestore(&i8259_lock, flags);
}

static void i8259_set_irq_mask(int irq_nr)
{
        outb(cached_A1, 0xA1);
        outb(cached_21, 0x21);
}

static void i8259_mask_irq(unsigned int irq_nr)
{
        unsigned long flags;

        spin_lock_irqsave(&i8259_lock, flags);
        if (irq_nr >= i8259_pic_irq_offset)
                irq_nr -= i8259_pic_irq_offset;
        if (irq_nr < 8)
                cached_21 |= 1 << irq_nr;
        else
                cached_A1 |= 1 << (irq_nr - 8);
        i8259_set_irq_mask(irq_nr);
        spin_unlock_irqrestore(&i8259_lock, flags);
}

static void i8259_unmask_irq(unsigned int irq_nr)
{
        unsigned long flags;

        spin_lock_irqsave(&i8259_lock, flags);
        if (irq_nr >= i8259_pic_irq_offset)
                irq_nr -= i8259_pic_irq_offset;
        if (irq_nr < 8)
                cached_21 &= ~(1 << irq_nr);
        else
                cached_A1 &= ~(1 << (irq_nr - 8));
        i8259_set_irq_mask(irq_nr);
        spin_unlock_irqrestore(&i8259_lock, flags);
}

static void i8259_end_irq(unsigned int irq)
{
        if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
                i8259_unmask_irq(irq);
}

struct hw_interrupt_type i8259_pic = {
        " i8259 ",
        NULL,
        NULL,
        i8259_unmask_irq,
        i8259_mask_irq,
        i8259_mask_and_ack_irq,
        i8259_end_irq,
        NULL
};

void __init i8259_init(void)
{
        unsigned long flags;

        spin_lock_irqsave(&i8259_lock, flags);

        /* init master interrupt controller */
        outb(0x11, 0x20);       /* Start init sequence */
        outb(0x00, 0x21);       /* Vector base */
        outb(0x04, 0x21);       /* edge triggered, Cascade (slave) on IRQ2 */
        outb(0x01, 0x21);       /* Select 8086 mode */
        outb(0xFF, 0x21);       /* Mask all */

        /* init slave interrupt controller */
        outb(0x11, 0xA0);       /* Start init sequence */
        outb(0x08, 0xA1);       /* Vector base */
        outb(0x02, 0xA1);       /* edge triggered, Cascade (slave) on IRQ2 */
        outb(0x01, 0xA1);       /* Select 8086 mode */
        outb(0xFF, 0xA1);       /* Mask all */

        outb(cached_A1, 0xA1);
        outb(cached_21, 0x21);
        spin_unlock_irqrestore(&i8259_lock, flags);

        request_irq(i8259_pic_irq_offset + 2, no_action, SA_INTERRUPT,
                    "82c59 secondary cascade", NULL);
}
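/*
 * Usage sketch (illustration only, not part of the original file): roughly
 * how a 2.4-era platform could hook this driver up.  The my_platform_*
 * names and the ISA_IRQ_OFFSET value are made up for the example; only
 * i8259_init(), i8259_irq(), i8259_pic and i8259_pic_irq_offset come from
 * this file.
 */
#if 0
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include "i8259.h"

#define ISA_IRQ_OFFSET 0        /* assumed: legacy 8259 IRQs start at 0 */

void __init my_platform_init_IRQ(void)
{
        int i;

        /* Route the 16 legacy interrupts through the 8259 driver */
        i8259_pic_irq_offset = ISA_IRQ_OFFSET;
        for (i = 0; i < 16; i++)
                irq_desc[ISA_IRQ_OFFSET + i].handler = &i8259_pic;
        i8259_init();           /* program both PICs, claim the cascade */
}

/* Called from the platform's interrupt entry code to identify the source */
int my_platform_get_irq(void)
{
        int irq = i8259_irq(smp_processor_id());

        return (irq < 0) ? -1 : irq + ISA_IRQ_OFFSET;   /* -1 == spurious */
}
#endif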